From 93e5f0b65ab8bf5cfdc699887eb3fbd877e252d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 15 Jun 2017 20:12:52 +0300 Subject: [PATCH 0001/1795] drm/i915: Make intel_digital_port_connected() work for any port MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add the missing port A handling to intel_digital_port_connected() and also separate SPT from the CPT/LPT code a bit. Cc: Manasi Navare Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/20170615171252.11921-1-ville.syrjala@linux.intel.com Reviewed-by: Manasi Navare --- drivers/gpu/drm/i915/intel_dp.c | 83 +++++++++++++++++++++++++++------ 1 file changed, 70 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 7a3a42c95381..2eb6e0ff143a 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -4444,8 +4444,6 @@ static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv, u32 bit; switch (port->port) { - case PORT_A: - return true; case PORT_B: bit = SDE_PORTB_HOTPLUG; break; @@ -4469,8 +4467,6 @@ static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv, u32 bit; switch (port->port) { - case PORT_A: - return true; case PORT_B: bit = SDE_PORTB_HOTPLUG_CPT; break; @@ -4480,12 +4476,28 @@ static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv, case PORT_D: bit = SDE_PORTD_HOTPLUG_CPT; break; + default: + MISSING_CASE(port->port); + return false; + } + + return I915_READ(SDEISR) & bit; +} + +static bool spt_digital_port_connected(struct drm_i915_private *dev_priv, + struct intel_digital_port *port) +{ + u32 bit; + + switch (port->port) { + case PORT_A: + bit = SDE_PORTA_HOTPLUG_SPT; + break; case PORT_E: bit = SDE_PORTE_HOTPLUG_SPT; break; default: - MISSING_CASE(port->port); - return false; + return cpt_digital_port_connected(dev_priv, port); } return I915_READ(SDEISR) & bit; @@ -4537,6 +4549,42 @@ static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv, return I915_READ(PORT_HOTPLUG_STAT) & bit; } +static bool ilk_digital_port_connected(struct drm_i915_private *dev_priv, + struct intel_digital_port *port) +{ + if (port->port == PORT_A) + return I915_READ(DEISR) & DE_DP_A_HOTPLUG; + else + return ibx_digital_port_connected(dev_priv, port); +} + +static bool snb_digital_port_connected(struct drm_i915_private *dev_priv, + struct intel_digital_port *port) +{ + if (port->port == PORT_A) + return I915_READ(DEISR) & DE_DP_A_HOTPLUG; + else + return cpt_digital_port_connected(dev_priv, port); +} + +static bool ivb_digital_port_connected(struct drm_i915_private *dev_priv, + struct intel_digital_port *port) +{ + if (port->port == PORT_A) + return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB; + else + return cpt_digital_port_connected(dev_priv, port); +} + +static bool bdw_digital_port_connected(struct drm_i915_private *dev_priv, + struct intel_digital_port *port) +{ + if (port->port == PORT_A) + return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG; + else + return cpt_digital_port_connected(dev_priv, port); +} + static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv, struct intel_digital_port *intel_dig_port) { @@ -4573,16 +4621,25 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv, bool intel_digital_port_connected(struct drm_i915_private *dev_priv, struct intel_digital_port *port) { - if (HAS_PCH_IBX(dev_priv)) - return 
ibx_digital_port_connected(dev_priv, port); - else if (HAS_PCH_SPLIT(dev_priv)) - return cpt_digital_port_connected(dev_priv, port); + if (HAS_GMCH_DISPLAY(dev_priv)) { + if (IS_GM45(dev_priv)) + return gm45_digital_port_connected(dev_priv, port); + else + return g4x_digital_port_connected(dev_priv, port); + } + + if (IS_GEN5(dev_priv)) + return ilk_digital_port_connected(dev_priv, port); + else if (IS_GEN6(dev_priv)) + return snb_digital_port_connected(dev_priv, port); + else if (IS_GEN7(dev_priv)) + return ivb_digital_port_connected(dev_priv, port); + else if (IS_GEN8(dev_priv)) + return bdw_digital_port_connected(dev_priv, port); else if (IS_GEN9_LP(dev_priv)) return bxt_digital_port_connected(dev_priv, port); - else if (IS_GM45(dev_priv)) - return gm45_digital_port_connected(dev_priv, port); else - return g4x_digital_port_connected(dev_priv, port); + return spt_digital_port_connected(dev_priv, port); } static struct edid * From 1f588aeb60b4412019546ce596f179635abc2ac3 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Mon, 19 Jun 2017 11:39:32 -0700 Subject: [PATCH 0002/1795] drm/i915/cnl: Fix RMW on ddi vswing sequence. Paulo noticed that we were missing few bits clear before writing values back to the register on these RMW MMIO operations. v2: Remove "POST_" from CURSOR_COEFF_MASK. (Paulo). v3: Remove unnecessary braces. (Jani). Fixes: cf54ca8bc567 ("drm/i915/cnl: Implement voltage swing sequence.") Cc: Paulo Zanoni Cc: Manasi Navare Cc: Jani Nikula Signed-off-by: Rodrigo Vivi Reviewed-by: Paulo Zanoni Link: http://patchwork.freedesktop.org/patch/msgid/1497897572-22520-1-git-send-email-rodrigo.vivi@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 9 +++++++++ drivers/gpu/drm/i915/intel_ddi.c | 7 +++++++ 2 files changed, 16 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index bd535f12db18..c8647cfa81ba 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1764,8 +1764,11 @@ enum skl_disp_power_wells { _CNL_PORT_TX_DW2_LN0_AE, \ _CNL_PORT_TX_DW2_LN0_F) #define SWING_SEL_UPPER(x) ((x >> 3) << 15) +#define SWING_SEL_UPPER_MASK (1 << 15) #define SWING_SEL_LOWER(x) ((x & 0x7) << 11) +#define SWING_SEL_LOWER_MASK (0x7 << 11) #define RCOMP_SCALAR(x) ((x) << 0) +#define RCOMP_SCALAR_MASK (0xFF << 0) #define _CNL_PORT_TX_DW4_GRP_AE 0x162350 #define _CNL_PORT_TX_DW4_GRP_B 0x1623D0 @@ -1795,8 +1798,11 @@ enum skl_disp_power_wells { _CNL_PORT_TX_DW4_LN0_F) #define LOADGEN_SELECT (1 << 31) #define POST_CURSOR_1(x) ((x) << 12) +#define POST_CURSOR_1_MASK (0x3F << 12) #define POST_CURSOR_2(x) ((x) << 6) +#define POST_CURSOR_2_MASK (0x3F << 6) #define CURSOR_COEFF(x) ((x) << 0) +#define CURSOR_COEFF_MASK (0x3F << 6) #define _CNL_PORT_TX_DW5_GRP_AE 0x162354 #define _CNL_PORT_TX_DW5_GRP_B 0x1623D4 @@ -1825,7 +1831,9 @@ enum skl_disp_power_wells { #define TX_TRAINING_EN (1 << 31) #define TAP3_DISABLE (1 << 29) #define SCALING_MODE_SEL(x) ((x) << 18) +#define SCALING_MODE_SEL_MASK (0x7 << 18) #define RTERM_SELECT(x) ((x) << 3) +#define RTERM_SELECT_MASK (0x7 << 3) #define _CNL_PORT_TX_DW7_GRP_AE 0x16235C #define _CNL_PORT_TX_DW7_GRP_B 0x1623DC @@ -1852,6 +1860,7 @@ enum skl_disp_power_wells { _CNL_PORT_TX_DW7_LN0_AE, \ _CNL_PORT_TX_DW7_LN0_F) #define N_SCALAR(x) ((x) << 24) +#define N_SCALAR_MASK (0x7F << 24) /* The spec defines this only for BXT PHY0, but lets assume that this * would exist for PHY1 too if it had a second channel. 
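For readers skimming the register additions above: each new *_MASK pairs with its existing field encoder so the vswing programming can do a true read-modify-write, i.e. clear the old field before OR-ing in the new value, instead of accumulating stale bits across writes. A minimal, self-contained sketch of that pattern (the FIELD* macros and helper below are purely illustrative, not i915 definitions):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical 3-bit field at bits 20:18, mirroring how each i915 macro
 * pair combines an encoder FIELD(x) with a FIELD_MASK for RMW updates. */
#define FIELD_SHIFT	18
#define FIELD_MASK	(0x7u << FIELD_SHIFT)
#define FIELD(x)	((uint32_t)(x) << FIELD_SHIFT)

static uint32_t rmw_field(uint32_t reg, uint32_t mask, uint32_t val)
{
	reg &= ~mask;	/* clear the old field so stale bits cannot survive */
	reg |= val;	/* then program the new encoded value */
	return reg;
}

int main(void)
{
	uint32_t reg = FIELD(5) | 0x3;	/* pretend this came from a register read */

	reg = rmw_field(reg, FIELD_MASK, FIELD(2));
	printf("0x%08x\n", reg);	/* field now holds 2, low bits preserved */
	return 0;
}

The intel_ddi.c hunks that follow apply exactly this clear-then-set step before each I915_WRITE().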
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index db8093863f0c..80e96f1f49d2 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1813,11 +1813,14 @@ static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv, /* Set PORT_TX_DW5 Scaling Mode Sel to 010b. */ val = I915_READ(CNL_PORT_TX_DW5_LN0(port)); + val &= ~SCALING_MODE_SEL_MASK; val |= SCALING_MODE_SEL(2); I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val); /* Program PORT_TX_DW2 */ val = I915_READ(CNL_PORT_TX_DW2_LN0(port)); + val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | + RCOMP_SCALAR_MASK); val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel); val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel); /* Rcomp scalar is fixed as 0x98 for every table entry */ @@ -1828,6 +1831,8 @@ static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv, /* We cannot write to GRP. It would overrite individual loadgen */ for (ln = 0; ln < 4; ln++) { val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln)); + val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | + CURSOR_COEFF_MASK); val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1); val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2); val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff); @@ -1837,12 +1842,14 @@ static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv, /* Program PORT_TX_DW5 */ /* All DW5 values are fixed for every table entry */ val = I915_READ(CNL_PORT_TX_DW5_LN0(port)); + val &= ~RTERM_SELECT_MASK; val |= RTERM_SELECT(6); val |= TAP3_DISABLE; I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val); /* Program PORT_TX_DW7 */ val = I915_READ(CNL_PORT_TX_DW7_LN0(port)); + val &= ~N_SCALAR_MASK; val |= N_SCALAR(ddi_translations[level].dw7_n_scalar); I915_WRITE(CNL_PORT_TX_DW7_GRP(port), val); } From fc5e9d63a8db40e9e3a3b180be33d44af8a3cee8 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 24 May 2017 16:52:05 +0200 Subject: [PATCH 0003/1795] drm/sti: Drop drm_vblank_cleanup Seems entirely cargo-culted. Cc: Benjamin Gaignard Cc: Vincent Abriou Acked-by: Vincent Abriou Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170524145212.27837-31-daniel.vetter@ffwll.ch --- drivers/gpu/drm/sti/sti_drv.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index a4b574283269..06ef1e3886cf 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c @@ -237,7 +237,6 @@ static void sti_cleanup(struct drm_device *ddev) } drm_kms_helper_poll_fini(ddev); - drm_vblank_cleanup(ddev); component_unbind_all(ddev->dev, ddev); kfree(private); ddev->dev_private = NULL; From 00a9121b8698ddf7fcb18e107405afacc35f748a Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 24 May 2017 16:52:08 +0200 Subject: [PATCH 0004/1795] drm/tegra: Drop drm_vblank_cleanup Again, doesn't seem to serve a purpose. 
Cc: Thierry Reding Acked-by: Thierry Reding Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170524145212.27837-34-daniel.vetter@ffwll.ch --- drivers/gpu/drm/tegra/drm.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 51c48a8e00ec..0d8839244c3a 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -213,12 +213,10 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags) err = tegra_drm_fb_init(drm); if (err < 0) - goto vblank; + goto device; return 0; -vblank: - drm_vblank_cleanup(drm); device: host1x_device_exit(device); fbdev: @@ -247,7 +245,6 @@ static void tegra_drm_unload(struct drm_device *drm) drm_kms_helper_poll_fini(drm); tegra_drm_fb_exit(drm); drm_mode_config_cleanup(drm); - drm_vblank_cleanup(drm); err = host1x_device_exit(device); if (err < 0) From 27fa5510f18b11845a6c46755e41ea66eeaccf0a Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 24 May 2017 16:51:44 +0200 Subject: [PATCH 0005/1795] drm/doc: Drop empty include for drm_color_mgmt.h I'm fed up staring at the error message from kernel-doc that it can't find anything. Acked-by: Thierry Reding Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170524145212.27837-10-daniel.vetter@ffwll.ch --- Documentation/gpu/drm-kms.rst | 3 --- 1 file changed, 3 deletions(-) diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst index 2d77c9580164..0749000ab3d7 100644 --- a/Documentation/gpu/drm-kms.rst +++ b/Documentation/gpu/drm-kms.rst @@ -523,9 +523,6 @@ Color Management Properties .. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c :doc: overview -.. kernel-doc:: include/drm/drm_color_mgmt.h - :internal: - .. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c :export: From 57d30230c573e3f1a49ae7e0f7f8b73b17881415 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 24 May 2017 16:51:45 +0200 Subject: [PATCH 0006/1795] drm/doc: vblank cleanup Unify and review everything, plus make sure it's all correct markup. Drop the kernel-doc for internal functions. Also rework the overview section, it's become rather outdated. Unfortuantely the kernel-doc in drm_driver isn't rendered yet, but that will change as soon as drm_driver is kernel-docified properly. Also document properly that drm_vblank_cleanup is optional, the core calls this already. v2: Make it clear that cleanup happens in drm_dev_fini for drivers with their own ->release callback (Thierry). Acked-by: Thierry Reding Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170524145212.27837-11-daniel.vetter@ffwll.ch --- Documentation/gpu/drm-kms.rst | 56 +----------- drivers/gpu/drm/drm_vblank.c | 158 ++++++++++++++++------------------ include/drm/drmP.h | 37 ++++++-- include/drm/drm_crtc.h | 3 + 4 files changed, 113 insertions(+), 141 deletions(-) diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst index 0749000ab3d7..307284125d7a 100644 --- a/Documentation/gpu/drm-kms.rst +++ b/Documentation/gpu/drm-kms.rst @@ -551,60 +551,8 @@ various modules/drivers. Vertical Blanking ================= -Vertical blanking plays a major role in graphics rendering. To achieve -tear-free display, users must synchronize page flips and/or rendering to -vertical blanking. The DRM API offers ioctls to perform page flips -synchronized to vertical blanking and wait for vertical blanking. 
- -The DRM core handles most of the vertical blanking management logic, -which involves filtering out spurious interrupts, keeping race-free -blanking counters, coping with counter wrap-around and resets and -keeping use counts. It relies on the driver to generate vertical -blanking interrupts and optionally provide a hardware vertical blanking -counter. Drivers must implement the following operations. - -- int (\*enable_vblank) (struct drm_device \*dev, int crtc); void - (\*disable_vblank) (struct drm_device \*dev, int crtc); - Enable or disable vertical blanking interrupts for the given CRTC. - -- u32 (\*get_vblank_counter) (struct drm_device \*dev, int crtc); - Retrieve the value of the vertical blanking counter for the given - CRTC. If the hardware maintains a vertical blanking counter its value - should be returned. Otherwise drivers can use the - :c:func:`drm_vblank_count()` helper function to handle this - operation. - -Drivers must initialize the vertical blanking handling core with a call -to :c:func:`drm_vblank_init()` in their load operation. - -Vertical blanking interrupts can be enabled by the DRM core or by -drivers themselves (for instance to handle page flipping operations). -The DRM core maintains a vertical blanking use count to ensure that the -interrupts are not disabled while a user still needs them. To increment -the use count, drivers call :c:func:`drm_vblank_get()`. Upon -return vertical blanking interrupts are guaranteed to be enabled. - -To decrement the use count drivers call -:c:func:`drm_vblank_put()`. Only when the use count drops to zero -will the DRM core disable the vertical blanking interrupts after a delay -by scheduling a timer. The delay is accessible through the -vblankoffdelay module parameter or the ``drm_vblank_offdelay`` global -variable and expressed in milliseconds. Its default value is 5000 ms. -Zero means never disable, and a negative value means disable -immediately. Drivers may override the behaviour by setting the -:c:type:`struct drm_device ` -vblank_disable_immediate flag, which when set causes vblank interrupts -to be disabled immediately regardless of the drm_vblank_offdelay -value. The flag should only be set if there's a properly working -hardware vblank counter present. - -When a vertical blanking interrupt occurs drivers only need to call the -:c:func:`drm_handle_vblank()` function to account for the -interrupt. - -Resources allocated by :c:func:`drm_vblank_init()` must be freed -with a call to :c:func:`drm_vblank_cleanup()` in the driver unload -operation handler. +.. kernel-doc:: drivers/gpu/drm/drm_vblank.c + :doc: vblank handling Vertical Blanking and Interrupt Handling Functions Reference ------------------------------------------------------------ diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index 463e4d81fb0d..d833b202f3c7 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -31,6 +31,41 @@ #include "drm_trace.h" #include "drm_internal.h" +/** + * DOC: vblank handling + * + * Vertical blanking plays a major role in graphics rendering. To achieve + * tear-free display, users must synchronize page flips and/or rendering to + * vertical blanking. The DRM API offers ioctls to perform page flips + * synchronized to vertical blanking and wait for vertical blanking. 
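To make the ioctl mentioned in the paragraph above concrete, here is a hedged userspace sketch that waits for one vertical blank through libdrm's drmWaitVBlank() wrapper; the device node path is an assumption and only the first CRTC is handled:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <xf86drm.h>

int main(void)
{
	drmVBlank vbl;
	/* Assumed device node; pick whichever card drives your display. */
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&vbl, 0, sizeof(vbl));
	vbl.request.type = DRM_VBLANK_RELATIVE;	/* relative to the current count */
	vbl.request.sequence = 1;		/* block until the next vblank */

	if (drmWaitVBlank(fd, &vbl) == 0)
		printf("vblank %u at %ld.%06ld\n", vbl.reply.sequence,
		       vbl.reply.tval_sec, vbl.reply.tval_usec);

	close(fd);
	return 0;
}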
+ * + * The DRM core handles most of the vertical blanking management logic, which + * involves filtering out spurious interrupts, keeping race-free blanking + * counters, coping with counter wrap-around and resets and keeping use counts. + * It relies on the driver to generate vertical blanking interrupts and + * optionally provide a hardware vertical blanking counter. + * + * Drivers must initialize the vertical blanking handling core with a call to + * drm_vblank_init(). Minimally, a driver needs to implement + * &drm_crtc_funcs.enable_vblank and &drm_crtc_funcs.disable_vblank plus call + * drm_crtc_handle_vblank() in it's vblank interrupt handler for working vblank + * support. + * + * Vertical blanking interrupts can be enabled by the DRM core or by drivers + * themselves (for instance to handle page flipping operations). The DRM core + * maintains a vertical blanking use count to ensure that the interrupts are not + * disabled while a user still needs them. To increment the use count, drivers + * call drm_crtc_vblank_get() and release the vblank reference again with + * drm_crtc_vblank_put(). In between these two calls vblank interrupts are + * guaranteed to be enabled. + * + * On many hardware disabling the vblank interrupt cannot be done in a race-free + * manner, see &drm_driver.vblank_disable_immediate and + * &drm_driver.max_vblank_count. In that case the vblank core only disables the + * vblanks after a timer has expired, which can be configured through the + * ``vblankoffdelay`` module parameter. + */ + /* Retry timestamp calculation up to 3 times to satisfy * drm_timestamp_precision before giving up. */ @@ -262,11 +297,12 @@ static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe) * drm_accurate_vblank_count - retrieve the master vblank counter * @crtc: which counter to retrieve * - * This function is similar to @drm_crtc_vblank_count but this - * function interpolates to handle a race with vblank irq's. + * This function is similar to drm_crtc_vblank_count() but this function + * interpolates to handle a race with vblank interrupts using the high precision + * timestamping support. * - * This is mostly useful for hardware that can obtain the scanout - * position, but doesn't have a frame counter. + * This is mostly useful for hardware that can obtain the scanout position, but + * doesn't have a hardware frame counter. */ u32 drm_accurate_vblank_count(struct drm_crtc *crtc) { @@ -362,10 +398,14 @@ static void vblank_disable_fn(unsigned long arg) * drm_vblank_cleanup - cleanup vblank support * @dev: DRM device * - * This function cleans up any resources allocated in drm_vblank_init. + * This function cleans up any resources allocated in drm_vblank_init(). It is + * called by the DRM core when @dev is finalized. + * + * Drivers can call drm_vblank_cleanup() if they need to quiescent the vblank + * interrupt in their unload code. But in general this should be handled by + * disabling all active &drm_crtc through e.g. drm_atomic_helper_shutdown, which + * should end up calling drm_crtc_vblank_off(). * - * Drivers which don't use drm_irq_install() need to set &drm_device.irq_enabled - * themselves, to signal to the DRM core that vblank interrupts are enabled. */ void drm_vblank_cleanup(struct drm_device *dev) { @@ -396,6 +436,9 @@ EXPORT_SYMBOL(drm_vblank_cleanup); * @num_crtcs: number of CRTCs supported by @dev * * This function initializes vblank support for @num_crtcs display pipelines. 
+ * Drivers do not need to call drm_vblank_cleanup(), cleanup is already handled + * by the DRM core, or through calling drm_dev_fini() for drivers with a + * &drm_driver.release callback. * * Returns: * Zero on success or a negative error code on failure. @@ -468,11 +511,11 @@ EXPORT_SYMBOL(drm_crtc_vblank_waitqueue); * @crtc: drm_crtc whose timestamp constants should be updated. * @mode: display mode containing the scanout timings * - * Calculate and store various constants which are later - * needed by vblank and swap-completion timestamping, e.g, - * by drm_calc_vbltimestamp_from_scanoutpos(). They are - * derived from CRTC's true scanout timing, so they take - * things like panel scaling or other adjustments into account. + * Calculate and store various constants which are later needed by vblank and + * swap-completion timestamping, e.g, by + * drm_calc_vbltimestamp_from_scanoutpos(). They are derived from CRTC's true + * scanout timing, so they take things like panel scaling or other adjustments + * into account. */ void drm_calc_timestamping_constants(struct drm_crtc *crtc, const struct drm_display_mode *mode) @@ -535,25 +578,14 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants); * if flag is set. * * Implements calculation of exact vblank timestamps from given drm_display_mode - * timings and current video scanout position of a CRTC. This can be called from - * within get_vblank_timestamp() implementation of a kms driver to implement the - * actual timestamping. + * timings and current video scanout position of a CRTC. This can be directly + * used as the &drm_driver.get_vblank_timestamp implementation of a kms driver + * if &drm_driver.get_scanout_position is implemented. * - * Should return timestamps conforming to the OML_sync_control OpenML - * extension specification. The timestamp corresponds to the end of - * the vblank interval, aka start of scanout of topmost-leftmost display - * pixel in the following video frame. - * - * Requires support for optional dev->driver->get_scanout_position() - * in kms driver, plus a bit of setup code to provide a drm_display_mode - * that corresponds to the true scanout timing. - * - * The current implementation only handles standard video modes. It - * returns as no operation if a doublescan or interlaced video mode is - * active. Higher level code is expected to handle this. - * - * This function can be used to implement the &drm_driver.get_vblank_timestamp - * directly, if the driver implements the &drm_driver.get_scanout_position hook. + * The current implementation only handles standard video modes. For double scan + * and interlaced modes the driver is supposed to adjust the hardware mode + * (taken from &drm_crtc_state.adjusted mode for atomic modeset drivers) to + * match the scanout position reported. * * Note that atomic drivers must call drm_calc_timestamping_constants() before * enabling a CRTC. The atomic helpers already take care of that in @@ -738,7 +770,9 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe, * * Fetches the "cooked" vblank count value that represents the number of * vblank events since the system was booted, including lost events due to - * modesetting activity. + * modesetting activity. Note that this timer isn't correct against a racing + * vblank interrupt (since it only reports the software vblank counter), see + * drm_accurate_vblank_count() for such use-cases. * * Returns: * The software vblank counter. 
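Pulling the pieces of the documentation above together, here is a hedged sketch of the minimal per-driver vblank wiring it describes: drm_vblank_init() once at load time, enable/disable hooks in &drm_crtc_funcs, and drm_crtc_handle_vblank() from the interrupt handler. Every foo_* symbol is a hypothetical stand-in, not code from a real driver:

#include <linux/interrupt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>

static int foo_enable_vblank(struct drm_crtc *crtc)
{
	/* Hypothetical helper: unmask the per-CRTC vblank interrupt. */
	foo_unmask_irq(to_foo(crtc->dev), FOO_IRQ_VBLANK(drm_crtc_index(crtc)));
	return 0;
}

static void foo_disable_vblank(struct drm_crtc *crtc)
{
	foo_mask_irq(to_foo(crtc->dev), FOO_IRQ_VBLANK(drm_crtc_index(crtc)));
}

static const struct drm_crtc_funcs foo_crtc_funcs = {
	/* ...set_config/page_flip/destroy hooks elided... */
	.enable_vblank = foo_enable_vblank,
	.disable_vblank = foo_disable_vblank,
};

static irqreturn_t foo_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_crtc *crtc;

	drm_for_each_crtc(crtc, dev)
		if (foo_crtc_saw_vblank(crtc))	/* hypothetical status check */
			drm_crtc_handle_vblank(crtc);	/* core counting + wakeups */

	return IRQ_HANDLED;
}

static int foo_modeset_init(struct drm_device *dev)
{
	/* One tracking structure per CRTC; no drm_vblank_cleanup() needed,
	 * the core tears this down as documented above. */
	return drm_vblank_init(dev, dev->mode_config.num_crtc);
}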
@@ -749,20 +783,6 @@ u32 drm_crtc_vblank_count(struct drm_crtc *crtc) } EXPORT_SYMBOL(drm_crtc_vblank_count); -/** - * drm_vblank_count_and_time - retrieve "cooked" vblank counter value and the - * system timestamp corresponding to that vblank counter value. - * @dev: DRM device - * @pipe: index of CRTC whose counter to retrieve - * @vblanktime: Pointer to struct timeval to receive the vblank timestamp. - * - * Fetches the "cooked" vblank count value that represents the number of - * vblank events since the system was booted, including lost events due to - * modesetting activity. Returns corresponding system timestamp of the time - * of the vblank interval that corresponds to the current vblank counter value. - * - * This is the legacy version of drm_crtc_vblank_count_and_time(). - */ static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, struct timeval *vblanktime) { @@ -852,8 +872,8 @@ static void send_vblank_event(struct drm_device *dev, * handler by calling drm_crtc_send_vblank_event() and make sure that there's no * possible race with the hardware committing the atomic update. * - * Caller must hold event lock. Caller must also hold a vblank reference for - * the event @e, which will be dropped when the next vblank arrives. + * Caller must hold a vblank reference for the event @e, which will be dropped + * when the next vblank arrives. */ void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, struct drm_pending_vblank_event *e) @@ -913,14 +933,6 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe) return dev->driver->enable_vblank(dev, pipe); } -/** - * drm_vblank_enable - enable the vblank interrupt on a CRTC - * @dev: DRM device - * @pipe: CRTC index - * - * Returns: - * Zero on success or a negative error code on failure. - */ static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; @@ -958,19 +970,6 @@ static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe) return ret; } -/** - * drm_vblank_get - get a reference count on vblank events - * @dev: DRM device - * @pipe: index of CRTC to own - * - * Acquire a reference count on vblank events to avoid having them disabled - * while in use. - * - * This is the legacy version of drm_crtc_vblank_get(). - * - * Returns: - * Zero on success or a negative error code on failure. - */ static int drm_vblank_get(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; @@ -1014,16 +1013,6 @@ int drm_crtc_vblank_get(struct drm_crtc *crtc) } EXPORT_SYMBOL(drm_crtc_vblank_get); -/** - * drm_vblank_put - release ownership of vblank events - * @dev: DRM device - * @pipe: index of CRTC to release - * - * Release ownership of a given vblank counter, turning off interrupts - * if possible. Disable interrupts after drm_vblank_offdelay milliseconds. - * - * This is the legacy version of drm_crtc_vblank_put(). - */ static void drm_vblank_put(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; @@ -1067,6 +1056,8 @@ EXPORT_SYMBOL(drm_crtc_vblank_put); * This waits for one vblank to pass on @pipe, using the irq driver interfaces. * It is a failure to call this when the vblank irq for @pipe is disabled, e.g. * due to lack of driver support or because the crtc is off. + * + * This is the legacy version of drm_crtc_wait_one_vblank(). 
*/ void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe) { @@ -1116,7 +1107,7 @@ EXPORT_SYMBOL(drm_crtc_wait_one_vblank); * stored so that drm_vblank_on can restore it again. * * Drivers must use this function when the hardware vblank counter can get - * reset, e.g. when suspending. + * reset, e.g. when suspending or disabling the @crtc in general. */ void drm_crtc_vblank_off(struct drm_crtc *crtc) { @@ -1184,6 +1175,8 @@ EXPORT_SYMBOL(drm_crtc_vblank_off); * drm_crtc_vblank_on() functions. The difference compared to * drm_crtc_vblank_off() is that this function doesn't save the vblank counter * and hence doesn't need to call any driver hooks. + * + * This is useful for recovering driver state e.g. on driver load, or on resume. */ void drm_crtc_vblank_reset(struct drm_crtc *crtc) { @@ -1212,9 +1205,10 @@ EXPORT_SYMBOL(drm_crtc_vblank_reset); * @crtc: CRTC in question * * This functions restores the vblank interrupt state captured with - * drm_crtc_vblank_off() again. Note that calls to drm_crtc_vblank_on() and - * drm_crtc_vblank_off() can be unbalanced and so can also be unconditionally called - * in driver load code to reflect the current hardware state of the crtc. + * drm_crtc_vblank_off() again and is generally called when enabling @crtc. Note + * that calls to drm_crtc_vblank_on() and drm_crtc_vblank_off() can be + * unbalanced and so can also be unconditionally called in driver load code to + * reflect the current hardware state of the crtc. */ void drm_crtc_vblank_on(struct drm_crtc *crtc) { diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 39df16af7a4a..3aa3809ab524 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -387,22 +387,49 @@ struct drm_device { bool irq_enabled; int irq; - /* + /** + * @vblank_disable_immediate: + * * If true, vblank interrupt will be disabled immediately when the * refcount drops to zero, as opposed to via the vblank disable * timer. - * This can be set to true it the hardware has a working vblank - * counter and the driver uses drm_vblank_on() and drm_vblank_off() - * appropriately. + * + * This can be set to true it the hardware has a working vblank counter + * with high-precision timestamping (otherwise there are races) and the + * driver uses drm_crtc_vblank_on() and drm_crtc_vblank_off() + * appropriately. See also @max_vblank_count and + * &drm_crtc_funcs.get_vblank_counter. */ bool vblank_disable_immediate; - /* array of size num_crtcs */ + /** + * @vblank: + * + * Array of vblank tracking structures, one per &struct drm_crtc. For + * historical reasons (vblank support predates kernel modesetting) this + * is free-standing and not part of &struct drm_crtc itself. It must be + * initialized explicitly by calling drm_vblank_init(). + */ struct drm_vblank_crtc *vblank; spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */ spinlock_t vbl_lock; + /** + * @max_vblank_count: + * + * Maximum value of the vblank registers. This value +1 will result in a + * wrap-around of the vblank register. It is used by the vblank core to + * handle wrap-arounds. + * + * If set to zero the vblank core will try to guess the elapsed vblanks + * between times when the vblank interrupt is disabled through + * high-precision timestamps. That approach is suffering from small + * races and imprecision over longer time periods, hence exposing a + * hardware vblank counter is always recommended. + * + * If non-zeor, &drm_crtc_funcs.get_vblank_counter must be set. 
+ */ u32 max_vblank_count; /**< size of vblank counter register */ /** diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 629a5fe075b3..3a911a64c257 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -685,6 +685,9 @@ struct drm_crtc_funcs { * drm_crtc_vblank_off() and drm_crtc_vblank_on() when disabling or * enabling a CRTC. * + * See also &drm_device.vblank_disable_immediate and + * &drm_device.max_vblank_count. + * * Returns: * * Raw vblank counter value. From b6dcaaac44746cf32ee489639b06d3668e473386 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 24 May 2017 16:51:46 +0200 Subject: [PATCH 0007/1795] drm/vblank: _ioctl posfix for ioctl handler I alwasy get confused about drm_wait_vblank for a split second until I realize it's the ioctl handler. Unconfuse me, and do it for the legacy modeset vblank control ioctl too. While at it also noticed that I misplaced the irq ioctl handler in the internal header file. Acked-by: Thierry Reding Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170524145212.27837-12-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_internal.h | 12 ++++++++---- drivers/gpu/drm/drm_ioctl.c | 4 ++-- drivers/gpu/drm/drm_vblank.c | 22 ++++------------------ 3 files changed, 14 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index ba3f5fb21959..5cf9e03b5457 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -58,12 +58,16 @@ extern unsigned int drm_timestamp_monotonic; void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe); /* IOCTLS */ -int drm_wait_vblank(struct drm_device *dev, void *data, - struct drm_file *filp); +int drm_wait_vblank_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); +int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +/* drm_irq.c */ + +/* IOCTLS */ int drm_legacy_irq_control(struct drm_device *dev, void *data, struct drm_file *file_priv); -int drm_legacy_modeset_ctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); /* drm_auth.c */ int drm_getmagic(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 865e3ee4d743..c8547d223e85 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -600,9 +600,9 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_legacy_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_legacy_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_legacy_modeset_ctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_legacy_modeset_ctl_ioctl, 0), DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index d833b202f3c7..4ef7d310d5be 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -1293,8 +1293,8 @@ static void drm_legacy_vblank_post_modeset(struct drm_device *dev, } } -int drm_legacy_modeset_ctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { struct drm_modeset_ctl *modeset = data; unsigned int 
pipe; @@ -1413,22 +1413,8 @@ static bool drm_wait_vblank_is_query(union drm_wait_vblank *vblwait) _DRM_VBLANK_NEXTONMISS)); } -/* - * Wait for VBLANK. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param data user argument, pointing to a drm_wait_vblank structure. - * \return zero on success or a negative number on failure. - * - * This function enables the vblank interrupt on the pipe requested, then - * sleeps waiting for the requested sequence number to occur, and drops - * the vblank interrupt refcount afterwards. (vblank IRQ disable follows that - * after a timeout with no further vblank waits scheduled). - */ -int drm_wait_vblank(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int drm_wait_vblank_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { struct drm_vblank_crtc *vblank; union drm_wait_vblank *vblwait = data; From ca814b25538a5b2c0a8de6665191725f41608f2c Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 24 May 2017 16:51:47 +0200 Subject: [PATCH 0008/1795] drm/vblank: Consistent drm_crtc_ prefix We use drm_crtc_ for all the new-style vblank functions which directly take a struct drm_crtc *. drm_accurate_vblank_count was the odd one out, correct this to appease my OCD. Acked-by: Thierry Reding Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170524145212.27837-13-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_vblank.c | 8 ++++---- drivers/gpu/drm/i915/i915_irq.c | 2 +- drivers/gpu/drm/i915/intel_display.c | 2 +- drivers/gpu/drm/nouveau/nv50_display.c | 2 +- include/drm/drm_vblank.h | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index 4ef7d310d5be..7e3f59182571 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -294,7 +294,7 @@ static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe) } /** - * drm_accurate_vblank_count - retrieve the master vblank counter + * drm_crtc_accurate_vblank_count - retrieve the master vblank counter * @crtc: which counter to retrieve * * This function is similar to drm_crtc_vblank_count() but this function @@ -304,7 +304,7 @@ static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe) * This is mostly useful for hardware that can obtain the scanout position, but * doesn't have a hardware frame counter. */ -u32 drm_accurate_vblank_count(struct drm_crtc *crtc) +u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; unsigned int pipe = drm_crtc_index(crtc); @@ -323,7 +323,7 @@ u32 drm_accurate_vblank_count(struct drm_crtc *crtc) return vblank; } -EXPORT_SYMBOL(drm_accurate_vblank_count); +EXPORT_SYMBOL(drm_crtc_accurate_vblank_count); static void __disable_vblank(struct drm_device *dev, unsigned int pipe) { @@ -772,7 +772,7 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe, * vblank events since the system was booted, including lost events due to * modesetting activity. Note that this timer isn't correct against a racing * vblank interrupt (since it only reports the software vblank counter), see - * drm_accurate_vblank_count() for such use-cases. + * drm_crtc_accurate_vblank_count() for such use-cases. * * Returns: * The software vblank counter. 
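The rename above also marks the boundary between the two counter strategies: drm_crtc_accurate_vblank_count() is the interpolating fallback for hardware without a frame counter, while hardware that has one should expose it through get_vblank_counter plus &drm_device.max_vblank_count (and, if race-free, vblank_disable_immediate), as documented earlier in the series. A hedged sketch of the hardware-counter side, with all foo_/FOO_ names hypothetical:

#include <drm/drm_drv.h>
#include <drm/drm_vblank.h>

/* Hypothetical: read a free-running 24-bit frame counter register. */
static u32 foo_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	return foo_read(to_foo(dev), FOO_FRAME_COUNT(pipe)) & 0xffffff;
}

static struct drm_driver foo_driver = {
	/* ... */
	.get_vblank_counter = foo_get_vblank_counter,
};

static int foo_vblank_setup(struct drm_device *dev)
{
	int ret = drm_vblank_init(dev, dev->mode_config.num_crtc);

	if (ret)
		return ret;

	/* The 24-bit counter wraps after 0xffffff; tell the core about it. */
	dev->max_vblank_count = 0xffffff;
	/* Only safe with a race-free counter and precise timestamps. */
	dev->vblank_disable_immediate = true;
	return 0;
}

Drivers without such a counter simply leave max_vblank_count at zero and rely on drm_crtc_accurate_vblank_count(), as the i915 and nouveau hunks below do.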
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 04493ef1d2f7..3f8b2ee8da8c 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1601,7 +1601,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, crcs[3] = crc3; crcs[4] = crc4; drm_crtc_add_crc_entry(&crtc->base, true, - drm_accurate_vblank_count(&crtc->base), + drm_crtc_accurate_vblank_count(&crtc->base), crcs); } } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e6e26705c138..636c64ee00a8 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -12566,7 +12566,7 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) struct drm_device *dev = crtc->base.dev; if (!dev->max_vblank_count) - return drm_accurate_vblank_count(&crtc->base); + return drm_crtc_accurate_vblank_count(&crtc->base); return dev->driver->get_vblank_counter(dev, crtc->pipe); } diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index e9189e59216b..28cb24624c31 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -4032,7 +4032,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) if (crtc->state->event) { unsigned long flags; /* Get correct count/ts if racing with vblank irq */ - drm_accurate_vblank_count(crtc); + drm_crtc_accurate_vblank_count(crtc); spin_lock_irqsave(&crtc->dev->event_lock, flags); drm_crtc_send_vblank_event(crtc, crtc->state->event); spin_unlock_irqrestore(&crtc->dev->event_lock, flags); diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h index 4cde47332dfa..4ceef128582f 100644 --- a/include/drm/drm_vblank.h +++ b/include/drm/drm_vblank.h @@ -169,7 +169,7 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc); void drm_crtc_vblank_reset(struct drm_crtc *crtc); void drm_crtc_vblank_on(struct drm_crtc *crtc); void drm_vblank_cleanup(struct drm_device *dev); -u32 drm_accurate_vblank_count(struct drm_crtc *crtc); +u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc); bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, unsigned int pipe, int *max_error, From 76bba2cdb56d2a55c0dd4c3432221881d8c9adcc Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 24 May 2017 16:51:38 +0200 Subject: [PATCH 0009/1795] drm/udl: Remove dummy busid callback Since commit ca8e2ad71013049bc88a10b11d83712bfe56cdd4 Author: Thierry Reding Date: Fri Apr 11 15:23:00 2014 +0200 drm: Introduce drm_dev_set_unique() the ->set_busid callback is optional. On top of that the udl one isn't really fully compliant with the drm uabi, but since only modesetting ever binds to it (there's no 3d accel on udl) it doesn't matter. Still, can't harm to aling and use the default used by everyone else. 
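For context, the default the commit message refers to works roughly like the hedged sketch below: the driver never implements ->set_busid and, if it wants a specific unique name, sets it explicitly while binding. The foo_ names and the bind flow are illustrative only:

#include <drm/drm_drv.h>

static int foo_bind(struct device *parent)
{
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&foo_driver, parent);	/* foo_driver: hypothetical */
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	/* With ->set_busid gone, a driver that wants a specific unique name
	 * can set it explicitly here; otherwise the core default mentioned
	 * in the quoted commit is used. */
	ret = drm_dev_set_unique(drm, dev_name(parent));
	if (ret)
		goto err_unref;

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto err_unref;

	return 0;

err_unref:
	drm_dev_unref(drm);
	return ret;
}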
Acked-by: Thierry Reding Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170524145212.27837-4-daniel.vetter@ffwll.ch --- drivers/gpu/drm/udl/udl_drv.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index cd8b01727734..0f02e1acf0ba 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -11,11 +11,6 @@ #include #include "udl_drv.h" -static int udl_driver_set_busid(struct drm_device *d, struct drm_master *m) -{ - return 0; -} - static int udl_usb_suspend(struct usb_interface *interface, pm_message_t message) { @@ -52,7 +47,6 @@ static struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, .load = udl_driver_load, .unload = udl_driver_unload, - .set_busid = udl_driver_set_busid, /* gem hooks */ .gem_free_object = udl_gem_free_object, From 5c484cee7ef9c4fd29fa0ba09640d55960977145 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 24 May 2017 16:51:39 +0200 Subject: [PATCH 0010/1795] drm: Remove drm_driver->set_busid hook The only special-case is pci devices, and we can easily handle this in the core. Do so and drop a pile of boilerplate from drivers. Acked-by: Thierry Reding Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170524145212.27837-5-daniel.vetter@ffwll.ch --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1 - drivers/gpu/drm/ast/ast_drv.c | 1 - drivers/gpu/drm/bochs/bochs_drv.c | 1 - drivers/gpu/drm/cirrus/cirrus_drv.c | 1 - drivers/gpu/drm/drm_internal.h | 1 + drivers/gpu/drm/drm_ioctl.c | 4 ++-- drivers/gpu/drm/drm_pci.c | 1 - drivers/gpu/drm/gma500/psb_drv.c | 1 - drivers/gpu/drm/i810/i810_drv.c | 1 - drivers/gpu/drm/i915/i915_drv.c | 1 - drivers/gpu/drm/mga/mga_drv.c | 1 - drivers/gpu/drm/mgag200/mgag200_drv.c | 1 - drivers/gpu/drm/nouveau/nouveau_drm.c | 1 - drivers/gpu/drm/qxl/qxl_drv.c | 2 -- drivers/gpu/drm/r128/r128_drv.c | 1 - drivers/gpu/drm/radeon/radeon_drv.c | 1 - drivers/gpu/drm/savage/savage_drv.c | 1 - drivers/gpu/drm/sis/sis_drv.c | 1 - drivers/gpu/drm/tdfx/tdfx_drv.c | 1 - drivers/gpu/drm/via/via_drv.c | 1 - drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 1 - include/drm/drm_drv.h | 2 -- include/drm/drm_pci.h | 7 ------- 23 files changed, 3 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 31eddd85eb40..e7a4bce6358d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -734,7 +734,6 @@ static struct drm_driver kms_driver = { .open = amdgpu_driver_open_kms, .postclose = amdgpu_driver_postclose_kms, .lastclose = amdgpu_driver_lastclose_kms, - .set_busid = drm_pci_set_busid, .unload = amdgpu_driver_unload_kms, .get_vblank_counter = amdgpu_get_vblank_counter_kms, .enable_vblank = amdgpu_enable_vblank_kms, diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index fd7c9eec92e4..f6794745a024 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -197,7 +197,6 @@ static struct drm_driver driver = { .load = ast_driver_load, .unload = ast_driver_unload, - .set_busid = drm_pci_set_busid, .fops = &ast_fops, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c index aa342515ddf4..8fccd3cf000d 100644 --- a/drivers/gpu/drm/bochs/bochs_drv.c +++ b/drivers/gpu/drm/bochs/bochs_drv.c @@ -84,7 +84,6 @@ static struct drm_driver bochs_driver = { .driver_features = 
DRIVER_GEM | DRIVER_MODESET, .load = bochs_load, .unload = bochs_unload, - .set_busid = drm_pci_set_busid, .fops = &bochs_fops, .name = "bochs-drm", .desc = "bochs dispi vga interface (qemu stdvga)", diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c index d893ea21a359..c48b9eb76712 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -132,7 +132,6 @@ static struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM, .load = cirrus_driver_load, .unload = cirrus_driver_unload, - .set_busid = drm_pci_set_busid, .fops = &cirrus_driver_fops, .name = DRIVER_NAME, .desc = DRIVER_DESC, diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 5cf9e03b5457..116de27cf8f3 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -32,6 +32,7 @@ void drm_lastclose(struct drm_device *dev); int drm_irq_by_busid(struct drm_device *dev, void *data, struct drm_file *file_priv); void drm_pci_agp_destroy(struct drm_device *dev); +int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master); /* drm_prime.c */ int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index c8547d223e85..aa49a2241404 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -143,8 +143,8 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv) if (master->unique != NULL) drm_unset_busid(dev, master); - if (dev->driver->set_busid) { - ret = dev->driver->set_busid(dev, master); + if (dev_is_pci(dev->dev)) { + ret = drm_pci_set_busid(dev, master); if (ret) { drm_unset_busid(dev, master); return ret; diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index 1eb4fc3eee20..ad31d95e77c9 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -149,7 +149,6 @@ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master) master->unique_len = strlen(master->unique); return 0; } -EXPORT_SYMBOL(drm_pci_set_busid); static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p) { diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 1f9b35afefee..37d4c36c80f2 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -480,7 +480,6 @@ static struct drm_driver driver = { .load = psb_driver_load, .unload = psb_driver_unload, .lastclose = psb_driver_lastclose, - .set_busid = drm_pci_set_busid, .num_ioctls = ARRAY_SIZE(psb_ioctls), .irq_preinstall = psb_irq_preinstall, diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c index 37fd0906f807..e1c35c710e24 100644 --- a/drivers/gpu/drm/i810/i810_drv.c +++ b/drivers/gpu/drm/i810/i810_drv.c @@ -59,7 +59,6 @@ static struct drm_driver driver = { .load = i810_driver_load, .lastclose = i810_driver_lastclose, .preclose = i810_driver_preclose, - .set_busid = drm_pci_set_busid, .dma_quiescent = i810_driver_dma_quiescent, .ioctls = i810_ioctls, .fops = &i810_driver_fops, diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 3036d4835b0f..6033355d9469 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -2622,7 +2622,6 @@ static struct drm_driver driver = { .open = i915_driver_open, .lastclose = i915_driver_lastclose, .postclose = i915_driver_postclose, - .set_busid = drm_pci_set_busid, .gem_close_object = 
i915_gem_close_object, .gem_free_object_unlocked = i915_gem_free_object, diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c index 63ba0699d107..2a36ec611a44 100644 --- a/drivers/gpu/drm/mga/mga_drv.c +++ b/drivers/gpu/drm/mga/mga_drv.c @@ -62,7 +62,6 @@ static struct drm_driver driver = { .load = mga_driver_load, .unload = mga_driver_unload, .lastclose = mga_driver_lastclose, - .set_busid = drm_pci_set_busid, .dma_quiescent = mga_driver_dma_quiescent, .get_vblank_counter = mga_get_vblank_counter, .enable_vblank = mga_enable_vblank, diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 9ac007880328..53a5982a04c7 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -91,7 +91,6 @@ static struct drm_driver driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET, .load = mgag200_driver_load, .unload = mgag200_driver_unload, - .set_busid = drm_pci_set_busid, .fops = &mgag200_driver_fops, .name = DRIVER_NAME, .desc = DRIVER_DESC, diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index c3dc75fee700..3e1a8da9b20f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -1102,7 +1102,6 @@ static int __init nouveau_drm_init(void) { driver_pci = driver_stub; - driver_pci.set_busid = drm_pci_set_busid; driver_platform = driver_stub; nouveau_display_options(); diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index c2fc201d9e1b..bb2d8da7e553 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -262,8 +262,6 @@ static struct drm_driver qxl_driver = { DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_ATOMIC, - .set_busid = drm_pci_set_busid, - .dumb_create = qxl_mode_dumb_create, .dumb_map_offset = qxl_mode_dumb_mmap, .dumb_destroy = drm_gem_dumb_destroy, diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c index a982be57d1ef..1d43c434328d 100644 --- a/drivers/gpu/drm/r128/r128_drv.c +++ b/drivers/gpu/drm/r128/r128_drv.c @@ -62,7 +62,6 @@ static struct drm_driver driver = { .load = r128_driver_load, .preclose = r128_driver_preclose, .lastclose = r128_driver_lastclose, - .set_busid = drm_pci_set_busid, .get_vblank_counter = r128_get_vblank_counter, .enable_vblank = r128_enable_vblank, .disable_vblank = r128_disable_vblank, diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 6f906abd612b..dd5e86dafb29 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -545,7 +545,6 @@ static struct drm_driver kms_driver = { .open = radeon_driver_open_kms, .postclose = radeon_driver_postclose_kms, .lastclose = radeon_driver_lastclose_kms, - .set_busid = drm_pci_set_busid, .unload = radeon_driver_unload_kms, .get_vblank_counter = radeon_get_vblank_counter_kms, .enable_vblank = radeon_enable_vblank_kms, diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c index 78c6d8e9b42c..2a08da09dbcf 100644 --- a/drivers/gpu/drm/savage/savage_drv.c +++ b/drivers/gpu/drm/savage/savage_drv.c @@ -55,7 +55,6 @@ static struct drm_driver driver = { .preclose = savage_reclaim_buffers, .lastclose = savage_driver_lastclose, .unload = savage_driver_unload, - .set_busid = drm_pci_set_busid, .ioctls = savage_ioctls, .dma_ioctl = savage_bci_buffers, .fops = &savage_driver_fops, diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c index 
7f05da13ea5e..cdaced381f5d 100644 --- a/drivers/gpu/drm/sis/sis_drv.c +++ b/drivers/gpu/drm/sis/sis_drv.c @@ -104,7 +104,6 @@ static struct drm_driver driver = { .open = sis_driver_open, .preclose = sis_reclaim_buffers_locked, .postclose = sis_driver_postclose, - .set_busid = drm_pci_set_busid, .dma_quiescent = sis_idle, .lastclose = sis_lastclose, .ioctls = sis_ioctls, diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c index c54138c3a376..acd5f8162bb6 100644 --- a/drivers/gpu/drm/tdfx/tdfx_drv.c +++ b/drivers/gpu/drm/tdfx/tdfx_drv.c @@ -55,7 +55,6 @@ static const struct file_operations tdfx_driver_fops = { static struct drm_driver driver = { .driver_features = DRIVER_LEGACY, - .set_busid = drm_pci_set_busid, .fops = &tdfx_driver_fops, .name = DRIVER_NAME, .desc = DRIVER_DESC, diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c index 9e0e5392b6ec..0ca4e0489c0b 100644 --- a/drivers/gpu/drm/via/via_drv.c +++ b/drivers/gpu/drm/via/via_drv.c @@ -77,7 +77,6 @@ static struct drm_driver driver = { .open = via_driver_open, .preclose = via_reclaim_buffers_locked, .postclose = via_driver_postclose, - .set_busid = drm_pci_set_busid, .context_dtor = via_final_context, .get_vblank_counter = via_get_vblank_counter, .enable_vblank = via_enable_vblank, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 4a641555b960..63218033b0be 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1531,7 +1531,6 @@ static struct drm_driver driver = { .master_drop = vmw_master_drop, .open = vmw_driver_open, .postclose = vmw_postclose, - .set_busid = drm_pci_set_busid, .dumb_create = vmw_dumb_create, .dumb_map_offset = vmw_dumb_map_offset, diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 18f3181674e8..e4de59bc52c8 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -172,8 +172,6 @@ struct drm_driver { */ void (*release) (struct drm_device *); - int (*set_busid)(struct drm_device *dev, struct drm_master *master); - /** * @get_vblank_counter: * diff --git a/include/drm/drm_pci.h b/include/drm/drm_pci.h index 4579fac1080c..961b16f9b553 100644 --- a/include/drm/drm_pci.h +++ b/include/drm/drm_pci.h @@ -49,7 +49,6 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver); int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver); -int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master); #else static inline int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent, @@ -57,12 +56,6 @@ static inline int drm_get_pci_dev(struct pci_dev *pdev, { return -ENOSYS; } - -static inline int drm_pci_set_busid(struct drm_device *dev, - struct drm_master *master) -{ - return -ENOSYS; -} #endif #define DRM_PCIE_SPEED_25 1 From 10631d724deff712343d96dd3017cd323349f761 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 24 May 2017 16:51:40 +0200 Subject: [PATCH 0011/1795] drm/pci: Deprecate drm_pci_init/exit completely The magic switching between proper pci driver and shadow-attach isn't useful anymore since there's no ums+kms drivers left. Let's split this up properly, calling pci_register_driver for kms drivers and renaming the shadow-attach init to drm_legacy_pci_init/exit. 
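The conversions below all follow one of two patterns; a hedged, condensed sketch (foo_* for a modeset driver, bar_* for a legacy DRI1 driver, all names hypothetical):

/* KMS PCI driver after this patch: register the pci_driver directly. */
static int __init foo_init(void)
{
	return pci_register_driver(&foo_pci_driver);
}

static void __exit foo_exit(void)
{
	pci_unregister_driver(&foo_pci_driver);
}

/* Legacy DRI1 driver (DRIVER_LEGACY set): keep shadow-attach, now under
 * its explicitly deprecated name. */
static int __init bar_init(void)
{
	return drm_legacy_pci_init(&bar_driver, &bar_pci_driver);
}

static void __exit bar_exit(void)
{
	drm_legacy_pci_exit(&bar_driver, &bar_pci_driver);
}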
Acked-by: Thierry Reding Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170524145212.27837-6-daniel.vetter@ffwll.ch --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 +-- drivers/gpu/drm/ast/ast_drv.c | 4 +-- drivers/gpu/drm/bochs/bochs_drv.c | 4 +-- drivers/gpu/drm/cirrus/cirrus_drv.c | 4 +-- drivers/gpu/drm/drm_pci.c | 39 ++++++++----------------- drivers/gpu/drm/gma500/psb_drv.c | 4 +-- drivers/gpu/drm/i810/i810_drv.c | 4 +-- drivers/gpu/drm/mga/mga_drv.c | 4 +-- drivers/gpu/drm/mgag200/mgag200_drv.c | 5 ++-- drivers/gpu/drm/nouveau/nouveau_drm.c | 11 +++++-- drivers/gpu/drm/qxl/qxl_drv.c | 4 +-- drivers/gpu/drm/r128/r128_drv.c | 4 +-- drivers/gpu/drm/radeon/radeon_drv.c | 5 ++-- drivers/gpu/drm/savage/savage_drv.c | 4 +-- drivers/gpu/drm/sis/sis_drv.c | 4 +-- drivers/gpu/drm/tdfx/tdfx_drv.c | 4 +-- drivers/gpu/drm/via/via_drv.c | 4 +-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 4 +-- include/drm/drm_pci.h | 4 +-- 19 files changed, 56 insertions(+), 64 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index e7a4bce6358d..4911d304d8b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -816,7 +816,7 @@ static int __init amdgpu_init(void) driver->num_ioctls = amdgpu_max_kms_ioctl; amdgpu_register_atpx_handler(); /* let modprobe override vga console setting */ - return drm_pci_init(driver, pdriver); + return pci_register_driver(pdriver); error_sched: amdgpu_fence_slab_fini(); @@ -831,7 +831,7 @@ error_sync: static void __exit amdgpu_exit(void) { amdgpu_amdkfd_fini(); - drm_pci_exit(driver, pdriver); + pci_unregister_driver(pdriver); amdgpu_unregister_atpx_handler(); amdgpu_sync_fini(); amd_sched_fence_slab_fini(); diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index f6794745a024..3022b39c00f3 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -220,11 +220,11 @@ static int __init ast_init(void) if (ast_modeset == 0) return -EINVAL; - return drm_pci_init(&driver, &ast_pci_driver); + return pci_register_driver(&ast_pci_driver); } static void __exit ast_exit(void) { - drm_pci_exit(&driver, &ast_pci_driver); + pci_unregister_driver(&ast_pci_driver); } module_init(ast_init); diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c index 8fccd3cf000d..a1d28845da5f 100644 --- a/drivers/gpu/drm/bochs/bochs_drv.c +++ b/drivers/gpu/drm/bochs/bochs_drv.c @@ -223,12 +223,12 @@ static int __init bochs_init(void) if (bochs_modeset == 0) return -EINVAL; - return drm_pci_init(&bochs_driver, &bochs_pci_driver); + return pci_register_driver(&bochs_pci_driver); } static void __exit bochs_exit(void) { - drm_pci_exit(&bochs_driver, &bochs_pci_driver); + pci_unregister_driver(&bochs_pci_driver); } module_init(bochs_init); diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c index c48b9eb76712..910c300f5c37 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -165,12 +165,12 @@ static int __init cirrus_init(void) if (cirrus_modeset == 0) return -EINVAL; - return drm_pci_init(&driver, &cirrus_pci_driver); + return pci_register_driver(&cirrus_pci_driver); } static void __exit cirrus_exit(void) { - drm_pci_exit(&driver, &cirrus_pci_driver); + pci_unregister_driver(&cirrus_pci_driver); } module_init(cirrus_init); diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index ad31d95e77c9..1235c9877d6f 100644 --- 
a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -280,20 +280,15 @@ err_free: EXPORT_SYMBOL(drm_get_pci_dev); /** - * drm_pci_init - Register matching PCI devices with the DRM subsystem + * drm_legacy_pci_init - shadow-attach a legacy DRM PCI driver * @driver: DRM device driver * @pdriver: PCI device driver * - * Initializes a drm_device structures, registering the stubs and initializing - * the AGP device. - * - * NOTE: This function is deprecated. Modern modesetting drm drivers should use - * pci_register_driver() directly, this function only provides shadow-binding - * support for old legacy drivers on top of that core pci function. + * This is only used by legacy dri1 drivers and deprecated. * * Return: 0 on success or a negative error code on failure. */ -int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver) +int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver) { struct pci_dev *pdev = NULL; const struct pci_device_id *pid; @@ -301,8 +296,8 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver) DRM_DEBUG("\n"); - if (!(driver->driver_features & DRIVER_LEGACY)) - return pci_register_driver(pdriver); + if (WARN_ON(!(driver->driver_features & DRIVER_LEGACY))) + return -EINVAL; /* If not using KMS, fall back to stealth mode manual scanning. */ INIT_LIST_HEAD(&driver->legacy_dev_list); @@ -329,6 +324,7 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver) } return 0; } +EXPORT_SYMBOL(drm_legacy_pci_init); int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask) { @@ -390,11 +386,6 @@ EXPORT_SYMBOL(drm_pcie_get_max_link_width); #else -int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver) -{ - return -1; -} - void drm_pci_agp_destroy(struct drm_device *dev) {} int drm_irq_by_busid(struct drm_device *dev, void *data, @@ -404,27 +395,21 @@ int drm_irq_by_busid(struct drm_device *dev, void *data, } #endif -EXPORT_SYMBOL(drm_pci_init); - /** - * drm_pci_exit - Unregister matching PCI devices from the DRM subsystem + * drm_legacy_pci_exit - unregister shadow-attach legacy DRM driver * @driver: DRM device driver * @pdriver: PCI device driver * - * Unregisters one or more devices matched by a PCI driver from the DRM - * subsystem. - * - * NOTE: This function is deprecated. Modern modesetting drm drivers should use - * pci_unregister_driver() directly, this function only provides shadow-binding - * support for old legacy drivers on top of that core pci function. + * Unregister a DRM driver shadow-attached through drm_legacy_pci_init(). This + * is deprecated and only used by dri1 drivers. 
*/ -void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver) +void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver) { struct drm_device *dev, *tmp; DRM_DEBUG("\n"); if (!(driver->driver_features & DRIVER_LEGACY)) { - pci_unregister_driver(pdriver); + WARN_ON(1); } else { list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list, legacy_dev_list) { @@ -434,4 +419,4 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver) } DRM_INFO("Module unloaded\n"); } -EXPORT_SYMBOL(drm_pci_exit); +EXPORT_SYMBOL(drm_legacy_pci_exit); diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 37d4c36c80f2..747c06b227c5 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -516,12 +516,12 @@ static struct pci_driver psb_pci_driver = { static int __init psb_init(void) { - return drm_pci_init(&driver, &psb_pci_driver); + return pci_register_driver(&psb_pci_driver); } static void __exit psb_exit(void) { - drm_pci_exit(&driver, &psb_pci_driver); + pci_unregister_driver(&psb_pci_driver); } late_initcall(psb_init); diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c index e1c35c710e24..c69d5c487f51 100644 --- a/drivers/gpu/drm/i810/i810_drv.c +++ b/drivers/gpu/drm/i810/i810_drv.c @@ -82,12 +82,12 @@ static int __init i810_init(void) return -EINVAL; } driver.num_ioctls = i810_max_ioctl; - return drm_pci_init(&driver, &i810_pci_driver); + return drm_legacy_pci_init(&driver, &i810_pci_driver); } static void __exit i810_exit(void) { - drm_pci_exit(&driver, &i810_pci_driver); + drm_legacy_pci_exit(&driver, &i810_pci_driver); } module_init(i810_init); diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c index 2a36ec611a44..1aad27813c23 100644 --- a/drivers/gpu/drm/mga/mga_drv.c +++ b/drivers/gpu/drm/mga/mga_drv.c @@ -89,12 +89,12 @@ static struct pci_driver mga_pci_driver = { static int __init mga_init(void) { driver.num_ioctls = mga_max_ioctl; - return drm_pci_init(&driver, &mga_pci_driver); + return drm_legacy_pci_init(&driver, &mga_pci_driver); } static void __exit mga_exit(void) { - drm_pci_exit(&driver, &mga_pci_driver); + drm_legacy_pci_exit(&driver, &mga_pci_driver); } module_init(mga_init); diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 53a5982a04c7..4189160af726 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -119,12 +119,13 @@ static int __init mgag200_init(void) if (mgag200_modeset == 0) return -EINVAL; - return drm_pci_init(&driver, &mgag200_pci_driver); + + return pci_register_driver(&mgag200_pci_driver); } static void __exit mgag200_exit(void) { - drm_pci_exit(&driver, &mgag200_pci_driver); + pci_unregister_driver(&mgag200_pci_driver); } module_init(mgag200_init); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 3e1a8da9b20f..efa5489c5aed 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -1120,7 +1120,12 @@ nouveau_drm_init(void) nouveau_register_dsm_handler(); nouveau_backlight_ctor(); - return drm_pci_init(&driver_pci, &nouveau_drm_pci_driver); + +#ifdef CONFIG_PCI + return pci_register_driver(&nouveau_drm_pci_driver); +#else + return 0; +#endif } static void __exit @@ -1129,7 +1134,9 @@ nouveau_drm_exit(void) if (!nouveau_modeset) return; - drm_pci_exit(&driver_pci, &nouveau_drm_pci_driver); +#ifdef CONFIG_PCI + 
pci_unregister_driver(&nouveau_drm_pci_driver); +#endif nouveau_backlight_dtor(); nouveau_unregister_dsm_handler(); diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index bb2d8da7e553..15c97b16ee21 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -301,12 +301,12 @@ static int __init qxl_init(void) if (qxl_modeset == 0) return -EINVAL; qxl_driver.num_ioctls = qxl_max_ioctls; - return drm_pci_init(&qxl_driver, &qxl_pci_driver); + return pci_register_driver(&qxl_pci_driver); } static void __exit qxl_exit(void) { - drm_pci_exit(&qxl_driver, &qxl_pci_driver); + pci_unregister_driver(&qxl_pci_driver); } module_init(qxl_init); diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c index 1d43c434328d..0d2b7e42b3a7 100644 --- a/drivers/gpu/drm/r128/r128_drv.c +++ b/drivers/gpu/drm/r128/r128_drv.c @@ -95,12 +95,12 @@ static int __init r128_init(void) { driver.num_ioctls = r128_max_ioctl; - return drm_pci_init(&driver, &r128_pci_driver); + return drm_legacy_pci_init(&driver, &r128_pci_driver); } static void __exit r128_exit(void) { - drm_pci_exit(&driver, &r128_pci_driver); + drm_legacy_pci_exit(&driver, &r128_pci_driver); } module_init(r128_init); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index dd5e86dafb29..0d02349674f6 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -619,14 +619,13 @@ static int __init radeon_init(void) return -EINVAL; } - /* let modprobe override vga console setting */ - return drm_pci_init(driver, pdriver); + return pci_register_driver(pdriver); } static void __exit radeon_exit(void) { radeon_kfd_fini(); - drm_pci_exit(driver, pdriver); + pci_unregister_driver(pdriver); radeon_unregister_atpx_handler(); } diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c index 2a08da09dbcf..2bddeb8bf457 100644 --- a/drivers/gpu/drm/savage/savage_drv.c +++ b/drivers/gpu/drm/savage/savage_drv.c @@ -74,12 +74,12 @@ static struct pci_driver savage_pci_driver = { static int __init savage_init(void) { driver.num_ioctls = savage_max_ioctl; - return drm_pci_init(&driver, &savage_pci_driver); + return drm_legacy_pci_init(&driver, &savage_pci_driver); } static void __exit savage_exit(void) { - drm_pci_exit(&driver, &savage_pci_driver); + drm_legacy_pci_exit(&driver, &savage_pci_driver); } module_init(savage_init); diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c index cdaced381f5d..e04a92658cd7 100644 --- a/drivers/gpu/drm/sis/sis_drv.c +++ b/drivers/gpu/drm/sis/sis_drv.c @@ -124,12 +124,12 @@ static struct pci_driver sis_pci_driver = { static int __init sis_init(void) { driver.num_ioctls = sis_max_ioctl; - return drm_pci_init(&driver, &sis_pci_driver); + return drm_legacy_pci_init(&driver, &sis_pci_driver); } static void __exit sis_exit(void) { - drm_pci_exit(&driver, &sis_pci_driver); + drm_legacy_pci_exit(&driver, &sis_pci_driver); } module_init(sis_init); diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c index acd5f8162bb6..3a1476818c65 100644 --- a/drivers/gpu/drm/tdfx/tdfx_drv.c +++ b/drivers/gpu/drm/tdfx/tdfx_drv.c @@ -71,12 +71,12 @@ static struct pci_driver tdfx_pci_driver = { static int __init tdfx_init(void) { - return drm_pci_init(&driver, &tdfx_pci_driver); + return drm_legacy_pci_init(&driver, &tdfx_pci_driver); } static void __exit tdfx_exit(void) { - drm_pci_exit(&driver, &tdfx_pci_driver); + drm_legacy_pci_exit(&driver, 
&tdfx_pci_driver); } module_init(tdfx_init); diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c index 0ca4e0489c0b..aaf766f7cca2 100644 --- a/drivers/gpu/drm/via/via_drv.c +++ b/drivers/gpu/drm/via/via_drv.c @@ -106,12 +106,12 @@ static int __init via_init(void) { driver.num_ioctls = via_max_ioctl; via_init_command_verifier(); - return drm_pci_init(&driver, &via_pci_driver); + return drm_legacy_pci_init(&driver, &via_pci_driver); } static void __exit via_exit(void) { - drm_pci_exit(&driver, &via_pci_driver); + drm_legacy_pci_exit(&driver, &via_pci_driver); } module_init(via_init); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 63218033b0be..204bf181b69e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1570,7 +1570,7 @@ static int __init vmwgfx_init(void) if (vgacon_text_force()) return -EINVAL; - ret = drm_pci_init(&driver, &vmw_pci_driver); + ret = pci_register_driver(&vmw_pci_driver); if (ret) DRM_ERROR("Failed initializing DRM.\n"); return ret; @@ -1578,7 +1578,7 @@ static int __init vmwgfx_init(void) static void __exit vmwgfx_exit(void) { - drm_pci_exit(&driver, &vmw_pci_driver); + pci_unregister_driver(&vmw_pci_driver); } module_init(vmwgfx_init); diff --git a/include/drm/drm_pci.h b/include/drm/drm_pci.h index 961b16f9b553..674599025d7d 100644 --- a/include/drm/drm_pci.h +++ b/include/drm/drm_pci.h @@ -43,8 +43,8 @@ struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size, size_t align); void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah); -int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver); -void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver); +int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver); +void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver); #ifdef CONFIG_PCI int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent, From bb2eaba6458ace16f7e3504de8788374cb42b43d Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 31 May 2017 11:20:45 +0200 Subject: [PATCH 0012/1795] drm/doc: Improve ioctl/fops docs a bit more I spotted a markup issue, plus adding the descriptions in drm_driver. Plus a few more links while at it. I'm still mildly unhappy with the split between fops and ioctls, but I still think having the ioctls in the uapi chapter makes more sense. Oh well ... v2: Rebase. v3: Move misplace hunk to the right patch. Cc: Stefan Agner Acked-by: Thierry Reding Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170531092045.3950-1-daniel.vetter@ffwll.ch --- Documentation/gpu/drm-internals.rst | 2 ++ Documentation/gpu/drm-uapi.rst | 2 ++ drivers/gpu/drm/drm_file.c | 7 ++++++- drivers/gpu/drm/drm_ioctl.c | 5 ++++- include/drm/drm_drv.h | 18 ++++++++++++++++++ 5 files changed, 32 insertions(+), 2 deletions(-) diff --git a/Documentation/gpu/drm-internals.rst b/Documentation/gpu/drm-internals.rst index f6882ad0b3c3..bece92258647 100644 --- a/Documentation/gpu/drm-internals.rst +++ b/Documentation/gpu/drm-internals.rst @@ -198,6 +198,8 @@ drivers. Open/Close, File Operations and IOCTLs ====================================== +.. 
_drm_driver_fops: + File Operations --------------- diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst index 858457567d3d..679373b4a03f 100644 --- a/Documentation/gpu/drm-uapi.rst +++ b/Documentation/gpu/drm-uapi.rst @@ -160,6 +160,8 @@ other hand, a driver requires shared state between clients which is visible to user-space and accessible beyond open-file boundaries, they cannot support render nodes. +.. _drm_driver_ioctl: + IOCTL Support on Device Nodes ============================= diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index caad93dab54b..6631f61b66ca 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -75,7 +75,7 @@ DEFINE_MUTEX(drm_global_mutex); * for drivers which use the CMA GEM helpers it's drm_gem_cma_mmap(). * * No other file operations are supported by the DRM userspace API. Overall the - * following is an example #file_operations structure:: + * following is an example &file_operations structure:: * * static const example_drm_fops = { * .owner = THIS_MODULE, @@ -92,6 +92,11 @@ DEFINE_MUTEX(drm_global_mutex); * For plain GEM based drivers there is the DEFINE_DRM_GEM_FOPS() macro, and for * CMA based drivers there is the DEFINE_DRM_GEM_CMA_FOPS() macro to make this * simpler. + * + * The driver's &file_operations must be stored in &drm_driver.fops. + * + * For driver-private IOCTL handling see the more detailed discussion in + * :ref:`IOCTL support in the userland interfaces chapter`. */ static int drm_open_helper(struct file *filp, struct drm_minor *minor); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index aa49a2241404..3a36b3717c28 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -683,7 +683,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { * * DRM driver private IOCTL must be in the range from DRM_COMMAND_BASE to * DRM_COMMAND_END. Finally you need an array of &struct drm_ioctl_desc to wire - * up the handlers and set the access rights: + * up the handlers and set the access rights:: * * static const struct drm_ioctl_desc my_driver_ioctls[] = { * DRM_IOCTL_DEF_DRV(MY_DRIVER_OPERATION, my_driver_operation, @@ -692,6 +692,9 @@ static const struct drm_ioctl_desc drm_ioctls[] = { * * And then assign this to the &drm_driver.ioctls field in your driver * structure. + * + * See the separate chapter on :ref:`file operations` for how + * the driver-specific IOCTLs are wired up. */ /** diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index e4de59bc52c8..f495eee01302 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -515,8 +515,26 @@ struct drm_driver { char *date; u32 driver_features; + + /** + * @ioctls: + * + * Array of driver-private IOCTL description entries. See the chapter on + * :ref:`IOCTL support in the userland interfaces + * chapter` for the full details. + */ + const struct drm_ioctl_desc *ioctls; + /** @num_ioctls: Number of entries in @ioctls. */ int num_ioctls; + + /** + * @fops: + * + * File operations for the DRM device node. See the discussion in + * :ref:`file operations` for in-depth coverage and + * some examples. + */ const struct file_operations *fops; /* Everything below here is for legacy driver, never use! 
*/ From c50a115b6e916b93f6fee0ede8ec1358b083c840 Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Thu, 25 May 2017 15:19:22 +0100 Subject: [PATCH 0013/1795] drm: vc4: Use crtc->mode_valid() and encoder->mode_valid() callbacks Now that we have a callback to check if crtc and encoder supports a given mode we can use it in vc4 so that we restrict the number of probed modes to the ones we can actually display. Also, remove the mode_fixup() calls as these are no longer needed because mode_valid() will be called before. Signed-off-by: Jose Abreu Cc: Carlos Palminha Cc: Daniel Vetter Cc: Eric Anholt Cc: David Airlie Reviewed-by: Neil Armstrong Compile-tested and Reviewed-by: Eric Anholt Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/5bea792f91074688c88a2dab8b0a90eec6e98fdf.1495720737.git.joabreu@synopsys.com --- drivers/gpu/drm/vc4/vc4_crtc.c | 13 ++++++------- drivers/gpu/drm/vc4/vc4_dpi.c | 13 ++++++------- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 403bbd5f99a9..e3f03efe719b 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -546,18 +546,17 @@ static void vc4_crtc_enable(struct drm_crtc *crtc) drm_crtc_vblank_on(crtc); } -static bool vc4_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static enum drm_mode_status vc4_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) { /* Do not allow doublescan modes from user space */ - if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) { + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) { DRM_DEBUG_KMS("[CRTC:%d] Doublescan mode rejected.\n", crtc->base.id); - return false; + return MODE_NO_DBLESCAN; } - return true; + return MODE_OK; } static int vc4_crtc_atomic_check(struct drm_crtc *crtc, @@ -867,7 +866,7 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = { .mode_set_nofb = vc4_crtc_mode_set_nofb, .disable = vc4_crtc_disable, .enable = vc4_crtc_enable, - .mode_fixup = vc4_crtc_mode_fixup, + .mode_valid = vc4_crtc_mode_valid, .atomic_check = vc4_crtc_atomic_check, .atomic_flush = vc4_crtc_atomic_flush, }; diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c index 2e0fe46aeb2e..519cefef800d 100644 --- a/drivers/gpu/drm/vc4/vc4_dpi.c +++ b/drivers/gpu/drm/vc4/vc4_dpi.c @@ -224,20 +224,19 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder) DRM_ERROR("Failed to set clock rate: %d\n", ret); } -static bool vc4_dpi_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static enum drm_mode_status vc4_dpi_encoder_mode_valid(struct drm_encoder *encoder, + const struct drm_display_mode *mode) { - if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) - return false; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + return MODE_NO_INTERLACE; - return true; + return MODE_OK; } static const struct drm_encoder_helper_funcs vc4_dpi_encoder_helper_funcs = { .disable = vc4_dpi_encoder_disable, .enable = vc4_dpi_encoder_enable, - .mode_fixup = vc4_dpi_encoder_mode_fixup, + .mode_valid = vc4_dpi_encoder_mode_valid, }; static const struct of_device_id vc4_dpi_dt_match[] = { From d7b66de513179065408908a79cfbe3a11a15e6eb Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 20 Jun 2017 11:42:28 +0200 Subject: [PATCH 0014/1795] drm: More links for gamma support helpers It's not obvious that when using the new 
color manager stuff you still need to wire up the legacy helper to get legacy LUT support. Improve this with more links. drm_crtc_funcs->gamma_set already explains this properly. Cc: Peter Rosin Cc: Boris Brezillon Reviewed-by: Boris Brezillon Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170620094228.4757-1-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_color_mgmt.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c index 3eda500fc005..fe0982708e95 100644 --- a/drivers/gpu/drm/drm_color_mgmt.c +++ b/drivers/gpu/drm/drm_color_mgmt.c @@ -128,6 +128,9 @@ EXPORT_SYMBOL(drm_color_lut_extract); * optional. The gamma and degamma properties are only attached if * their size is not 0 and ctm_property is only attached if has_ctm is * true. + * + * Drivers should use drm_atomic_helper_legacy_gamma_set() to implement the + * legacy &drm_crtc_funcs.gamma_set callback. */ void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc, uint degamma_lut_size, From 6daac7990a4451f68400068aa97254b8e4f8d977 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 20 Jun 2017 10:12:11 +0530 Subject: [PATCH 0015/1795] drm: sti: sti_dvo: make of_device_ids const. of_device_ids are not supposed to change at runtime. All functions working with of_device_ids provided by work with const of_device_ids. So mark the non-const structs as const. File size before: text data bss dec hex filename 4222 664 0 4886 1316 drivers/gpu/drm/sti/sti_dvo.o File size after constify dvo_of_match: text data bss dec hex filename 4638 248 0 4886 1316 drivers/gpu/drm/sti/sti_dvo.o Signed-off-by: Arvind Yadav Signed-off-by: Benjamin Gaignard Link: http://patchwork.freedesktop.org/patch/msgid/d5ac3cb4c43338419308d658b9bcb59b7540471e.1497868332.git.arvind.yadav.cs@gmail.com --- drivers/gpu/drm/sti/sti_dvo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index 24ebc6b2f34d..a51cd9f754db 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -582,7 +582,7 @@ static int sti_dvo_remove(struct platform_device *pdev) return 0; } -static struct of_device_id dvo_of_match[] = { +static const struct of_device_id dvo_of_match[] = { { .compatible = "st,stih407-dvo", }, { /* end node */ } }; From 4c952eaba7789e0b1f2c146e839fd82c5a290241 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 20 Jun 2017 10:35:34 +0530 Subject: [PATCH 0016/1795] drm: sti: sti_hqvdp: make of_device_ids const. of_device_ids are not supposed to change at runtime. All functions working with of_device_ids provided by work with const of_device_ids. So mark the non-const structs as const. 
File size before: text data bss dec hex filename 15845 640 0 16485 4065 drivers/gpu/drm/sti/sti_hqvdp.o File size after constify hqvdp_of_match: text data bss dec hex filename 16229 224 0 16453 4045 drivers/gpu/drm/sti/sti_hqvdp.o Signed-off-by: Arvind Yadav Signed-off-by: Benjamin Gaignard Link: http://patchwork.freedesktop.org/patch/msgid/0a6ae44cf7d0fb54380809ae0e52234dbd3f367a.1497934979.git.arvind.yadav.cs@gmail.com --- drivers/gpu/drm/sti/sti_hqvdp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index a1c161f77804..1234f87bce20 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -1395,7 +1395,7 @@ static int sti_hqvdp_remove(struct platform_device *pdev) return 0; } -static struct of_device_id hqvdp_of_match[] = { +static const struct of_device_id hqvdp_of_match[] = { { .compatible = "st,stih407-hqvdp", }, { /* end node */ } }; From 33def1ff7b09645f3631059ad9ce23e2c65e9016 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 16 Jun 2017 14:03:38 +0100 Subject: [PATCH 0017/1795] drm/i915: Simplify intel_engines_init We do not want to carry on over missing constructors and don't need a duplicated engine mask checking which is already done in the setup phase. Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson --- drivers/gpu/drm/i915/intel_engine_cs.c | 36 +++++++++----------------- 1 file changed, 12 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index a4487c5b7e37..3b46c1f7b88b 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -291,11 +291,9 @@ cleanup: */ int intel_engines_init(struct drm_i915_private *dev_priv) { - struct intel_device_info *device_info = mkwrite_device_info(dev_priv); struct intel_engine_cs *engine; enum intel_engine_id id, err_id; - unsigned int mask = 0; - int err = 0; + int err; for_each_engine(engine, dev_priv, id) { const struct engine_class_info *class_info = @@ -306,40 +304,30 @@ int intel_engines_init(struct drm_i915_private *dev_priv) init = class_info->init_execlists; else init = class_info->init_legacy; - if (!init) { - kfree(engine); - dev_priv->engine[id] = NULL; - continue; - } + + err = -EINVAL; + err_id = id; + + if (GEM_WARN_ON(!init)) + goto cleanup; err = init(engine); - if (err) { - err_id = id; + if (err) goto cleanup; - } GEM_BUG_ON(!engine->submit_request); - mask |= ENGINE_MASK(id); } - /* - * Catch failures to update intel_engines table when the new engines - * are added to the driver by a warning and disabling the forgotten - * engines. - */ - if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask)) - device_info->ring_mask = mask; - - device_info->num_rings = hweight32(mask); - return 0; cleanup: for_each_engine(engine, dev_priv, id) { - if (id >= err_id) + if (id >= err_id) { kfree(engine); - else + dev_priv->engine[id] = NULL; + } else { dev_priv->gt.cleanup_engine(engine); + } } return err; } From c58949f4185020d6d447698818fc7614adec69cf Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Mon, 19 Jun 2017 11:59:17 +0100 Subject: [PATCH 0018/1795] drm/i915: Do not re-calculate num_rings locally Since bb8f0f5abdd7 ("drm/i915: Split intel_engine allocation and initialisation") intel_info->num_rings is set early in the load sequence and so available to be used direclty in the 2nd load phase. 
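The resulting simplification is a one-line substitution; a minimal before/after sketch mirroring the intel_ringbuffer.c hunks below:

	/* before: recompute the ring count from the mask on every call */
	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;

	/* after: reuse the count already derived during engine setup */
	num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
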
Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/20170616130339.23015-1-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/intel_ringbuffer.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index acd1da9b62a3..5224b7abb8a3 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -2140,7 +2140,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, engine->emit_breadcrumb = gen6_sema_emit_breadcrumb; - num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1; + num_rings = INTEL_INFO(dev_priv)->num_rings - 1; if (INTEL_GEN(dev_priv) >= 8) { engine->emit_breadcrumb_sz += num_rings * 6; } else { @@ -2184,8 +2184,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine) engine->semaphore.signal = gen8_rcs_signal; - num_rings = - hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1; + num_rings = INTEL_INFO(dev_priv)->num_rings - 1; engine->emit_breadcrumb_sz += num_rings * 8; } } else if (INTEL_GEN(dev_priv) >= 6) { From d0a2987866b6cc1acd3ed37f9cd52e5c8dced005 Mon Sep 17 00:00:00 2001 From: Liviu Dudau Date: Tue, 20 Jun 2017 11:23:20 +0100 Subject: [PATCH 0019/1795] drm: Convert CMA fbdev console suspend helpers to use bool drm_fbdev_cma_set_suspend{,_unlocked} use an integer parameter to describe whether the intended state is a suspend or a resume. It then passes the value to drm_fb_helper_set_suspend{,_unlocked} which uses a boolean. Switch to using bool everywhere. Signed-off-by: Liviu Dudau Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170620102320.8849-1-Liviu.Dudau@arm.com --- drivers/gpu/drm/drm_fb_cma_helper.c | 4 ++-- include/drm/drm_fb_cma_helper.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index 53f9bdf470d7..ade319d10e70 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c @@ -640,7 +640,7 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event); * Calls drm_fb_helper_set_suspend, which is a wrapper around * fb_set_suspend implemented by fbdev core. */ -void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state) +void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, bool state) { if (fbdev_cma) drm_fb_helper_set_suspend(&fbdev_cma->fb_helper, state); @@ -657,7 +657,7 @@ EXPORT_SYMBOL(drm_fbdev_cma_set_suspend); * fb_set_suspend implemented by fbdev core. 
*/ void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma, - int state) + bool state) { if (fbdev_cma) drm_fb_helper_set_suspend_unlocked(&fbdev_cma->fb_helper, diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h index 199a63f48659..a323781afc3f 100644 --- a/include/drm/drm_fb_cma_helper.h +++ b/include/drm/drm_fb_cma_helper.h @@ -24,9 +24,9 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma); void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma); void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma); -void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state); +void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, bool state); void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma, - int state); + bool state); void drm_fb_cma_destroy(struct drm_framebuffer *fb); int drm_fb_cma_create_handle(struct drm_framebuffer *fb, From 829a0af29f70612f505302cc785a1ddd2bac148b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 20 Jun 2017 12:05:45 +0100 Subject: [PATCH 0020/1795] drm/i915: Group all the global context information together Create a substruct to hold all the global context state under drm_i915_private. Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/20170620110547.15947-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_debugfs.c | 4 +-- drivers/gpu/drm/i915/i915_drv.c | 9 ++--- drivers/gpu/drm/i915/i915_drv.h | 20 ++++++----- drivers/gpu/drm/i915/i915_gem.c | 13 ++++--- drivers/gpu/drm/i915/i915_gem_context.c | 35 ++++++++++--------- drivers/gpu/drm/i915/i915_gem_context.h | 14 +++++--- drivers/gpu/drm/i915/i915_perf.c | 2 +- drivers/gpu/drm/i915/i915_sysfs.c | 2 +- drivers/gpu/drm/i915/intel_lrc.c | 2 +- drivers/gpu/drm/i915/selftests/mock_context.c | 2 +- .../gpu/drm/i915/selftests/mock_gem_device.c | 4 +-- 11 files changed, 58 insertions(+), 49 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 4577b0af6886..6103d0079d16 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1966,7 +1966,7 @@ static int i915_context_status(struct seq_file *m, void *unused) if (ret) return ret; - list_for_each_entry(ctx, &dev_priv->context_list, link) { + list_for_each_entry(ctx, &dev_priv->contexts.list, link) { seq_printf(m, "HW context %u ", ctx->hw_id); if (ctx->pid) { struct task_struct *task; @@ -2072,7 +2072,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused) if (ret) return ret; - list_for_each_entry(ctx, &dev_priv->context_list, link) + list_for_each_entry(ctx, &dev_priv->contexts.list, link) for_each_engine(engine, dev_priv, id) i915_dump_lrc_obj(m, ctx, engine); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ee2325b180e7..36585b6e3718 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -588,13 +588,13 @@ static void i915_gem_fini(struct drm_i915_private *dev_priv) mutex_lock(&dev_priv->drm.struct_mutex); intel_uc_fini_hw(dev_priv); i915_gem_cleanup_engines(dev_priv); - i915_gem_context_fini(dev_priv); + i915_gem_contexts_fini(dev_priv); i915_gem_cleanup_userptr(dev_priv); mutex_unlock(&dev_priv->drm.struct_mutex); i915_gem_drain_freed_objects(dev_priv); - WARN_ON(!list_empty(&dev_priv->context_list)); + WARN_ON(!list_empty(&dev_priv->contexts.list)); } static int i915_load_modeset_init(struct drm_device *dev) @@ -1425,9 +1425,10 
@@ static void i915_driver_release(struct drm_device *dev) static int i915_driver_open(struct drm_device *dev, struct drm_file *file) { + struct drm_i915_private *i915 = to_i915(dev); int ret; - ret = i915_gem_open(dev, file); + ret = i915_gem_open(i915, file); if (ret) return ret; @@ -1457,7 +1458,7 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) struct drm_i915_file_private *file_priv = file->driver_priv; mutex_lock(&dev->struct_mutex); - i915_gem_context_close(dev, file); + i915_gem_context_close(file); i915_gem_release(dev, file); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e2d2b785cb65..fb627df0fa87 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2235,13 +2235,6 @@ struct drm_i915_private { DECLARE_HASHTABLE(mm_structs, 7); struct mutex mm_lock; - /* The hw wants to have a stable context identifier for the lifetime - * of the context (for OA, PASID, faults, etc). This is limited - * in execlists to 21 bits. - */ - struct ida context_hw_ida; -#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */ - /* Kernel Modesetting */ struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; @@ -2320,7 +2313,16 @@ struct drm_i915_private { */ struct mutex av_mutex; - struct list_head context_list; + struct { + struct list_head list; + + /* The hw wants to have a stable context identifier for the + * lifetime of the context (for OA, PASID, faults, etc). + * This is limited in execlists to 21 bits. + */ + struct ida hw_ida; +#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */ + } contexts; u32 fdi_rx_config; @@ -3498,7 +3500,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma); int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align); -int i915_gem_open(struct drm_device *dev, struct drm_file *file); +int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file); void i915_gem_release(struct drm_device *dev, struct drm_file *file); int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 7dcac3bfb771..c2213016fd86 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3098,7 +3098,7 @@ void i915_gem_set_wedged(struct drm_i915_private *dev_priv) stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL); - i915_gem_context_lost(dev_priv); + i915_gem_contexts_lost(dev_priv); mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0); } @@ -4564,7 +4564,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv) goto err_unlock; assert_kernel_context_is_current(dev_priv); - i915_gem_context_lost(dev_priv); + i915_gem_contexts_lost(dev_priv); mutex_unlock(&dev->struct_mutex); intel_guc_suspend(dev_priv); @@ -4811,7 +4811,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) if (ret) goto out_unlock; - ret = i915_gem_context_init(dev_priv); + ret = i915_gem_contexts_init(dev_priv); if (ret) goto out_unlock; @@ -4921,7 +4921,6 @@ i915_gem_load_init(struct drm_i915_private *dev_priv) if (err) goto err_priorities; - INIT_LIST_HEAD(&dev_priv->context_list); INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work); init_llist_head(&dev_priv->mm.free_list); INIT_LIST_HEAD(&dev_priv->mm.unbound_list); @@ -5045,7 +5044,7 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file) } } -int i915_gem_open(struct 
drm_device *dev, struct drm_file *file) +int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file) { struct drm_i915_file_private *file_priv; int ret; @@ -5057,7 +5056,7 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file) return -ENOMEM; file->driver_priv = file_priv; - file_priv->dev_priv = to_i915(dev); + file_priv->dev_priv = i915; file_priv->file = file; INIT_LIST_HEAD(&file_priv->rps.link); @@ -5066,7 +5065,7 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file) file_priv->bsd_engine = -1; - ret = i915_gem_context_open(dev, file); + ret = i915_gem_context_open(i915, file); if (ret) kfree(file_priv); diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 39ed58a21fc1..7a6a667c23ec 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -188,7 +188,7 @@ void i915_gem_context_free(struct kref *ctx_ref) list_del(&ctx->link); - ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id); + ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id); kfree(ctx); } @@ -205,7 +205,7 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out) { int ret; - ret = ida_simple_get(&dev_priv->context_hw_ida, + ret = ida_simple_get(&dev_priv->contexts.hw_ida, 0, MAX_CONTEXT_HW_ID, GFP_KERNEL); if (ret < 0) { /* Contexts are only released when no longer active. @@ -213,7 +213,7 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out) * stale contexts and try again. */ i915_gem_retire_requests(dev_priv); - ret = ida_simple_get(&dev_priv->context_hw_ida, + ret = ida_simple_get(&dev_priv->contexts.hw_ida, 0, MAX_CONTEXT_HW_ID, GFP_KERNEL); if (ret < 0) return ret; @@ -265,7 +265,7 @@ __create_hw_context(struct drm_i915_private *dev_priv, } kref_init(&ctx->ref); - list_add_tail(&ctx->link, &dev_priv->context_list); + list_add_tail(&ctx->link, &dev_priv->contexts.list); ctx->i915 = dev_priv; ctx->priority = I915_PRIORITY_NORMAL; @@ -418,7 +418,7 @@ out: return ctx; } -int i915_gem_context_init(struct drm_i915_private *dev_priv) +int i915_gem_contexts_init(struct drm_i915_private *dev_priv) { struct i915_gem_context *ctx; @@ -427,6 +427,8 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv) if (WARN_ON(dev_priv->kernel_context)) return 0; + INIT_LIST_HEAD(&dev_priv->contexts.list); + if (intel_vgpu_active(dev_priv) && HAS_LOGICAL_RING_CONTEXTS(dev_priv)) { if (!i915.enable_execlists) { @@ -437,7 +439,7 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv) /* Using the simple ida interface, the max is limited by sizeof(int) */ BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX); - ida_init(&dev_priv->context_hw_ida); + ida_init(&dev_priv->contexts.hw_ida); ctx = i915_gem_create_context(dev_priv, NULL); if (IS_ERR(ctx)) { @@ -463,7 +465,7 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv) return 0; } -void i915_gem_context_lost(struct drm_i915_private *dev_priv) +void i915_gem_contexts_lost(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; enum intel_engine_id id; @@ -484,7 +486,7 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv) if (!i915.enable_execlists) { struct i915_gem_context *ctx; - list_for_each_entry(ctx, &dev_priv->context_list, link) { + list_for_each_entry(ctx, &dev_priv->contexts.list, link) { if (!i915_gem_context_is_default(ctx)) continue; @@ -503,7 +505,7 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv) } } -void i915_gem_context_fini(struct 
drm_i915_private *dev_priv) +void i915_gem_contexts_fini(struct drm_i915_private *dev_priv) { struct i915_gem_context *dctx = dev_priv->kernel_context; @@ -514,7 +516,7 @@ void i915_gem_context_fini(struct drm_i915_private *dev_priv) context_close(dctx); dev_priv->kernel_context = NULL; - ida_destroy(&dev_priv->context_hw_ida); + ida_destroy(&dev_priv->contexts.hw_ida); } static int context_idr_cleanup(int id, void *p, void *data) @@ -525,16 +527,17 @@ static int context_idr_cleanup(int id, void *p, void *data) return 0; } -int i915_gem_context_open(struct drm_device *dev, struct drm_file *file) +int i915_gem_context_open(struct drm_i915_private *i915, + struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; struct i915_gem_context *ctx; idr_init(&file_priv->context_idr); - mutex_lock(&dev->struct_mutex); - ctx = i915_gem_create_context(to_i915(dev), file_priv); - mutex_unlock(&dev->struct_mutex); + mutex_lock(&i915->drm.struct_mutex); + ctx = i915_gem_create_context(i915, file_priv); + mutex_unlock(&i915->drm.struct_mutex); GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); @@ -546,11 +549,11 @@ int i915_gem_context_open(struct drm_device *dev, struct drm_file *file) return 0; } -void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) +void i915_gem_context_close(struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; - lockdep_assert_held(&dev->struct_mutex); + lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex); idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); idr_destroy(&file_priv->context_idr); diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h index 82c99ba92ad3..808f878db812 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.h +++ b/drivers/gpu/drm/i915/i915_gem_context.h @@ -273,13 +273,17 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx) } /* i915_gem_context.c */ -int __must_check i915_gem_context_init(struct drm_i915_private *dev_priv); -void i915_gem_context_lost(struct drm_i915_private *dev_priv); -void i915_gem_context_fini(struct drm_i915_private *dev_priv); -int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); -void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); +int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv); +void i915_gem_contexts_lost(struct drm_i915_private *dev_priv); +void i915_gem_contexts_fini(struct drm_i915_private *dev_priv); + +int i915_gem_context_open(struct drm_i915_private *i915, + struct drm_file *file); +void i915_gem_context_close(struct drm_file *file); + int i915_switch_context(struct drm_i915_gem_request *req); int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv); + void i915_gem_context_free(struct kref *ctx_ref); struct i915_gem_context * i915_gem_context_create_gvt(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 38c44407bafc..d1771e8fe4a8 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1746,7 +1746,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, goto out; /* Update all contexts now that we've stalled the submission. 
*/ - list_for_each_entry(ctx, &dev_priv->context_list, link) { + list_for_each_entry(ctx, &dev_priv->contexts.list, link) { struct intel_context *ce = &ctx->engine[RCS]; u32 *regs; diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 1eef3fae4db3..3a481062f219 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -209,7 +209,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj, memcpy(*remap_info + (offset/4), buf, count); /* NB: We defer the remapping until we switch to the context */ - list_for_each_entry(ctx, &dev_priv->context_list, link) + list_for_each_entry(ctx, &dev_priv->contexts.list, link) ctx->remap_slice |= (1<context_list, link) { + list_for_each_entry(ctx, &dev_priv->contexts.list, link) { for_each_engine(engine, dev_priv, id) { struct intel_context *ce = &ctx->engine[engine->id]; u32 *reg; diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c index f8b9cc212b02..243325b97d4c 100644 --- a/drivers/gpu/drm/i915/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/selftests/mock_context.c @@ -48,7 +48,7 @@ mock_context(struct drm_i915_private *i915, if (!ctx->vma_lut.ht) goto err_free; - ret = ida_simple_get(&i915->context_hw_ida, + ret = ida_simple_get(&i915->contexts.hw_ida, 0, MAX_CONTEXT_HW_ID, GFP_KERNEL); if (ret < 0) goto err_vma_ht; diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 627e2aa09766..0ddb70a16550 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -61,7 +61,7 @@ static void mock_device_release(struct drm_device *dev) mutex_lock(&i915->drm.struct_mutex); for_each_engine(engine, i915, id) mock_engine_free(engine); - i915_gem_context_fini(i915); + i915_gem_contexts_fini(i915); mutex_unlock(&i915->drm.struct_mutex); drain_workqueue(i915->wq); @@ -160,7 +160,7 @@ struct drm_i915_private *mock_gem_device(void) INIT_LIST_HEAD(&i915->mm.unbound_list); INIT_LIST_HEAD(&i915->mm.bound_list); - ida_init(&i915->context_hw_ida); + ida_init(&i915->contexts.hw_ida); INIT_DELAYED_WORK(&i915->gt.retire_work, mock_retire_work_handler); INIT_DELAYED_WORK(&i915->gt.idle_work, mock_idle_work_handler); From 5f09a9c8ab6b16eefbcf81635330d68481af1edc Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 20 Jun 2017 12:05:46 +0100 Subject: [PATCH 0021/1795] drm/i915: Allow contexts to be unreferenced locklessly If we move the actual cleanup of the context to a worker, we can allow the final free to be called from any context and avoid undue latency in the caller. 
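A condensed view of the deferred-free path this introduces, lifted from the i915_gem_context.c hunks below: the final kref release only pushes the context onto a lockless list, and a worker later takes struct_mutex to do the actual teardown.

void i915_gem_context_release(struct kref *ref)
{
	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
	struct drm_i915_private *i915 = ctx->i915;

	/* llist_add() returns true only when the list was empty, so the
	 * worker is queued at most once per batch of freed contexts. */
	if (llist_add(&ctx->free_link, &i915->contexts.free_list))
		queue_work(i915->wq, &i915->contexts.free_work);
}

static void contexts_free_worker(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, typeof(*i915), contexts.free_work);

	mutex_lock(&i915->drm.struct_mutex);
	contexts_free(i915);
	mutex_unlock(&i915->drm.struct_mutex);
}
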
v2: Negotiate handling the delayed contexts free by flushing the workqueue before calling i915_gem_context_fini() and performing the final free of the kernel context directly v3: Flush deferred frees before new context allocations Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/20170620110547.15947-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gvt/scheduler.c | 2 +- drivers/gpu/drm/i915/i915_drv.c | 2 + drivers/gpu/drm/i915/i915_drv.h | 23 +------- drivers/gpu/drm/i915/i915_gem_context.c | 59 +++++++++++++++---- drivers/gpu/drm/i915/i915_gem_context.h | 15 ++++- drivers/gpu/drm/i915/i915_perf.c | 4 +- drivers/gpu/drm/i915/selftests/i915_vma.c | 8 ++- drivers/gpu/drm/i915/selftests/mock_context.c | 9 +++ drivers/gpu/drm/i915/selftests/mock_context.h | 2 + .../gpu/drm/i915/selftests/mock_gem_device.c | 3 +- 10 files changed, 88 insertions(+), 39 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 488fdea348a9..2b8a80c9c18b 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -620,7 +620,7 @@ err: void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu) { - i915_gem_context_put_unlocked(vgpu->shadow_ctx); + i915_gem_context_put(vgpu->shadow_ctx); } int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 36585b6e3718..fe3d46ee4ddc 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -585,6 +585,8 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { static void i915_gem_fini(struct drm_i915_private *dev_priv) { + flush_workqueue(dev_priv->wq); + mutex_lock(&dev_priv->drm.struct_mutex); intel_uc_fini_hw(dev_priv); i915_gem_cleanup_engines(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index fb627df0fa87..0f1330adc37b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2315,6 +2315,8 @@ struct drm_i915_private { struct { struct list_head list; + struct llist_head free_list; + struct work_struct free_work; /* The hw wants to have a stable context identifier for the * lifetime of the context (for OA, PASID, faults, etc). 
@@ -3545,27 +3547,6 @@ i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id) return ctx; } -static inline struct i915_gem_context * -i915_gem_context_get(struct i915_gem_context *ctx) -{ - kref_get(&ctx->ref); - return ctx; -} - -static inline void i915_gem_context_put(struct i915_gem_context *ctx) -{ - lockdep_assert_held(&ctx->i915->drm.struct_mutex); - kref_put(&ctx->ref, i915_gem_context_free); -} - -static inline void i915_gem_context_put_unlocked(struct i915_gem_context *ctx) -{ - struct mutex *lock = &ctx->i915->drm.struct_mutex; - - if (kref_put_mutex(&ctx->ref, i915_gem_context_free, lock)) - mutex_unlock(lock); -} - static inline struct intel_timeline * i915_gem_context_lookup_timeline(struct i915_gem_context *ctx, struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 7a6a667c23ec..9cf96380af9f 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -158,13 +158,11 @@ static void vma_lut_free(struct i915_gem_context *ctx) kvfree(lut->ht); } -void i915_gem_context_free(struct kref *ctx_ref) +static void i915_gem_context_free(struct i915_gem_context *ctx) { - struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref); int i; lockdep_assert_held(&ctx->i915->drm.struct_mutex); - trace_i915_context_free(ctx); GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); vma_lut_free(ctx); @@ -192,6 +190,37 @@ void i915_gem_context_free(struct kref *ctx_ref) kfree(ctx); } +static void contexts_free(struct drm_i915_private *i915) +{ + struct llist_node *freed = llist_del_all(&i915->contexts.free_list); + struct i915_gem_context *ctx; + + lockdep_assert_held(&i915->drm.struct_mutex); + + llist_for_each_entry(ctx, freed, free_link) + i915_gem_context_free(ctx); +} + +static void contexts_free_worker(struct work_struct *work) +{ + struct drm_i915_private *i915 = + container_of(work, typeof(*i915), contexts.free_work); + + mutex_lock(&i915->drm.struct_mutex); + contexts_free(i915); + mutex_unlock(&i915->drm.struct_mutex); +} + +void i915_gem_context_release(struct kref *ref) +{ + struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref); + struct drm_i915_private *i915 = ctx->i915; + + trace_i915_context_free(ctx); + if (llist_add(&ctx->free_link, &i915->contexts.free_list)) + queue_work(i915->wq, &i915->contexts.free_work); +} + static void context_close(struct i915_gem_context *ctx) { i915_gem_context_set_closed(ctx); @@ -428,6 +457,8 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv) return 0; INIT_LIST_HEAD(&dev_priv->contexts.list); + INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker); + init_llist_head(&dev_priv->contexts.free_list); if (intel_vgpu_active(dev_priv) && HAS_LOGICAL_RING_CONTEXTS(dev_priv)) { @@ -505,18 +536,20 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv) } } -void i915_gem_contexts_fini(struct drm_i915_private *dev_priv) +void i915_gem_contexts_fini(struct drm_i915_private *i915) { - struct i915_gem_context *dctx = dev_priv->kernel_context; + struct i915_gem_context *ctx; - lockdep_assert_held(&dev_priv->drm.struct_mutex); + lockdep_assert_held(&i915->drm.struct_mutex); - GEM_BUG_ON(!i915_gem_context_is_kernel(dctx)); + /* Keep the context so that we can free it immediately ourselves */ + ctx = i915_gem_context_get(fetch_and_zero(&i915->kernel_context)); + GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); + context_close(ctx); + i915_gem_context_free(ctx); - context_close(dctx); 
- dev_priv->kernel_context = NULL; - - ida_destroy(&dev_priv->contexts.hw_ida); + /* Must free all deferred contexts (via flush_workqueue) first */ + ida_destroy(&i915->contexts.hw_ida); } static int context_idr_cleanup(int id, void *p, void *data) @@ -957,6 +990,10 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, if (ret) return ret; + /* Reap stale contexts */ + i915_gem_retire_requests(dev_priv); + contexts_free(dev_priv); + ctx = i915_gem_create_context(dev_priv, file_priv); mutex_unlock(&dev->struct_mutex); if (IS_ERR(ctx)) diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h index 808f878db812..61146f4aa168 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.h +++ b/drivers/gpu/drm/i915/i915_gem_context.h @@ -86,6 +86,7 @@ struct i915_gem_context { /** link: place with &drm_i915_private.context_list */ struct list_head link; + struct llist_node free_link; /** * @ref: reference count @@ -284,7 +285,7 @@ void i915_gem_context_close(struct drm_file *file); int i915_switch_context(struct drm_i915_gem_request *req); int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv); -void i915_gem_context_free(struct kref *ctx_ref); +void i915_gem_context_release(struct kref *ctx_ref); struct i915_gem_context * i915_gem_context_create_gvt(struct drm_device *dev); @@ -299,4 +300,16 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data, struct drm_file *file); +static inline struct i915_gem_context * +i915_gem_context_get(struct i915_gem_context *ctx) +{ + kref_get(&ctx->ref); + return ctx; +} + +static inline void i915_gem_context_put(struct i915_gem_context *ctx) +{ + kref_put(&ctx->ref, i915_gem_context_release); +} + #endif /* !__I915_GEM_CONTEXT_H__ */ diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index d1771e8fe4a8..afd8260cd096 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -2444,7 +2444,7 @@ static void i915_perf_destroy_locked(struct i915_perf_stream *stream) list_del(&stream->link); if (stream->ctx) - i915_gem_context_put_unlocked(stream->ctx); + i915_gem_context_put(stream->ctx); kfree(stream); } @@ -2633,7 +2633,7 @@ err_alloc: kfree(stream); err_ctx: if (specific_ctx) - i915_gem_context_put_unlocked(specific_ctx); + i915_gem_context_put(specific_ctx); err: return ret; } diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index fb9072d5877f..2e86ec136b35 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -186,16 +186,20 @@ static int igt_vma_create(void *arg) goto end; } - list_for_each_entry_safe(ctx, cn, &contexts, link) + list_for_each_entry_safe(ctx, cn, &contexts, link) { + list_del_init(&ctx->link); mock_context_close(ctx); + } } end: /* Final pass to lookup all created contexts */ err = create_vmas(i915, &objects, &contexts); out: - list_for_each_entry_safe(ctx, cn, &contexts, link) + list_for_each_entry_safe(ctx, cn, &contexts, link) { + list_del_init(&ctx->link); mock_context_close(ctx); + } list_for_each_entry_safe(obj, on, &objects, st_link) i915_gem_object_put(obj); diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c index 243325b97d4c..9c7c68181f82 100644 --- a/drivers/gpu/drm/i915/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/selftests/mock_context.c @@ -86,3 +86,12 
@@ void mock_context_close(struct i915_gem_context *ctx) i915_gem_context_put(ctx); } + +void mock_init_contexts(struct drm_i915_private *i915) +{ + INIT_LIST_HEAD(&i915->contexts.list); + ida_init(&i915->contexts.hw_ida); + + INIT_WORK(&i915->contexts.free_work, contexts_free_worker); + init_llist_head(&i915->contexts.free_list); +} diff --git a/drivers/gpu/drm/i915/selftests/mock_context.h b/drivers/gpu/drm/i915/selftests/mock_context.h index 2427e5c0916a..383941a61124 100644 --- a/drivers/gpu/drm/i915/selftests/mock_context.h +++ b/drivers/gpu/drm/i915/selftests/mock_context.h @@ -25,6 +25,8 @@ #ifndef __MOCK_CONTEXT_H #define __MOCK_CONTEXT_H +void mock_init_contexts(struct drm_i915_private *i915); + struct i915_gem_context * mock_context(struct drm_i915_private *i915, const char *name); diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 0ddb70a16550..47613d20bba8 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -57,6 +57,7 @@ static void mock_device_release(struct drm_device *dev) cancel_delayed_work_sync(&i915->gt.retire_work); cancel_delayed_work_sync(&i915->gt.idle_work); + flush_workqueue(i915->wq); mutex_lock(&i915->drm.struct_mutex); for_each_engine(engine, i915, id) @@ -160,7 +161,7 @@ struct drm_i915_private *mock_gem_device(void) INIT_LIST_HEAD(&i915->mm.unbound_list); INIT_LIST_HEAD(&i915->mm.bound_list); - ida_init(&i915->contexts.hw_ida); + mock_init_contexts(i915); INIT_DELAYED_WORK(&i915->gt.retire_work, mock_retire_work_handler); INIT_DELAYED_WORK(&i915->gt.idle_work, mock_idle_work_handler); From 1acfc104cdf8a3408f0e83b4115d4419c6315005 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 20 Jun 2017 12:05:47 +0100 Subject: [PATCH 0022/1795] drm/i915: Enable rcu-only context lookups Whilst the contents of the context is still protected by the big struct_mutex, this is not much of an improvement. It is just one tiny step towards reducing our BKL. 
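The heart of the change is the lookup itself; a condensed view of the new i915_gem_context_lookup() from the i915_drv.h hunk below. The matching free path switches to kfree_rcu() (see the i915_gem_context.c hunk), which is what makes the unlocked idr_find() safe.

static inline struct i915_gem_context *
i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_gem_context *ctx;

	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file_priv, id);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL; /* raced against the final unreference */
	rcu_read_unlock();

	return ctx;
}
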
Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/20170620110547.15947-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_drv.h | 16 +++-- drivers/gpu/drm/i915/i915_gem_context.c | 77 +++++++++++----------- drivers/gpu/drm/i915/i915_gem_context.h | 5 ++ drivers/gpu/drm/i915/i915_gem_execbuffer.c | 21 +++--- 4 files changed, 64 insertions(+), 55 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0f1330adc37b..69219b5d1198 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3533,16 +3533,22 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj, void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj, struct sg_table *pages); +static inline struct i915_gem_context * +__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id) +{ + return idr_find(&file_priv->context_idr, id); +} + static inline struct i915_gem_context * i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id) { struct i915_gem_context *ctx; - lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex); - - ctx = idr_find(&file_priv->context_idr, id); - if (!ctx) - return ERR_PTR(-ENOENT); + rcu_read_lock(); + ctx = __i915_gem_context_lookup_rcu(file_priv, id); + if (ctx && !kref_get_unless_zero(&ctx->ref)) + ctx = NULL; + rcu_read_unlock(); return ctx; } diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 9cf96380af9f..71d2ea7dab64 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -187,7 +187,7 @@ static void i915_gem_context_free(struct i915_gem_context *ctx) list_del(&ctx->link); ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id); - kfree(ctx); + kfree_rcu(ctx, rcu); } static void contexts_free(struct drm_i915_private *i915) @@ -1021,20 +1021,19 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, if (args->ctx_id == DEFAULT_CONTEXT_HANDLE) return -ENOENT; - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; - ctx = i915_gem_context_lookup(file_priv, args->ctx_id); - if (IS_ERR(ctx)) { - mutex_unlock(&dev->struct_mutex); - return PTR_ERR(ctx); - } + if (!ctx) + return -ENOENT; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + goto out; __destroy_hw_context(ctx, file_priv); mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("HW context %d destroyed\n", args->ctx_id); +out: + i915_gem_context_put(ctx); return 0; } @@ -1044,17 +1043,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_gem_context_param *args = data; struct i915_gem_context *ctx; - int ret; - - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; + int ret = 0; ctx = i915_gem_context_lookup(file_priv, args->ctx_id); - if (IS_ERR(ctx)) { - mutex_unlock(&dev->struct_mutex); - return PTR_ERR(ctx); - } + if (!ctx) + return -ENOENT; args->size = 0; switch (args->param) { @@ -1082,8 +1075,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, ret = -EINVAL; break; } - mutex_unlock(&dev->struct_mutex); + i915_gem_context_put(ctx); return ret; } @@ -1095,15 +1088,13 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, struct i915_gem_context *ctx; int ret; + ctx = i915_gem_context_lookup(file_priv, args->ctx_id); + if (!ctx) + return 
-ENOENT; + ret = i915_mutex_lock_interruptible(dev); if (ret) - return ret; - - ctx = i915_gem_context_lookup(file_priv, args->ctx_id); - if (IS_ERR(ctx)) { - mutex_unlock(&dev->struct_mutex); - return PTR_ERR(ctx); - } + goto out; switch (args->param) { case I915_CONTEXT_PARAM_BAN_PERIOD: @@ -1141,6 +1132,8 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, } mutex_unlock(&dev->struct_mutex); +out: + i915_gem_context_put(ctx); return ret; } @@ -1155,27 +1148,31 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, if (args->flags || args->pad) return -EINVAL; - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; + ret = -ENOENT; + rcu_read_lock(); + ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id); + if (!ctx) + goto out; - ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id); - if (IS_ERR(ctx)) { - mutex_unlock(&dev->struct_mutex); - return PTR_ERR(ctx); - } + /* + * We opt for unserialised reads here. This may result in tearing + * in the extremely unlikely event of a GPU hang on this context + * as we are querying them. If we need that extra layer of protection, + * we should wrap the hangstats with a seqlock. + */ if (capable(CAP_SYS_ADMIN)) args->reset_count = i915_reset_count(&dev_priv->gpu_error); else args->reset_count = 0; - args->batch_active = ctx->guilty_count; - args->batch_pending = ctx->active_count; + args->batch_active = READ_ONCE(ctx->guilty_count); + args->batch_pending = READ_ONCE(ctx->active_count); - mutex_unlock(&dev->struct_mutex); - - return 0; + ret = 0; +out: + rcu_read_unlock(); + return ret; } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h index 61146f4aa168..04320f80f9f4 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.h +++ b/drivers/gpu/drm/i915/i915_gem_context.h @@ -99,6 +99,11 @@ struct i915_gem_context { */ struct kref ref; + /** + * @rcu: rcu_head for deferred freeing. + */ + struct rcu_head rcu; + /** * @flags: small set of booleans */ diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index eb46dfa374a7..35d1f8e8906e 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -669,16 +669,17 @@ static int eb_select_context(struct i915_execbuffer *eb) struct i915_gem_context *ctx; ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1); - if (unlikely(IS_ERR(ctx))) - return PTR_ERR(ctx); + if (unlikely(!ctx)) + return -ENOENT; if (unlikely(i915_gem_context_is_banned(ctx))) { DRM_DEBUG("Context %u tried to submit while banned\n", ctx->user_handle); + i915_gem_context_put(ctx); return -EIO; } - eb->ctx = i915_gem_context_get(ctx); + eb->ctx = ctx; eb->vm = ctx->ppgtt ? 
&ctx->ppgtt->base : &eb->i915->ggtt.base; eb->context_flags = 0; @@ -2127,7 +2128,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC)) args->flags |= __EXEC_HAS_RELOC; eb.exec = exec; - eb.ctx = NULL; eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS; if (USES_FULL_PPGTT(eb.i915)) eb.invalid_flags |= EXEC_OBJECT_NEEDS_GTT; @@ -2182,6 +2182,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, if (eb_create(&eb)) return -ENOMEM; + err = eb_select_context(&eb); + if (unlikely(err)) + goto err_destroy; + /* * Take a local wakeref for preparing to dispatch the execbuf as * we expect to access the hardware fairly frequently in the @@ -2190,14 +2194,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, * 100ms. */ intel_runtime_pm_get(eb.i915); + err = i915_mutex_lock_interruptible(dev); if (err) goto err_rpm; - err = eb_select_context(&eb); - if (unlikely(err)) - goto err_unlock; - err = eb_relocate(&eb); if (err) /* @@ -2333,11 +2334,11 @@ err_batch_unpin: err_vma: if (eb.exec) eb_release_vmas(&eb); - i915_gem_context_put(eb.ctx); -err_unlock: mutex_unlock(&dev->struct_mutex); err_rpm: intel_runtime_pm_put(eb.i915); + i915_gem_context_put(eb.ctx); +err_destroy: eb_destroy(&eb); if (out_fence_fd != -1) put_unused_fd(out_fence_fd); From d5367307d409e7476ce18cada304da80949b57c7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 20 Jun 2017 10:57:43 +0100 Subject: [PATCH 0023/1795] drm/i915: Wait for concurrent global resets to complete If we enter i915_handle_error() a second time and a global reset is already in progress, we can simply wait for completion of the first reset. Currently we exit early prior to the actual reset being performed -- the worst of both worlds! v2: Plug into the existing reset_queue, and remember that kselftests is playing games with I915_RESET_BACKOFF to prevent hangcheck from screwing up. v3: Rename to i915_reset_device to fit in better with i915_reset_engine Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Michel Thierry Reviewed-by: Michel Thierry Link: http://patchwork.freedesktop.org/patch/msgid/20170620095751.13127-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_irq.c | 23 +++++++++---------- .../gpu/drm/i915/selftests/intel_hangcheck.c | 4 ++++ 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4cd9ee1ba332..8e9f4378b5a7 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2600,13 +2600,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) } /** - * i915_reset_and_wakeup - do process context error handling work + * i915_reset_device - do process context error handling work * @dev_priv: i915 device private * * Fire an error uevent so userspace can see that a hang or error * was detected. */ -static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv) +static void i915_reset_device(struct drm_i915_private *dev_priv) { struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; @@ -2646,13 +2646,6 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv) if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags)) kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); - - /* - * Note: The wake_up also serves as a memory barrier so that - * waiters see the updated value of the dev_priv->gpu_error. 
- */ - clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags); - wake_up_all(&dev_priv->gpu_error.reset_queue); } static inline void @@ -2744,11 +2737,17 @@ void i915_handle_error(struct drm_i915_private *dev_priv, if (!engine_mask) goto out; - if (test_and_set_bit(I915_RESET_BACKOFF, - &dev_priv->gpu_error.flags)) + if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) { + wait_event(dev_priv->gpu_error.reset_queue, + !test_bit(I915_RESET_BACKOFF, + &dev_priv->gpu_error.flags)); goto out; + } - i915_reset_and_wakeup(dev_priv); + i915_reset_device(dev_priv); + + clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags); + wake_up_all(&dev_priv->gpu_error.reset_queue); out: intel_runtime_pm_put(dev_priv); diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index aa31d6c0cdfb..cc00a361f0fa 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -316,6 +316,8 @@ static int igt_global_reset(void *arg) GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags)); clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags); + wake_up_all(&i915->gpu_error.reset_queue); + if (i915_terminally_wedged(&i915->gpu_error)) err = -EIO; @@ -404,6 +406,7 @@ fini: unlock: mutex_unlock(&i915->drm.struct_mutex); clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags); + wake_up_all(&i915->gpu_error.reset_queue); if (i915_terminally_wedged(&i915->gpu_error)) return -EIO; @@ -519,6 +522,7 @@ fini: unlock: mutex_unlock(&i915->drm.struct_mutex); clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags); + wake_up_all(&i915->gpu_error.reset_queue); if (i915_terminally_wedged(&i915->gpu_error)) return -EIO; From c64992e035d7cb2b469f933e33ee89625df97df5 Mon Sep 17 00:00:00 2001 From: Michel Thierry Date: Tue, 20 Jun 2017 10:57:44 +0100 Subject: [PATCH 0024/1795] drm/i915: Look for active requests earlier in the reset path And store the active request so that we only search for it once. v2: Check for request completion inside _prepare_engine, don't use ECANCELED, remove unnecessary null checks (Chris). v3: Capture active requests during reset_prepare and store it the engine hangcheck obj. v4: Rename commit, change i915_gem_reset_request to just confirm the active_request is still incomplete, instead of engine_stalled (Chris). v5: With style; pass the active request to gem_reset_engine, keep single return in reset_prepare_engine (Chris). v6: Moved before reset-engine code appears (Chris) Suggested-by: Chris Wilson Reviewed-by: Chris Wilson (v5) Signed-off-by: Michel Thierry Link: http://patchwork.freedesktop.org/patch/msgid/20170615201828.23144-2-michel.thierry@intel.com Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/20170620095751.13127-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 15 ++++++++------- drivers/gpu/drm/i915/intel_ringbuffer.h | 1 + 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c2213016fd86..37d1cbf82beb 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2839,7 +2839,7 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv) /* Ensure irq handler finishes, and not run again. 
*/ for_each_engine(engine, dev_priv, id) { - struct drm_i915_gem_request *request; + struct drm_i915_gem_request *request = NULL; /* Prevent the signaler thread from updating the request * state (by calling dma_fence_signal) as we are processing @@ -2871,6 +2871,8 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv) if (request && request->fence.error == -EIO) err = -EIO; /* Previous reset failed! */ } + + engine->hangcheck.active_request = request; } i915_gem_revoke_fences(dev_priv); @@ -2924,7 +2926,7 @@ static void engine_skip_context(struct drm_i915_gem_request *request) static bool i915_gem_reset_request(struct drm_i915_gem_request *request) { /* Read once and return the resolution */ - const bool guilty = engine_stalled(request->engine); + const bool guilty = !i915_gem_request_completed(request); /* The guilty request will get skipped on a hung engine. * @@ -2958,11 +2960,9 @@ static bool i915_gem_reset_request(struct drm_i915_gem_request *request) return guilty; } -static void i915_gem_reset_engine(struct intel_engine_cs *engine) +static void i915_gem_reset_engine(struct intel_engine_cs *engine, + struct drm_i915_gem_request *request) { - struct drm_i915_gem_request *request; - - request = i915_gem_find_active_request(engine); if (request && i915_gem_reset_request(request)) { DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n", engine->name, request->global_seqno); @@ -2988,7 +2988,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv) for_each_engine(engine, dev_priv, id) { struct i915_gem_context *ctx; - i915_gem_reset_engine(engine); + i915_gem_reset_engine(engine, engine->hangcheck.active_request); ctx = fetch_and_zero(&engine->last_retired_context); if (ctx) engine->context_unpin(engine, ctx); @@ -3012,6 +3012,7 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv) lockdep_assert_held(&dev_priv->drm.struct_mutex); for_each_engine(engine, dev_priv, id) { + engine->hangcheck.active_request = NULL; tasklet_enable(&engine->irq_tasklet); kthread_unpark(engine->breadcrumbs.signaler); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 6aa20ac8cde3..d33c93444c0d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -121,6 +121,7 @@ struct intel_engine_hangcheck { unsigned long action_timestamp; int deadlock; struct intel_instdone instdone; + struct drm_i915_gem_request *active_request; bool stalled; }; From ed35dd7b259f10bff34e64847995f291ae8b490c Mon Sep 17 00:00:00 2001 From: Michel Thierry Date: Tue, 20 Jun 2017 10:57:45 +0100 Subject: [PATCH 0025/1795] drm/i915: Update i915.reset to handle engine resets In preparation for engine reset work update this parameter to handle more than one type of reset. Default at the moment is still full gpu reset. 
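For reference, the three accepted values are meant to be read roughly as sketched below; the helper names are illustrative only, and the actual >= 2 gating for engine reset (plus the hardware-capability check) is added by later patches in this series:

    /* Standalone sketch of the i915.reset semantics; not driver code. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool allows_full_reset(int reset)   { return reset >= 1; }
    static bool allows_engine_reset(int reset) { return reset >= 2; }

    int main(void)
    {
            for (int v = 0; v <= 2; v++)
                    printf("i915.reset=%d -> full:%d engine:%d\n",
                           v, allows_full_reset(v), allows_engine_reset(v));
            return 0;
    }

Keeping the parameter an integer rather than a bool leaves room for more reset policies later without another change to the module parameter interface.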
Cc: Chris Wilson Cc: Mika Kuoppala Signed-off-by: Arun Siluvery Signed-off-by: Michel Thierry Link: http://patchwork.freedesktop.org/patch/msgid/20170615201828.23144-3-michel.thierry@intel.com Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/20170620095751.13127-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_params.c | 6 +++--- drivers/gpu/drm/i915/i915_params.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index b6a7e363d076..045cadb77285 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -46,7 +46,7 @@ struct i915_params i915 __read_mostly = { .prefault_disable = 0, .load_detect_test = 0, .force_reset_modeset_test = 0, - .reset = true, + .reset = 1, .error_capture = true, .invert_brightness = 0, .disable_display = 0, @@ -115,8 +115,8 @@ MODULE_PARM_DESC(vbt_sdvo_panel_type, "Override/Ignore selection of SDVO panel mode in the VBT " "(-2=ignore, -1=auto [default], index in VBT BIOS table)"); -module_param_named_unsafe(reset, i915.reset, bool, 0600); -MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)"); +module_param_named_unsafe(reset, i915.reset, int, 0600); +MODULE_PARM_DESC(reset, "Attempt GPU resets (0=disabled, 1=full gpu reset [default], 2=engine reset)"); #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) module_param_named(error_capture, i915.error_capture, bool, 0600); diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 34148cc8637c..febbfdbd30bd 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -51,6 +51,7 @@ func(int, use_mmio_flip); \ func(int, mmio_debug); \ func(int, edp_vswing); \ + func(int, reset); \ func(unsigned int, inject_load_failure); \ /* leave bools at the end to not create holes */ \ func(bool, alpha_support); \ @@ -60,7 +61,6 @@ func(bool, prefault_disable); \ func(bool, load_detect_test); \ func(bool, force_reset_modeset_test); \ - func(bool, reset); \ func(bool, error_capture); \ func(bool, disable_display); \ func(bool, verbose_state_checks); \ From 142bc7d99bcfd17a9bc66b46eb1b5d1b93364549 Mon Sep 17 00:00:00 2001 From: Michel Thierry Date: Tue, 20 Jun 2017 10:57:46 +0100 Subject: [PATCH 0026/1795] drm/i915: Modify error handler for per engine hang recovery This is a preparatory patch which modifies error handler to do per engine hang recovery. The actual patch which implements this sequence follows later in the series. The aim is to prepare existing recovery function to adapt to this new function where applicable (which fails at this point because core implementation is lacking) and continue recovery using legacy full gpu reset. A helper function is also added to query the availability of engine reset. A subsequent patch will add the capability to query which type of reset is present (engine -> full -> no-reset) via the get-param ioctl. It has been decided that the error events that are used to notify user of reset will only be sent in case if full chip reset. In case of just single (or multiple) engine resets, userspace won't be notified by these events. Note that this implementation of engine reset is for i915 directly submitting to the ELSP, where the driver manages the hang detection, recovery and resubmission. 
With GuC submission these tasks are shared between driver and firmware; i915 will still responsible for detecting a hang, and when it does it will have to request GuC to reset that Engine and remind the firmware about the outstanding submissions. This will be added in different patch. v2: rebase, advertise engine reset availability in platform definition, add note about GuC submission. v3: s/*engine_reset*/*reset_engine*/. (Chris) Handle reset as 2 level resets, by first going to engine only and fall backing to full/chip reset as needed, i.e. reset_engine will need the struct_mutex. v4: Pass the engine mask to i915_reset. (Chris) v5: Rebase, update selftests. v6: Rebase, prepare for mutex-less reset engine. v7: Pass reset_engine mask as a function parameter, and iterate over the engine mask for reset_engine. (Chris) v8: Use i915.reset >=2 in has_reset_engine; remove redundant reset logging; add a reset-engine-in-progress flag to prevent concurrent resets, and avoid dual purposing of reset-backoff. (Chris) v9: Support reset of different engines in parallel (Chris) v10: Handle reset-engine flag locking better (Chris) v11: Squash in reporting of per-engine-reset availability. Cc: Chris Wilson Cc: Mika Kuoppala Signed-off-by: Ian Lister Signed-off-by: Tomas Elf Signed-off-by: Arun Siluvery Signed-off-by: Michel Thierry Link: http://patchwork.freedesktop.org/patch/msgid/20170615201828.23144-4-michel.thierry@intel.com Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/20170620095751.13127-5-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_drv.c | 15 ++++++++++++ drivers/gpu/drm/i915/i915_drv.h | 10 ++++++++ drivers/gpu/drm/i915/i915_irq.c | 38 +++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_pci.c | 5 +++- drivers/gpu/drm/i915/intel_uncore.c | 11 +++++++++ 5 files changed, 78 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index fe3d46ee4ddc..e5b31e29382c 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -331,6 +331,8 @@ static int i915_getparam(struct drm_device *dev, void *data, break; case I915_PARAM_HAS_GPU_RESET: value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv); + if (value && intel_has_reset_engine(dev_priv)) + value = 2; break; case I915_PARAM_HAS_RESOURCE_STREAMER: value = HAS_RESOURCE_STREAMER(dev_priv); @@ -1915,6 +1917,19 @@ error: goto finish; } +/** + * i915_reset_engine - reset GPU engine to recover from a hang + * @engine: engine to reset + * + * Reset a specific GPU engine. Useful if a hang is detected. + * Returns zero on successful reset or otherwise an error code. + */ +int i915_reset_engine(struct intel_engine_cs *engine) +{ + /* FIXME: replace me with engine reset sequence */ + return -ENODEV; +} + static int i915_pm_suspend(struct device *kdev) { struct pci_dev *pdev = to_pci_dev(kdev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 69219b5d1198..4220abe1f28b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -752,6 +752,7 @@ struct intel_csr { func(has_csr); \ func(has_ddi); \ func(has_dp_mst); \ + func(has_reset_engine); \ func(has_fbc); \ func(has_fpga_dbg); \ func(has_full_ppgtt); \ @@ -1549,6 +1550,12 @@ struct i915_gpu_error { * inspect the bit and do the reset directly, otherwise the worker * waits for the struct_mutex. 
* + * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to + * acquire the struct_mutex to reset an engine, we need an explicit + * flag to prevent two concurrent reset attempts in the same engine. + * As the number of engines continues to grow, allocate the flags from + * the most significant bits. + * * #I915_WEDGED - If reset fails and we can no longer use the GPU, * we set the #I915_WEDGED bit. Prior to command submission, e.g. * i915_gem_request_alloc(), this bit is checked and the sequence @@ -1558,6 +1565,7 @@ struct i915_gpu_error { #define I915_RESET_BACKOFF 0 #define I915_RESET_HANDOFF 1 #define I915_WEDGED (BITS_PER_LONG - 1) +#define I915_RESET_ENGINE (I915_WEDGED - I915_NUM_ENGINES) /** * Waitqueue to signal when a hang is detected. Used to for waiters @@ -3092,6 +3100,8 @@ extern void i915_driver_unload(struct drm_device *dev); extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask); extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv); extern void i915_reset(struct drm_i915_private *dev_priv); +extern int i915_reset_engine(struct intel_engine_cs *engine); +extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv); extern int intel_guc_reset(struct drm_i915_private *dev_priv); extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); extern void intel_hangcheck_init(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 8e9f4378b5a7..f25e73fe567c 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2715,6 +2715,8 @@ void i915_handle_error(struct drm_i915_private *dev_priv, u32 engine_mask, const char *fmt, ...) { + struct intel_engine_cs *engine; + unsigned int tmp; va_list args; char error_msg[80]; @@ -2734,9 +2736,31 @@ void i915_handle_error(struct drm_i915_private *dev_priv, i915_capture_error_state(dev_priv, engine_mask, error_msg); i915_clear_error_registers(dev_priv); + /* + * Try engine reset when available. We fall back to full reset if + * single reset fails. + */ + if (intel_has_reset_engine(dev_priv)) { + for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { + BUILD_BUG_ON(I915_RESET_HANDOFF >= I915_RESET_ENGINE); + if (test_and_set_bit(I915_RESET_ENGINE + engine->id, + &dev_priv->gpu_error.flags)) + continue; + + if (i915_reset_engine(engine) == 0) + engine_mask &= ~intel_engine_flag(engine); + + clear_bit(I915_RESET_ENGINE + engine->id, + &dev_priv->gpu_error.flags); + wake_up_bit(&dev_priv->gpu_error.flags, + I915_RESET_ENGINE + engine->id); + } + } + if (!engine_mask) goto out; + /* Full reset needs the mutex, stop any other user trying to do so. */ if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) { wait_event(dev_priv->gpu_error.reset_queue, !test_bit(I915_RESET_BACKOFF, @@ -2744,8 +2768,22 @@ void i915_handle_error(struct drm_i915_private *dev_priv, goto out; } + /* Prevent any other reset-engine attempt. 
*/ + for_each_engine(engine, dev_priv, tmp) { + while (test_and_set_bit(I915_RESET_ENGINE + engine->id, + &dev_priv->gpu_error.flags)) + wait_on_bit(&dev_priv->gpu_error.flags, + I915_RESET_ENGINE + engine->id, + TASK_UNINTERRUPTIBLE); + } + i915_reset_device(dev_priv); + for_each_engine(engine, dev_priv, tmp) { + clear_bit(I915_RESET_ENGINE + engine->id, + &dev_priv->gpu_error.flags); + } + clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags); wake_up_all(&dev_priv->gpu_error.reset_queue); diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 506ec32b9e53..04aaf553e3fa 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -310,7 +310,8 @@ static const struct intel_device_info intel_haswell_info = { BDW_COLORS, \ .has_logical_ring_contexts = 1, \ .has_full_48bit_ppgtt = 1, \ - .has_64bit_reloc = 1 + .has_64bit_reloc = 1, \ + .has_reset_engine = 1 #define BDW_PLATFORM \ BDW_FEATURES, \ @@ -342,6 +343,7 @@ static const struct intel_device_info intel_cherryview_info = { .has_gmch_display = 1, .has_aliasing_ppgtt = 1, .has_full_ppgtt = 1, + .has_reset_engine = 1, .display_mmio_offset = VLV_DISPLAY_BASE, GEN_CHV_PIPEOFFSETS, CURSOR_OFFSETS, @@ -387,6 +389,7 @@ static const struct intel_device_info intel_skylake_gt3_info = { .has_aliasing_ppgtt = 1, \ .has_full_ppgtt = 1, \ .has_full_48bit_ppgtt = 1, \ + .has_reset_engine = 1, \ GEN_DEFAULT_PIPEOFFSETS, \ IVB_CURSOR_OFFSETS, \ BDW_COLORS diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 9882724bc2b6..1ed3dd8df850 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1719,6 +1719,17 @@ bool intel_has_gpu_reset(struct drm_i915_private *dev_priv) return intel_get_gpu_reset(dev_priv) != NULL; } +/* + * When GuC submission is enabled, GuC manages ELSP and can initiate the + * engine reset too. For now, fall back to full GPU reset if it is enabled. + */ +bool intel_has_reset_engine(struct drm_i915_private *dev_priv) +{ + return (dev_priv->info.has_reset_engine && + !dev_priv->guc.execbuf_client && + i915.reset >= 2); +} + int intel_guc_reset(struct drm_i915_private *dev_priv) { int ret; From a1ef70e144534777965426393dcaa1721e908e83 Mon Sep 17 00:00:00 2001 From: Michel Thierry Date: Tue, 20 Jun 2017 10:57:47 +0100 Subject: [PATCH 0027/1795] drm/i915: Add support for per engine reset recovery This change implements support for per-engine reset as an initial, less intrusive hang recovery option to be attempted before falling back to the legacy full GPU reset recovery mode if necessary. This is only supported from Gen8 onwards. Hangchecker determines which engines are hung and invokes error handler to recover from it. Error handler schedules recovery for each of those engines that are hung. The recovery procedure is as follows, - identifies the request that caused the hang and it is dropped - force engine to idle: this is done by issuing a reset request - reset the engine - re-init the engine to resume submissions. If engine reset fails then we fall back to heavy weight full gpu reset which resets all engines and reinitiazes complete state of HW and SW. v2: Rebase. v3: s/*engine_reset*/*reset_engine*/; freeze engine and irqs before calling i915_gem_reset_engine (Chris). v4: Rebase, modify i915_gem_reset_prepare to use a ring mask and reuse the function for reset_engine. v5: intel_reset_engine_start/cancel instead of request/unrequest_reset. v6: Clean up reset_engine function to not require mutex, i.e. 
no need to call revoke/restore_fences and _retire_requests (Chris). v7: Remove leftovers from v5, i.e. no need to disable irq, hold forcewake or wakeup the handoff bit (Chris). v8: engine_retire_requests should be (and it was) static; explain that we have to re-init the engine after reset, which is why the init_hw call is needed; check reset-in-progress flag (Chris). v9: Rebase, include code to pass the active request to gem_reset_engine (as it is already done in full reset). Remove unnecessary intel_reset_engine_start/cancel, these are executed as part of the reset. v10: Rebase, use the right I915_RESET_ENGINE flag. v11: Fixup to call reset_finish_engine even on error. Cc: Chris Wilson Cc: Mika Kuoppala Signed-off-by: Tomas Elf Signed-off-by: Arun Siluvery Signed-off-by: Michel Thierry Link: http://patchwork.freedesktop.org/patch/msgid/20170615201828.23144-6-michel.thierry@intel.com Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/20170620095751.13127-6-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_drv.c | 50 +++++++++++++++++- drivers/gpu/drm/i915/i915_drv.h | 5 ++ drivers/gpu/drm/i915/i915_gem.c | 93 ++++++++++++++++++++------------- 3 files changed, 110 insertions(+), 38 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index e5b31e29382c..2ecac000e5da 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1923,11 +1923,57 @@ error: * * Reset a specific GPU engine. Useful if a hang is detected. * Returns zero on successful reset or otherwise an error code. + * + * Procedure is: + * - identifies the request that caused the hang and it is dropped + * - reset engine (which will force the engine to idle) + * - re-init/configure engine */ int i915_reset_engine(struct intel_engine_cs *engine) { - /* FIXME: replace me with engine reset sequence */ - return -ENODEV; + struct i915_gpu_error *error = &engine->i915->gpu_error; + struct drm_i915_gem_request *active_request; + int ret; + + GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags)); + + DRM_DEBUG_DRIVER("resetting %s\n", engine->name); + + active_request = i915_gem_reset_prepare_engine(engine); + if (IS_ERR(active_request)) { + DRM_DEBUG_DRIVER("Previous reset failed, promote to full reset\n"); + ret = PTR_ERR(active_request); + goto out; + } + + /* + * The request that caused the hang is stuck on elsp, we know the + * active request and can drop it, adjust head to skip the offending + * request to resume executing remaining requests in the queue. + */ + i915_gem_reset_engine(engine, active_request); + + /* Finally, reset just this engine. */ + ret = intel_gpu_reset(engine->i915, intel_engine_flag(engine)); + + i915_gem_reset_finish_engine(engine); + + if (ret) { + /* If we fail here, we expect to fallback to a global reset */ + DRM_DEBUG_DRIVER("Failed to reset %s, ret=%d\n", + engine->name, ret); + goto out; + } + + /* + * The engine and its registers (and workarounds in case of render) + * have been reset to their default values. Follow the init_ring + * process to program RING_MODE, HWSP and re-enable submission. 
+ */ + ret = engine->init_hw(engine); + +out: + return ret; } static int i915_pm_suspend(struct device *kdev) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4220abe1f28b..152e30750dd4 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3474,11 +3474,16 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error) return READ_ONCE(error->reset_count); } +struct drm_i915_gem_request * +i915_gem_reset_prepare_engine(struct intel_engine_cs *engine); int i915_gem_reset_prepare(struct drm_i915_private *dev_priv); void i915_gem_reset(struct drm_i915_private *dev_priv); +void i915_gem_reset_finish_engine(struct intel_engine_cs *engine); void i915_gem_reset_finish(struct drm_i915_private *dev_priv); void i915_gem_set_wedged(struct drm_i915_private *dev_priv); bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv); +void i915_gem_reset_engine(struct intel_engine_cs *engine, + struct drm_i915_gem_request *request); void i915_gem_init_mmio(struct drm_i915_private *i915); int __must_check i915_gem_init(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 37d1cbf82beb..1353491c1010 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2831,45 +2831,61 @@ static bool engine_stalled(struct intel_engine_cs *engine) return true; } +/* + * Ensure irq handler finishes, and not run again. + * Also return the active request so that we only search for it once. + */ +struct drm_i915_gem_request * +i915_gem_reset_prepare_engine(struct intel_engine_cs *engine) +{ + struct drm_i915_gem_request *request = NULL; + + /* Prevent the signaler thread from updating the request + * state (by calling dma_fence_signal) as we are processing + * the reset. The write from the GPU of the seqno is + * asynchronous and the signaler thread may see a different + * value to us and declare the request complete, even though + * the reset routine have picked that request as the active + * (incomplete) request. This conflict is not handled + * gracefully! + */ + kthread_park(engine->breadcrumbs.signaler); + + /* Prevent request submission to the hardware until we have + * completed the reset in i915_gem_reset_finish(). If a request + * is completed by one engine, it may then queue a request + * to a second via its engine->irq_tasklet *just* as we are + * calling engine->init_hw() and also writing the ELSP. + * Turning off the engine->irq_tasklet until the reset is over + * prevents the race. + */ + tasklet_kill(&engine->irq_tasklet); + tasklet_disable(&engine->irq_tasklet); + + if (engine->irq_seqno_barrier) + engine->irq_seqno_barrier(engine); + + if (engine_stalled(engine)) { + request = i915_gem_find_active_request(engine); + if (request && request->fence.error == -EIO) + request = ERR_PTR(-EIO); /* Previous reset failed! */ + } + + return request; +} + int i915_gem_reset_prepare(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; + struct drm_i915_gem_request *request; enum intel_engine_id id; int err = 0; - /* Ensure irq handler finishes, and not run again. */ for_each_engine(engine, dev_priv, id) { - struct drm_i915_gem_request *request = NULL; - - /* Prevent the signaler thread from updating the request - * state (by calling dma_fence_signal) as we are processing - * the reset. 
The write from the GPU of the seqno is - * asynchronous and the signaler thread may see a different - * value to us and declare the request complete, even though - * the reset routine have picked that request as the active - * (incomplete) request. This conflict is not handled - * gracefully! - */ - kthread_park(engine->breadcrumbs.signaler); - - /* Prevent request submission to the hardware until we have - * completed the reset in i915_gem_reset_finish(). If a request - * is completed by one engine, it may then queue a request - * to a second via its engine->irq_tasklet *just* as we are - * calling engine->init_hw() and also writing the ELSP. - * Turning off the engine->irq_tasklet until the reset is over - * prevents the race. - */ - tasklet_kill(&engine->irq_tasklet); - tasklet_disable(&engine->irq_tasklet); - - if (engine->irq_seqno_barrier) - engine->irq_seqno_barrier(engine); - - if (engine_stalled(engine)) { - request = i915_gem_find_active_request(engine); - if (request && request->fence.error == -EIO) - err = -EIO; /* Previous reset failed! */ + request = i915_gem_reset_prepare_engine(engine); + if (IS_ERR(request)) { + err = PTR_ERR(request); + continue; } engine->hangcheck.active_request = request; @@ -2960,8 +2976,8 @@ static bool i915_gem_reset_request(struct drm_i915_gem_request *request) return guilty; } -static void i915_gem_reset_engine(struct intel_engine_cs *engine, - struct drm_i915_gem_request *request) +void i915_gem_reset_engine(struct intel_engine_cs *engine, + struct drm_i915_gem_request *request) { if (request && i915_gem_reset_request(request)) { DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n", @@ -3004,6 +3020,12 @@ void i915_gem_reset(struct drm_i915_private *dev_priv) } } +void i915_gem_reset_finish_engine(struct intel_engine_cs *engine) +{ + tasklet_enable(&engine->irq_tasklet); + kthread_unpark(engine->breadcrumbs.signaler); +} + void i915_gem_reset_finish(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; @@ -3013,8 +3035,7 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv) for_each_engine(engine, dev_priv, id) { engine->hangcheck.active_request = NULL; - tasklet_enable(&engine->irq_tasklet); - kthread_unpark(engine->breadcrumbs.signaler); + i915_gem_reset_finish_engine(engine); } } From 702c8f8e5daba24777b29899fb9576fe64a924ea Mon Sep 17 00:00:00 2001 From: Michel Thierry Date: Tue, 20 Jun 2017 10:57:48 +0100 Subject: [PATCH 0028/1795] drm/i915: Add engine reset count to error state Driver maintains count of how many times a given engine is reset, useful to capture this in error state also. It gives an idea of how engine is coping up with the workloads it is executing before this error state. A follow-up patch will provide this information in debugfs. 
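A stripped-down model of the bookkeeping this adds is sketched below; the engine count and helper names are illustrative only, while the driver itself indexes error->reset_engine_count[] by engine->id, bumps it only after a successful engine reset, and reads it back with READ_ONCE():

    /* Standalone sketch of the per-engine reset counters; not driver code. */
    #include <stdio.h>

    #define NUM_ENGINES 5   /* illustrative value */

    struct gpu_error_model {
            unsigned int reset_count;                       /* full-GPU resets */
            unsigned int reset_engine_count[NUM_ENGINES];   /* per-engine resets */
    };

    int main(void)
    {
            struct gpu_error_model e = { 0 };

            e.reset_engine_count[0]++;      /* e.g. render engine recovered */
            e.reset_engine_count[0]++;
            e.reset_count++;                /* a later full-GPU reset */

            printf("full gpu reset = %u\n", e.reset_count);
            printf("engine 0 = %u\n", e.reset_engine_count[0]);
            return 0;
    }
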
v2: s/engine_reset/reset_engine/ (Chris) Define count as unsigned int (Tvrtko) Cc: Chris Wilson Cc: Mika Kuoppala Signed-off-by: Arun Siluvery Signed-off-by: Michel Thierry Link: http://patchwork.freedesktop.org/patch/msgid/20170615201828.23144-7-michel.thierry@intel.com Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/20170620095751.13127-7-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_drv.c | 3 +++ drivers/gpu/drm/i915/i915_drv.h | 10 ++++++++++ drivers/gpu/drm/i915/i915_gpu_error.c | 3 +++ 3 files changed, 16 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 2ecac000e5da..d35a114c46ed 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1971,7 +1971,10 @@ int i915_reset_engine(struct intel_engine_cs *engine) * process to program RING_MODE, HWSP and re-enable submission. */ ret = engine->init_hw(engine); + if (ret) + goto out; + error->reset_engine_count[engine->id]++; out: return ret; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 152e30750dd4..90cc63e5ced8 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -917,6 +917,7 @@ struct i915_gpu_state { enum intel_engine_hangcheck_action hangcheck_action; struct i915_address_space *vm; int num_requests; + u32 reset_count; /* position of active request inside the ring */ u32 rq_head, rq_post, rq_tail; @@ -1567,6 +1568,9 @@ struct i915_gpu_error { #define I915_WEDGED (BITS_PER_LONG - 1) #define I915_RESET_ENGINE (I915_WEDGED - I915_NUM_ENGINES) + /** Number of times an engine has been reset */ + u32 reset_engine_count[I915_NUM_ENGINES]; + /** * Waitqueue to signal when a hang is detected. Used to for waiters * to release the struct_mutex for the reset to procede. 
@@ -3474,6 +3478,12 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error) return READ_ONCE(error->reset_count); } +static inline u32 i915_reset_engine_count(struct i915_gpu_error *error, + struct intel_engine_cs *engine) +{ + return READ_ONCE(error->reset_engine_count[engine->id]); +} + struct drm_i915_gem_request * i915_gem_reset_prepare_engine(struct intel_engine_cs *engine); int i915_gem_reset_prepare(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index e18f350bc364..ae70283470a6 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -463,6 +463,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m, err_printf(m, " hangcheck action timestamp: %lu, %u ms ago\n", ee->hangcheck_timestamp, jiffies_to_msecs(jiffies - ee->hangcheck_timestamp)); + err_printf(m, " engine reset count: %u\n", ee->reset_count); error_print_request(m, " ELSP[0]: ", &ee->execlist[0]); error_print_request(m, " ELSP[1]: ", &ee->execlist[1]); @@ -1236,6 +1237,8 @@ static void error_record_engine_registers(struct i915_gpu_state *error, ee->hangcheck_timestamp = engine->hangcheck.action_timestamp; ee->hangcheck_action = engine->hangcheck.action; ee->hangcheck_stalled = engine->hangcheck.stalled; + ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error, + engine); if (USES_PPGTT(dev_priv)) { int i; From 061d06a21cd1e049f1f36fcf14d68893e9fb9493 Mon Sep 17 00:00:00 2001 From: Michel Thierry Date: Tue, 20 Jun 2017 10:57:49 +0100 Subject: [PATCH 0029/1795] drm/i915: Export per-engine reset count info to debugfs A new variable is added to export the reset counts to debugfs, this includes full gpu reset and engine reset count. This is useful for tests where they are expected to trigger reset; these counts are checked before and after the test to ensure the same. v2: Include reset engine count in i915_engine_info too (Chris). 
Cc: Chris Wilson Cc: Mika Kuoppala Signed-off-by: Arun Siluvery Signed-off-by: Michel Thierry Link: http://patchwork.freedesktop.org/patch/msgid/20170615201828.23144-8-michel.thierry@intel.com Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/20170620095751.13127-8-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_debugfs.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 6103d0079d16..1f1176b6400e 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1403,6 +1403,23 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) return 0; } +static int i915_reset_info(struct seq_file *m, void *unused) +{ + struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct i915_gpu_error *error = &dev_priv->gpu_error; + struct intel_engine_cs *engine; + enum intel_engine_id id; + + seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error)); + + for_each_engine(engine, dev_priv, id) { + seq_printf(m, "%s = %u\n", engine->name, + i915_reset_engine_count(error, engine)); + } + + return 0; +} + static int ironlake_drpc_info(struct seq_file *m) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -3285,6 +3302,7 @@ static int i915_display_info(struct seq_file *m, void *unused) static int i915_engine_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct i915_gpu_error *error = &dev_priv->gpu_error; struct intel_engine_cs *engine; enum intel_engine_id id; @@ -3308,6 +3326,8 @@ static int i915_engine_info(struct seq_file *m, void *unused) engine->hangcheck.seqno, jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp), engine->timeline->inflight_seqnos); + seq_printf(m, "\tReset count: %d\n", + i915_reset_engine_count(error, engine)); rcu_read_lock(); @@ -4820,6 +4840,7 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_huc_load_status", i915_huc_load_status_info, 0}, {"i915_frequency_info", i915_frequency_info, 0}, {"i915_hangcheck_info", i915_hangcheck_info, 0}, + {"i915_reset_info", i915_reset_info, 0}, {"i915_drpc_info", i915_drpc_info, 0}, {"i915_emon_status", i915_emon_status, 0}, {"i915_ring_freq_table", i915_ring_freq_table, 0}, From abeb4def31d9c3dbf06052e65cd884730ec8667e Mon Sep 17 00:00:00 2001 From: Michel Thierry Date: Tue, 20 Jun 2017 10:57:50 +0100 Subject: [PATCH 0030/1795] drm/i915/selftests: reset engine self tests Check that we can reset specific engines, also check the fallback to full reset if something didn't work. v2: rebase. v3: use RESET_ENGINE_IN_PROGRESS flag. v4: use I915_RESET_ENGINE flag. 
Signed-off-by: Michel Thierry Link: http://patchwork.freedesktop.org/patch/msgid/20170615201828.23144-12-michel.thierry@intel.com Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/20170620095751.13127-9-chris@chris-wilson.co.uk --- .../gpu/drm/i915/selftests/intel_hangcheck.c | 148 ++++++++++++++++++ 1 file changed, 148 insertions(+) diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index cc00a361f0fa..af475189bd52 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -324,6 +324,54 @@ static int igt_global_reset(void *arg) return err; } +static int igt_reset_engine(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + unsigned int reset_count, reset_engine_count; + int err = 0; + + /* Check that we can issue a global GPU and engine reset */ + + if (!intel_has_reset_engine(i915)) + return 0; + + for_each_engine(engine, i915, id) { + set_bit(I915_RESET_ENGINE + engine->id, &i915->gpu_error.flags); + reset_count = i915_reset_count(&i915->gpu_error); + reset_engine_count = i915_reset_engine_count(&i915->gpu_error, + engine); + + err = i915_reset_engine(engine); + if (err) { + pr_err("i915_reset_engine failed\n"); + break; + } + + if (i915_reset_count(&i915->gpu_error) != reset_count) { + pr_err("Full GPU reset recorded! (engine reset expected)\n"); + err = -EINVAL; + break; + } + + if (i915_reset_engine_count(&i915->gpu_error, engine) == + reset_engine_count) { + pr_err("No %s engine reset recorded!\n", engine->name); + err = -EINVAL; + break; + } + + clear_bit(I915_RESET_ENGINE + engine->id, + &i915->gpu_error.flags); + } + + if (i915_terminally_wedged(&i915->gpu_error)) + err = -EIO; + + return err; +} + static u32 fake_hangcheck(struct drm_i915_gem_request *rq) { u32 reset_count; @@ -530,13 +578,113 @@ unlock: return err; } +static int igt_render_engine_reset_fallback(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine = i915->engine[RCS]; + struct hang h; + struct drm_i915_gem_request *rq; + unsigned int reset_count, reset_engine_count; + int err = 0; + + /* Check that we can issue a global GPU and engine reset */ + + if (!intel_has_reset_engine(i915)) + return 0; + + set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags); + mutex_lock(&i915->drm.struct_mutex); + + err = hang_init(&h, i915); + if (err) + goto unlock; + + rq = hang_create_request(&h, engine, i915->kernel_context); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto fini; + } + + i915_gem_request_get(rq); + __i915_add_request(rq, true); + + /* make reset engine fail */ + rq->fence.error = -EIO; + + if (!wait_for_hang(&h, rq)) { + pr_err("Failed to start request %x\n", rq->fence.seqno); + err = -EIO; + goto out_rq; + } + + reset_engine_count = i915_reset_engine_count(&i915->gpu_error, engine); + reset_count = fake_hangcheck(rq); + + /* unlock since we'll call handle_error */ + mutex_unlock(&i915->drm.struct_mutex); + clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags); + wake_up_all(&i915->gpu_error.reset_queue); + + i915_handle_error(i915, intel_engine_flag(engine), "live test"); + + if (i915_reset_engine_count(&i915->gpu_error, engine) != + reset_engine_count) { + pr_err("render engine reset recorded! 
(full reset expected)\n"); + err = -EINVAL; + goto out_rq; + } + + if (i915_reset_count(&i915->gpu_error) == reset_count) { + pr_err("No full GPU reset recorded!\n"); + err = -EINVAL; + goto out_rq; + } + + /* + * by using fence.error = -EIO, full reset sets the wedged flag, do one + * more full reset to re-enable the hw. + */ + if (i915_terminally_wedged(&i915->gpu_error)) { + set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags); + mutex_lock(&i915->drm.struct_mutex); + rq->fence.error = 0; + + set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags); + i915_reset(i915); + GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, + &i915->gpu_error.flags)); + + if (i915_reset_count(&i915->gpu_error) == reset_count) { + pr_err("No full GPU reset recorded!\n"); + err = -EINVAL; + goto out_rq; + } + } + +out_rq: + i915_gem_request_put(rq); +fini: + hang_fini(&h); +unlock: + mutex_unlock(&i915->drm.struct_mutex); + clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags); + wake_up_all(&i915->gpu_error.reset_queue); + + if (i915_terminally_wedged(&i915->gpu_error)) + return -EIO; + + return err; +} + int intel_hangcheck_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_hang_sanitycheck), SUBTEST(igt_global_reset), + SUBTEST(igt_reset_engine), SUBTEST(igt_wait_reset), SUBTEST(igt_reset_queue), + SUBTEST(igt_render_engine_reset_fallback), }; if (!intel_has_gpu_reset(i915)) From d3d3765fc1c176ff472a503d909508820c3f99d6 Mon Sep 17 00:00:00 2001 From: Michel Thierry Date: Tue, 20 Jun 2017 10:57:51 +0100 Subject: [PATCH 0031/1795] drm/i915: Enable Engine reset and recovery support This feature is made available only from Gen8, for previous gen devices driver uses legacy full gpu reset. Cc: Chris Wilson Cc: Mika Kuoppala Signed-off-by: Tomas Elf Signed-off-by: Arun Siluvery Signed-off-by: Michel Thierry Link: http://patchwork.freedesktop.org/patch/msgid/20170615201828.23144-10-michel.thierry@intel.com Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/20170620095751.13127-10-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_params.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 045cadb77285..14e2c2e57f96 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -46,7 +46,7 @@ struct i915_params i915 __read_mostly = { .prefault_disable = 0, .load_detect_test = 0, .force_reset_modeset_test = 0, - .reset = 1, + .reset = 2, .error_capture = true, .invert_brightness = 0, .disable_display = 0, @@ -116,7 +116,7 @@ MODULE_PARM_DESC(vbt_sdvo_panel_type, "(-2=ignore, -1=auto [default], index in VBT BIOS table)"); module_param_named_unsafe(reset, i915.reset, int, 0600); -MODULE_PARM_DESC(reset, "Attempt GPU resets (0=disabled, 1=full gpu reset [default], 2=engine reset)"); +MODULE_PARM_DESC(reset, "Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])"); #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) module_param_named(error_capture, i915.error_capture, bool, 0600); From 25ffaa67459e988e73210543f7e05dfbf3f16163 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 20 Jun 2017 13:43:20 +0100 Subject: [PATCH 0032/1795] drm/i915: Pass the right flags to i915_vma_move_to_active() i915_vma_move_to_active() takes the execobject flags and not a boolean! Instead of passing EXEC_OBJECT_WRITE we passed true [i.e. 
EXEC_OBJECT_NEEDS_FENCE] causing us to start tracking the vma->last_fence access and since we forgot to clear that on unbinding, we caused a use-after-free. [ 321.263854] BUG: KASAN: use-after-free in i915_gem_request_retire+0x1728/0x1740 [i915] [ 321.264001] Read of size 8 at addr ffff880100fc67d8 by task gem_exec_reloc/2868 [ 321.264181] CPU: 0 PID: 2868 Comm: gem_exec_reloc Not tainted 4.12.0-rc6-CI-Custom_2759+ #1 [ 321.264195] Hardware name: GIGABYTE GB-BXBT-1900/MZBAYAB-00, BIOS F6 02/17/2015 [ 321.264208] Call Trace: [ 321.264234] dump_stack+0x67/0x99 [ 321.264260] print_address_description+0x77/0x290 [ 321.264437] ? i915_gem_request_retire+0x1728/0x1740 [i915] [ 321.264459] kasan_report+0x269/0x350 [ 321.264487] __asan_report_load8_noabort+0x14/0x20 [ 321.264660] i915_gem_request_retire+0x1728/0x1740 [i915] [ 321.264841] ? intel_ring_context_pin+0x131/0x690 [i915] [ 321.265021] i915_gem_request_alloc+0x2c6/0x1220 [i915] [ 321.265044] ? _raw_spin_unlock_irqrestore+0x3d/0x60 [ 321.265226] i915_gem_do_execbuffer+0xac0/0x2a20 [i915] [ 321.265250] ? __lock_acquire+0xceb/0x5450 [ 321.265269] ? entry_SYSCALL_64_fastpath+0x1c/0xb1 [ 321.265291] ? kvmalloc_node+0x6b/0x80 [ 321.265310] ? kvmalloc_node+0x6b/0x80 [ 321.265489] ? eb_relocate_slow+0xbe0/0xbe0 [i915] [ 321.265520] ? ___slab_alloc.constprop.28+0x2ab/0x3d0 [ 321.265549] ? debug_check_no_locks_freed+0x280/0x280 [ 321.265591] ? __might_fault+0xc6/0x1b0 [ 321.265782] i915_gem_execbuffer2+0x14a/0x3f0 [i915] [ 321.265815] drm_ioctl+0x4ba/0xaa0 [ 321.265986] ? i915_gem_execbuffer+0xde0/0xde0 [i915] [ 321.266017] ? drm_getunique+0x270/0x270 [ 321.266068] do_vfs_ioctl+0x17f/0xfa0 [ 321.266091] ? __fget+0x1ba/0x330 [ 321.266112] ? lock_acquire+0x390/0x390 [ 321.266133] ? ioctl_preallocate+0x1d0/0x1d0 [ 321.266164] ? __fget+0x1db/0x330 [ 321.266194] ? __fget_light+0x79/0x1f0 [ 321.266219] SyS_ioctl+0x3c/0x70 [ 321.266247] entry_SYSCALL_64_fastpath+0x1c/0xb1 [ 321.266265] RIP: 0033:0x7fcede207357 [ 321.266279] RSP: 002b:00007ffef0effe58 EFLAGS: 00000246 ORIG_RAX: 0000000000000010 [ 321.266307] RAX: ffffffffffffffda RBX: 0000000000000002 RCX: 00007fcede207357 [ 321.266321] RDX: 00007ffef0effef0 RSI: 0000000040406469 RDI: 0000000000000004 [ 321.266335] RBP: ffffffff812097c6 R08: 0000000000000008 R09: 0000000000000000 [ 321.266349] R10: 0000000000000008 R11: 0000000000000246 R12: ffff880116bcff98 [ 321.266363] R13: ffffffff81cb7cb3 R14: ffff880116bcff70 R15: 0000000000000000 [ 321.266385] ? __this_cpu_preempt_check+0x13/0x20 [ 321.266406] ? 
trace_hardirqs_off_caller+0x1d6/0x2c0 [ 321.266487] Allocated by task 2868: [ 321.266568] save_stack_trace+0x16/0x20 [ 321.266586] kasan_kmalloc+0xee/0x180 [ 321.266602] kasan_slab_alloc+0x12/0x20 [ 321.266620] kmem_cache_alloc+0xc7/0x2e0 [ 321.266795] i915_vma_instance+0x28c/0x1540 [i915] [ 321.266964] eb_lookup_vmas+0x5a7/0x2250 [i915] [ 321.267130] i915_gem_do_execbuffer+0x69a/0x2a20 [i915] [ 321.267296] i915_gem_execbuffer2+0x14a/0x3f0 [i915] [ 321.267315] drm_ioctl+0x4ba/0xaa0 [ 321.267333] do_vfs_ioctl+0x17f/0xfa0 [ 321.267350] SyS_ioctl+0x3c/0x70 [ 321.267369] entry_SYSCALL_64_fastpath+0x1c/0xb1 [ 321.267428] Freed by task 177: [ 321.267502] save_stack_trace+0x16/0x20 [ 321.267521] kasan_slab_free+0xad/0x180 [ 321.267539] kmem_cache_free+0xc5/0x340 [ 321.267710] i915_vma_unbind+0x666/0x10a0 [i915] [ 321.267880] i915_vma_close+0x23a/0x2f0 [i915] [ 321.268048] __i915_gem_free_objects+0x17d/0xc70 [i915] [ 321.268215] __i915_gem_free_work+0x49/0x70 [i915] [ 321.268234] process_one_work+0x66f/0x1410 [ 321.268252] worker_thread+0xe1/0xe90 [ 321.268269] kthread+0x304/0x410 [ 321.268285] ret_from_fork+0x27/0x40 [ 321.268346] The buggy address belongs to the object at ffff880100fc6640 which belongs to the cache i915_vma of size 656 [ 321.268550] The buggy address is located 408 bytes inside of 656-byte region [ffff880100fc6640, ffff880100fc68d0) [ 321.268741] The buggy address belongs to the page: [ 321.268837] page:ffffea000403f000 count:1 mapcount:0 mapping: (null) index:0xffff880100fc5980 compound_mapcount: 0 [ 321.269045] flags: 0x8000000000008100(slab|head) [ 321.269147] raw: 8000000000008100 0000000000000000 ffff880100fc5980 00000001001e001d [ 321.269312] raw: ffffea0004038e20 ffff880116b46240 ffff88011646c640 0000000000000000 [ 321.269484] page dumped because: kasan: bad access detected [ 321.269665] Memory state around the buggy address: [ 321.269778] ffff880100fc6680: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [ 321.269949] ffff880100fc6700: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [ 321.270115] >ffff880100fc6780: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [ 321.270279] ^ [ 321.270410] ffff880100fc6800: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [ 321.270576] ffff880100fc6880: fb fb fb fb fb fb fb fb fb fb fc fc fc fc fc fc [ 321.270740] ================================================================== [ 321.270903] Disabling lock debugging due to kernel taint Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=101511 Fixes: 7dd4f6729f92 ("drm/i915: Async GPU relocation processing") Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Cc: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20170620124321.1108-2-chris@chris-wilson.co.uk Reviewed-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 35d1f8e8906e..b2457556591c 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1200,7 +1200,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, reservation_object_unlock(batch->resv); i915_vma_unpin(batch); - i915_vma_move_to_active(vma, rq, true); + i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); reservation_object_lock(vma->resv, NULL); reservation_object_add_excl_fence(vma->resv, &rq->fence); reservation_object_unlock(vma->resv); From 85909716621702c777de512377fe952fb5677aec Mon Sep 17 00:00:00 2001 From: Arvind 
Yadav Date: Tue, 20 Jun 2017 10:44:33 +0530 Subject: [PATCH 0033/1795] drm: armada: make of_device_ids const. of_device_ids are not supposed to change at runtime. All functions working with of_device_ids provided by work with const of_device_ids. So mark the non-const structs as const. File size before: text data bss dec hex filename 8836 744 0 9580 256c drivers/gpu/drm/armada/armada_crtc.o File size after constify armada_lcd_of_match: text data bss dec hex filename 9220 328 0 9548 254c drivers/gpu/drm/armada/armada_crtc.o Signed-off-by: Arvind Yadav Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/ff9a9a908cf347775ab62cfadfde986de72dcf13.1497935382.git.arvind.yadav.cs@gmail.com --- drivers/gpu/drm/armada/armada_crtc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 4fe19fde84f9..94b76bdd7553 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -1364,7 +1364,7 @@ static int armada_lcd_remove(struct platform_device *pdev) return 0; } -static struct of_device_id armada_lcd_of_match[] = { +static const struct of_device_id armada_lcd_of_match[] = { { .compatible = "marvell,dove-lcd", .data = &armada510_ops, From a25bcadd0d9204d9d9e00de4d71d7be570f44c6c Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 21 Jun 2017 16:34:20 +1000 Subject: [PATCH 0034/1795] drm/i915: select CRC32 kbuild test robot found a build failure when building with thin archives: http://marc.info/?l=linux-kbuild&m=149802285009737&w=2 Signed-off-by: Nicholas Piggin Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170621063420.24913-1-npiggin@gmail.com --- drivers/gpu/drm/i915/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index a5cd5dacf055..e9e64e8e9765 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -21,6 +21,7 @@ config DRM_I915 select ACPI_BUTTON if ACPI select SYNC_FILE select IOSF_MBI + select CRC32 help Choose this option if you have a system that has "Intel Graphics Media Accelerator" or "HD Graphics" integrated graphics, From 760a898d8069111704e1bd43f00ebf369ae46e57 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 20 Jun 2017 13:43:19 +0100 Subject: [PATCH 0035/1795] drm/i915: Retire the VMA's fence tracker before unbinding Since we may track unfenced access (GPU access to the vma that explicitly requires no fence), vma->last_fence may be set without any attached fence (vma->fence) and so will not be flushed when we call i915_vma_put_fence(). Since we stopped doing a full retire of the activity trackers for unbind, we need to explicitly retire each tracker. 
Fixes: b0decaf75bd9 ("drm/i915: Track active vma requests") Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Cc: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20170620124321.1108-1-chris@chris-wilson.co.uk Reviewed-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/i915_vma.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 532c709febbd..1cfe137cdc32 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -672,6 +672,11 @@ int i915_vma_unbind(struct i915_vma *vma) break; } + if (!ret) { + ret = i915_gem_active_retire(&vma->last_fence, + &vma->vm->i915->drm.struct_mutex); + } + __i915_vma_unpin(vma); if (ret) return ret; From 7a3bc034ed5c4b57c145a025070739ec41434e94 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 20 Jun 2017 13:43:21 +0100 Subject: [PATCH 0036/1795] drm/i915: Assert the vma's active tracking is clear before free In looking at a use-after-free on Baytrail, it looks like the VMA's activity tracking is suspect. Add some asserts to catch freeing the VMA before we have decoupled all of its i915_gem_active trackers. References: https://bugs.freedesktop.org/show_bug.cgi?id=101511 Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/20170620124321.1108-3-chris@chris-wilson.co.uk Reviewed-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/i915_vma.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 1cfe137cdc32..958be0a95960 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -579,11 +579,17 @@ err_unpin: static void i915_vma_destroy(struct i915_vma *vma) { + int i; + GEM_BUG_ON(vma->node.allocated); GEM_BUG_ON(i915_vma_is_active(vma)); GEM_BUG_ON(!i915_vma_is_closed(vma)); GEM_BUG_ON(vma->fence); + for (i = 0; i < ARRAY_SIZE(vma->last_read); i++) + GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i])); + GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence)); + list_del(&vma->vm_link); if (!i915_vma_is_ggtt(vma)) i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); @@ -680,9 +686,8 @@ int i915_vma_unbind(struct i915_vma *vma) __i915_vma_unpin(vma); if (ret) return ret; - - GEM_BUG_ON(i915_vma_is_active(vma)); } + GEM_BUG_ON(i915_vma_is_active(vma)); if (i915_vma_is_pinned(vma)) return -EBUSY; From 0caf81b5c53d9bd332a95dbcb44db8de0b397a7c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 17 Jun 2017 12:57:44 +0100 Subject: [PATCH 0037/1795] drm/i915: Hold struct_mutex for per-file stats in debugfs/i915_gem_object As we walk the obj->vma_list in per_file_stats(), we need to hold struct_mutex to prevent alteration of that list. 
Fixes: 1d2ac403ae3b ("drm: Protect dev->filelist with its own mutex") Fixes: c84455b4bacc ("drm/i915: Move debug only per-request pid tracking from request to ctx") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=101460 Signed-off-by: Chris Wilson Cc: Daniel Vetter Cc: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/20170617115744.4452-1-chris@chris-wilson.co.uk Reviewed-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/i915_debugfs.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 1f1176b6400e..f7aa6cbe3a2e 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -292,6 +292,8 @@ static int per_file_stats(int id, void *ptr, void *data) struct file_stats *stats = data; struct i915_vma *vma; + lockdep_assert_held(&obj->base.dev->struct_mutex); + stats->count++; stats->total += obj->base.size; if (!obj->bind_count) @@ -476,6 +478,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data) struct drm_i915_gem_request *request; struct task_struct *task; + mutex_lock(&dev->struct_mutex); + memset(&stats, 0, sizeof(stats)); stats.file_priv = file->driver_priv; spin_lock(&file->table_lock); @@ -487,7 +491,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data) * still alive (e.g. get_pid(current) => fork() => exit()). * Therefore, we need to protect this ->comm access using RCU. */ - mutex_lock(&dev->struct_mutex); request = list_first_entry_or_null(&file_priv->mm.request_list, struct drm_i915_gem_request, client_link); @@ -497,6 +500,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data) PIDTYPE_PID); print_file_stats(m, task ? task->comm : "", stats); rcu_read_unlock(); + mutex_unlock(&dev->struct_mutex); } mutex_unlock(&dev->filelist_mutex); From 6c8e54714cc54d080189e42fb40b24dddfe85339 Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Wed, 29 Mar 2017 16:44:00 +0200 Subject: [PATCH 0038/1795] drm/atmel-hlcdc: Remove unnecessary NULL check drm_fbdev_cma_hotplug_event() already checks for NULL pointers before dereferencing, so callers don't need to do that. Reviewed-by: Daniel Vetter Signed-off-by: Thierry Reding Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170329144401.1804-11-thierry.reding@gmail.com --- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 30dbffdb45a3..51d22ad9a3a3 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -451,8 +451,7 @@ static void atmel_hlcdc_fb_output_poll_changed(struct drm_device *dev) { struct atmel_hlcdc_dc *dc = dev->dev_private; - if (dc->fbdev) - drm_fbdev_cma_hotplug_event(dc->fbdev); + drm_fbdev_cma_hotplug_event(dc->fbdev); } struct atmel_hlcdc_dc_commit { From 5a1535b110d73ed8d29c504eeb5fa92ac5b47cd9 Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Wed, 29 Mar 2017 16:44:01 +0200 Subject: [PATCH 0039/1795] drm/rockchip: Remove unnecessary NULL check The expression &private->fbdev_helper can never be NULL, so the check is completely unnecessary. 
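To spell this out (an illustrative sketch, not part of the patch; the real private struct has more fields): fbdev_helper is embedded by value in the driver's private structure, so its address is just the container pointer plus a constant offset and can never be NULL while the container pointer itself is valid.

	struct rockchip_drm_private {
		struct drm_fb_helper fbdev_helper;	/* embedded by value, not a pointer */
		/* ... */
	};

	struct rockchip_drm_private *private = dev->dev_private;

	/* &private->fbdev_helper is (void *)private plus
	 * offsetof(struct rockchip_drm_private, fbdev_helper),
	 * hence non-NULL for any valid 'private'. */
	drm_fb_helper_hotplug_event(&private->fbdev_helper);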
Reviewed-by: Daniel Vetter Signed-off-by: Thierry Reding Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170329144401.1804-12-thierry.reding@gmail.com --- drivers/gpu/drm/rockchip/rockchip_drm_fb.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c index 81f9548672b0..df6bceabeca8 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c @@ -168,10 +168,8 @@ err_gem_object_unreference: static void rockchip_drm_output_poll_changed(struct drm_device *dev) { struct rockchip_drm_private *private = dev->dev_private; - struct drm_fb_helper *fb_helper = &private->fbdev_helper; - if (fb_helper) - drm_fb_helper_hotplug_event(fb_helper); + drm_fb_helper_hotplug_event(&private->fbdev_helper); } static void From c284a0bdb18d73d5480ca53598e8fead412c6653 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 21 Jun 2017 15:45:55 +0530 Subject: [PATCH 0040/1795] drm: sti: sti_hqvdp: undo preparation of a clock source. Undo preparation of a clock source, if sti_hqvdp_start_xp70 and sti_hqvdp_atomic_check are not successful. Signed-off-by: Arvind Yadav Signed-off-by: Benjamin Gaignard Link: http://patchwork.freedesktop.org/patch/msgid/7afad3012fb6e40f43a1eb5a64dc6364c38bd052.1498039961.git.arvind.yadav.cs@gmail.com --- drivers/gpu/drm/sti/sti_hqvdp.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index 1234f87bce20..53a46dda8bd5 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -958,6 +958,7 @@ static void sti_hqvdp_start_xp70(struct sti_hqvdp *hqvdp) } if (i == POLL_MAX_ATTEMPT) { DRM_ERROR("Could not reset\n"); + clk_disable_unprepare(hqvdp->clk); goto out; } @@ -994,6 +995,7 @@ static void sti_hqvdp_start_xp70(struct sti_hqvdp *hqvdp) } if (i == POLL_MAX_ATTEMPT) { DRM_ERROR("Could not boot\n"); + clk_disable_unprepare(hqvdp->clk); goto out; } @@ -1081,6 +1083,7 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane, &hqvdp->vtg_nb, crtc)) { DRM_ERROR("Cannot register VTG notifier\n"); + clk_disable_unprepare(hqvdp->clk_pix_main); return -EINVAL; } hqvdp->vtg_registered = true; From 4ee056f41807858b2eae263e74ae8b81800c0337 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 21 Jun 2017 13:48:04 +0100 Subject: [PATCH 0041/1795] drm/i915: Cancel pending execlist tasklet upon wedging Highly unlikely, but if the stop_machine() did suspend the tasklet, we want to make sure that when it wakes it finds there is nothing to do. Otherwise, it will loudly complain that the ELSP port tracking no longer matches the hardware, and we will be mightly confused. 
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20170621124804.4529-1-chris@chris-wilson.co.uk Reviewed-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/i915_gem.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1353491c1010..ae3ce1314bd1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3092,6 +3092,13 @@ static void engine_set_wedged(struct intel_engine_cs *engine) engine->execlist_first = NULL; spin_unlock_irqrestore(&engine->timeline->lock, flags); + + /* The port is checked prior to scheduling a tasklet, but + * just in case we have suspended the tasklet to do the + * wedging make sure that when it wakes, it decides there + * is no work to do by clearing the irq_posted bit. + */ + clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); } } From 22a884cfe5a49b6fd63f10ff137906028b4cf923 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 21 Jun 2017 15:04:29 +0200 Subject: [PATCH 0042/1795] drm: Check for drm_device->dev in drm_set_busid I've failed to remember that we have virtual drivers like vgem which have no underlying struct device. Fix this asap. Reported-by: Chris Wilson Cc: Chris Wilson Reviewed-by: Chris Wilson Fixes: 5c484cee7ef9 ("drm: Remove drm_driver->set_busid hook") Cc: Thierry Reding Cc: Daniel Vetter Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170621130429.20537-1-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_ioctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 3a36b3717c28..d74473570acd 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -143,7 +143,7 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv) if (master->unique != NULL) drm_unset_busid(dev, master); - if (dev_is_pci(dev->dev)) { + if (dev->dev && dev_is_pci(dev->dev)) { ret = drm_pci_set_busid(dev, master); if (ret) { drm_unset_busid(dev, master); From edeb729f7929d1372ab426a7f56fd0b337ba5751 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Fri, 16 Jun 2017 10:30:33 +0200 Subject: [PATCH 0043/1795] drm/vc4: Send a VBLANK event when disabling a CRTC VBLANK events are missed when the CRTC is being disabled because the driver does not wait till the end of the frame before stopping the HVS and PV blocks. In this case, we should explicitly issue a VBLANK event if there's one waiting. Signed-off-by: Boris Brezillon Reviewed-by: Eric Anholt Link: http://patchwork.freedesktop.org/patch/msgid/1497601833-24588-1-git-send-email-boris.brezillon@free-electrons.com --- drivers/gpu/drm/vc4/vc4_crtc.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index e3f03efe719b..30a1df11e063 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -518,6 +518,19 @@ static void vc4_crtc_disable(struct drm_crtc *crtc) WARN_ON_ONCE((HVS_READ(SCALER_DISPSTATX(chan)) & (SCALER_DISPSTATX_FULL | SCALER_DISPSTATX_EMPTY)) != SCALER_DISPSTATX_EMPTY); + + /* + * Make sure we issue a vblank event after disabling the CRTC if + * someone was waiting it. 
+ */ + if (crtc->state->event) { + unsigned long flags; + + spin_lock_irqsave(&dev->event_lock, flags); + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + spin_unlock_irqrestore(&dev->event_lock, flags); + } } static void vc4_crtc_enable(struct drm_crtc *crtc) From 10f9818307230a7d8d8b47f38be128d02872d3d2 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 21 Jun 2017 10:28:46 +0200 Subject: [PATCH 0044/1795] drm/shmob: Drop drm_vblank_cleanup It doesn't do anything in the driver load error paths that the drm core doesn't also do (cleanup is done in drm_dev_fini). Cc: Laurent Pinchart Acked-by: Laurent Pinchart Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170621082850.13224-10-daniel.vetter@ffwll.ch --- drivers/gpu/drm/shmobile/shmob_drm_drv.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c index 800d1d2c435d..c2ca07357aac 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c @@ -277,7 +277,7 @@ static int shmob_drm_probe(struct platform_device *pdev) ret = drm_irq_install(ddev, platform_get_irq(pdev, 0)); if (ret < 0) { dev_err(&pdev->dev, "failed to install IRQ handler\n"); - goto err_vblank_cleanup; + goto err_modeset_cleanup; } /* @@ -292,8 +292,6 @@ static int shmob_drm_probe(struct platform_device *pdev) err_irq_uninstall: drm_irq_uninstall(ddev); -err_vblank_cleanup: - drm_vblank_cleanup(ddev); err_modeset_cleanup: drm_kms_helper_poll_fini(ddev); drm_mode_config_cleanup(ddev); From 33b6b7b12d144de7e892a64cb75d086110c87da1 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 21 Jun 2017 10:28:49 +0200 Subject: [PATCH 0045/1795] drm/zte: Drop drm_vblank_cleanup It again looks all cargo-culted for no good reasons. Cc: Shawn Guo Acked-by: Shawn Guo Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170621082850.13224-13-daniel.vetter@ffwll.ch --- drivers/gpu/drm/zte/zx_drm_drv.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c index f46c855d274b..fe1aa5315e19 100644 --- a/drivers/gpu/drm/zte/zx_drm_drv.c +++ b/drivers/gpu/drm/zte/zx_drm_drv.c @@ -149,7 +149,6 @@ out_fbdev_fini: out_poll_fini: drm_kms_helper_poll_fini(drm); drm_mode_config_cleanup(drm); - drm_vblank_cleanup(drm); out_unbind: component_unbind_all(dev, drm); out_unregister: @@ -171,7 +170,6 @@ static void zx_drm_unbind(struct device *dev) } drm_kms_helper_poll_fini(drm); drm_mode_config_cleanup(drm); - drm_vblank_cleanup(drm); component_unbind_all(dev, drm); dev_set_drvdata(dev, NULL); drm->dev_private = NULL; From 2b3d860efa3461af109469e6de2eea48f6ef5cdd Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Thu, 25 May 2017 15:19:17 +0100 Subject: [PATCH 0046/1795] drm: arcpgu: Use crtc->mode_valid() callback Now that we have a callback to check if the crtc supports a given mode we can use it in arcpgu so that we restrict the number of probed modes to the ones we can actually display. This is especially useful because the arcpgu crtc is responsible for setting a clock value in the commit() stage, but unfortunately this clock does not support all the needed ranges. Also, remove the atomic_check() callback, as the mode_valid() callback will be called before it.
Signed-off-by: Jose Abreu Reviewed-by: Alexey Brodkin Cc: Carlos Palminha Cc: Alexey Brodkin Cc: Daniel Vetter Cc: Dave Airlie Cc: Laurent Pinchart Changes v4->v5: - Change commit message to "arcpgu" (Alexey) Changes v3->v4: - Do not use aux function (Laurent) Reviewed-by: Neil Armstrong Acked-by: Dave Airlie Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/c3bcd69016c77f68a03ff3cb6b22ca6f90e930b0.1495720737.git.joabreu@synopsys.com --- drivers/gpu/drm/arc/arcpgu_crtc.c | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c index ad9a95916f1f..99fbdaecf100 100644 --- a/drivers/gpu/drm/arc/arcpgu_crtc.c +++ b/drivers/gpu/drm/arc/arcpgu_crtc.c @@ -64,6 +64,19 @@ static const struct drm_crtc_funcs arc_pgu_crtc_funcs = { .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, }; +enum drm_mode_status arc_pgu_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc); + long rate, clk_rate = mode->clock * 1000; + + rate = clk_round_rate(arcpgu->clk, clk_rate); + if (rate != clk_rate) + return MODE_NOCLOCK; + + return MODE_OK; +} + static void arc_pgu_crtc_mode_set_nofb(struct drm_crtc *crtc) { struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc); @@ -129,20 +142,6 @@ static void arc_pgu_crtc_disable(struct drm_crtc *crtc) ~ARCPGU_CTRL_ENABLE_MASK); } -static int arc_pgu_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) -{ - struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc); - struct drm_display_mode *mode = &state->adjusted_mode; - long rate, clk_rate = mode->clock * 1000; - - rate = clk_round_rate(arcpgu->clk, clk_rate); - if (rate != clk_rate) - return -EINVAL; - - return 0; -} - static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_crtc_state *state) { @@ -158,6 +157,7 @@ static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs arc_pgu_crtc_helper_funcs = { + .mode_valid = arc_pgu_crtc_mode_valid, .mode_set = drm_helper_crtc_mode_set, .mode_set_base = drm_helper_crtc_mode_set_base, .mode_set_nofb = arc_pgu_crtc_mode_set_nofb, @@ -165,7 +165,6 @@ static const struct drm_crtc_helper_funcs arc_pgu_crtc_helper_funcs = { .disable = arc_pgu_crtc_disable, .prepare = arc_pgu_crtc_disable, .commit = arc_pgu_crtc_enable, - .atomic_check = arc_pgu_crtc_atomic_check, .atomic_begin = arc_pgu_crtc_atomic_begin, }; From 51d05e1b29676a0425749a1533b87e3ad3c6f176 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 22 Jun 2017 11:47:22 +0100 Subject: [PATCH 0047/1795] drm/i915: Clear execbuf's vma backpointer upon release commit 2889caa92321 ("drm/i915: Eliminate lots of iterations over the execobjects array") jiggled around the error handling and replace a test that we cleaned up properly after ourselves with an assertion. That assertion failed because in the release function (moments after the assertion) we were indeed forgetting to mark the vma as cleared. The consequence was when testing an invalid relocation address, we would try to release the vma twice (following the couple of attempts to verify the address) and on the second release notice that the first release was incomplete. 
Testcase: igt/gem_reloc_overflow/invalid-address Fixes: 2889caa92321 ("drm/i915: Eliminate lots of iterations over the execobjects array") Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Cc: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20170622104722.2583-1-chris@chris-wilson.co.uk Reviewed-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index b2457556591c..ec33b358fba9 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -879,6 +879,7 @@ static void eb_release_vmas(const struct i915_execbuffer *eb) GEM_BUG_ON(vma->exec_entry != entry); vma->exec_entry = NULL; + __exec_to_vma(entry) = 0; if (entry->flags & __EXEC_OBJECT_HAS_PIN) __eb_unreserve_vma(vma, entry); From 0108648749bfa5713ed0ceede2ee091f428a29d7 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Fri, 2 Jun 2017 10:32:06 +0200 Subject: [PATCH 0048/1795] drm: Add drm_atomic_helper_wait_for_flip_done() Add an helper to wait for all page flips of an atomic state to be done. v2: - Pimp kerneldoc as discussed with Boris on irc - Add missing doc for @dev. - Use old_state for consitency with wait_for_vblanks Signed-off-by: Boris Brezillon (v1) Acked-by: Boris Brezillon Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1496392332-8722-2-git-send-email-boris.brezillon@free-electrons.com --- drivers/gpu/drm/drm_atomic_helper.c | 43 +++++++++++++++++++++++- include/drm/drm_atomic_helper.h | 3 ++ include/drm/drm_modeset_helper_vtables.h | 3 +- 3 files changed, 47 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 93b0221d5d0f..45b4f34bebcd 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1185,9 +1185,13 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences); * * Helper to, after atomic commit, wait for vblanks on all effected * crtcs (ie. before cleaning up old framebuffers using - * drm_atomic_helper_cleanup_planes()). It will only wait on crtcs where the + * drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the * framebuffers have actually changed to optimize for the legacy cursor and * plane update use-case. + * + * Drivers using the nonblocking commit tracking support initialized by calling + * drm_atomic_helper_setup_commit() should look at + * drm_atomic_helper_wait_for_flip_done() as an alternative. */ void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, @@ -1234,6 +1238,43 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, } EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks); +/** + * drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done + * @dev: DRM device + * @old_state: atomic state object with old state structures + * + * Helper to, after atomic commit, wait for page flips on all effected + * crtcs (ie. before cleaning up old framebuffers using + * drm_atomic_helper_cleanup_planes()). Compared to + * drm_atomic_helper_wait_for_vblanks() this waits for the completion of on all + * CRTCs, assuming that cursors-only updates are signalling their completion + * immediately (or using a different path). + * + * This requires that drivers use the nonblocking commit tracking support + * initialized using drm_atomic_helper_setup_commit(). 
+ */ +void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev, + struct drm_atomic_state *old_state) +{ + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc; + int i; + + for_each_crtc_in_state(old_state, crtc, crtc_state, i) { + struct drm_crtc_commit *commit = old_state->crtcs[i].commit; + int ret; + + if (!commit) + continue; + + ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ); + if (ret == 0) + DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n", + crtc->base.id, crtc->name); + } +} +EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done); + /** * drm_atomic_helper_commit_tail - commit atomic update to hardware * @old_state: atomic state object with old state structures diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index f0a8678ae98e..3bfeb2b2f746 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -52,6 +52,9 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev, void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, struct drm_atomic_state *old_state); +void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev, + struct drm_atomic_state *old_state); + void drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev, struct drm_atomic_state *old_state); diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index 85984b208218..474a1029ec79 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -1169,7 +1169,8 @@ struct drm_mode_config_helper_funcs { * After the atomic update is committed to the hardware this hook needs * to call drm_atomic_helper_commit_hw_done(). Then wait for the upate * to be executed by the hardware, for example using - * drm_atomic_helper_wait_for_vblanks(), and then clean up the old + * drm_atomic_helper_wait_for_vblanks() or + * drm_atomic_helper_wait_for_flip_done(), and then clean up the old * framebuffers using drm_atomic_helper_cleanup_planes(). * * When disabling a CRTC this hook _must_ stall for the commit to From 4a234c5fae1686c8327edeaabc0f2d6fb86db470 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Thu, 22 Jun 2017 10:58:36 +0100 Subject: [PATCH 0049/1795] drm/i915: pass the vma to insert_entries The vma already contains most of the information we need for insertion. But also in preparation for supporting huge gtt pages, it would be useful to know the details of the vma, such that we can easily determine the page sizes we are allowed to use when inserting into the 48b PPGTT. This is especially true for 64K where we can't just arbitrarily use it, since we require aligning/padding the vm space to 2M, which sometimes we can't enforce in the upper levels.
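For reference, the vma fields that the insertion paths below actually consume look roughly like this (a trimmed-down sketch of struct i915_vma, not the full definition):

	struct i915_vma {
		struct i915_address_space *vm;	/* address space to insert into */
		struct drm_mm_node node;	/* node.start / node.size: GTT offset and extent */
		struct sg_table *pages;		/* backing store to map */
		/* ... */
	};

Passing the whole vma therefore lets insert_entries() derive the start offset, the size and the sg list itself, instead of receiving them as separate arguments.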
Suggested-by: Chris Wilson Signed-off-by: Matthew Auld Cc: Joonas Lahtinen Reviewed-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/20170622095836.6800-1-matthew.auld@intel.com Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem_gtt.c | 68 ++++++++----------- drivers/gpu/drm/i915/i915_gem_gtt.h | 3 +- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 10 ++- drivers/gpu/drm/i915/selftests/mock_gtt.c | 3 +- 4 files changed, 38 insertions(+), 46 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 61fc7e90a7da..de67084d5fcf 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -207,8 +207,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma, if (vma->obj->gt_ro) pte_flags |= PTE_READ_ONLY; - vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start, - cache_level, pte_flags); + vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); return 0; } @@ -907,37 +906,35 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt, } static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm, - struct sg_table *pages, - u64 start, + struct i915_vma *vma, enum i915_cache_level cache_level, u32 unused) { - struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm); struct sgt_dma iter = { - .sg = pages->sgl, + .sg = vma->pages->sgl, .dma = sg_dma_address(iter.sg), .max = iter.dma + iter.sg->length, }; - struct gen8_insert_pte idx = gen8_insert_pte(start); + struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start); gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx, cache_level); } static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm, - struct sg_table *pages, - u64 start, + struct i915_vma *vma, enum i915_cache_level cache_level, u32 unused) { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct sgt_dma iter = { - .sg = pages->sgl, + .sg = vma->pages->sgl, .dma = sg_dma_address(iter.sg), .max = iter.dma + iter.sg->length, }; struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps; - struct gen8_insert_pte idx = gen8_insert_pte(start); + struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start); while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter, &idx, cache_level)) @@ -1621,13 +1618,12 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, } static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, - struct sg_table *pages, - u64 start, + struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags) { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - unsigned first_entry = start >> PAGE_SHIFT; + unsigned first_entry = vma->node.start >> PAGE_SHIFT; unsigned act_pt = first_entry / GEN6_PTES; unsigned act_pte = first_entry % GEN6_PTES; const u32 pte_encode = vm->pte_encode(0, cache_level, flags); @@ -1635,7 +1631,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, gen6_pte_t *vaddr; vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]); - iter.sg = pages->sgl; + iter.sg = vma->pages->sgl; iter.dma = sg_dma_address(iter.sg); iter.max = iter.dma + iter.sg->length; do { @@ -2090,8 +2086,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm, } static void gen8_ggtt_insert_entries(struct i915_address_space *vm, - struct sg_table *st, - u64 start, + struct i915_vma *vma, enum i915_cache_level level, u32 unused) { @@ -2102,8 +2097,8 @@ static void gen8_ggtt_insert_entries(struct i915_address_space 
*vm, dma_addr_t addr; gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm; - gtt_entries += start >> PAGE_SHIFT; - for_each_sgt_dma(addr, sgt_iter, st) + gtt_entries += vma->node.start >> PAGE_SHIFT; + for_each_sgt_dma(addr, sgt_iter, vma->pages) gen8_set_pte(gtt_entries++, pte_encode | addr); wmb(); @@ -2137,17 +2132,16 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm, * mapped BAR (dev_priv->mm.gtt->gtt). */ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, - struct sg_table *st, - u64 start, + struct i915_vma *vma, enum i915_cache_level level, u32 flags) { struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm; - unsigned int i = start >> PAGE_SHIFT; + unsigned int i = vma->node.start >> PAGE_SHIFT; struct sgt_iter iter; dma_addr_t addr; - for_each_sgt_dma(addr, iter, st) + for_each_sgt_dma(addr, iter, vma->pages) iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]); wmb(); @@ -2229,8 +2223,7 @@ static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm, struct insert_entries { struct i915_address_space *vm; - struct sg_table *st; - u64 start; + struct i915_vma *vma; enum i915_cache_level level; }; @@ -2238,19 +2231,18 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg) { struct insert_entries *arg = _arg; - gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0); + gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0); bxt_vtd_ggtt_wa(arg->vm); return 0; } static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm, - struct sg_table *st, - u64 start, + struct i915_vma *vma, enum i915_cache_level level, u32 unused) { - struct insert_entries arg = { vm, st, start, level }; + struct insert_entries arg = { vma->vm, vma, level }; stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL); } @@ -2316,15 +2308,15 @@ static void i915_ggtt_insert_page(struct i915_address_space *vm, } static void i915_ggtt_insert_entries(struct i915_address_space *vm, - struct sg_table *pages, - u64 start, + struct i915_vma *vma, enum i915_cache_level cache_level, u32 unused) { unsigned int flags = (cache_level == I915_CACHE_NONE) ? 
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; - intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags); + intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT, + flags); } static void i915_ggtt_clear_range(struct i915_address_space *vm, @@ -2353,8 +2345,7 @@ static int ggtt_bind_vma(struct i915_vma *vma, pte_flags |= PTE_READ_ONLY; intel_runtime_pm_get(i915); - vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start, - cache_level, pte_flags); + vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); intel_runtime_pm_put(i915); /* @@ -2407,16 +2398,13 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, goto err_pages; } - appgtt->base.insert_entries(&appgtt->base, - vma->pages, vma->node.start, - cache_level, pte_flags); + appgtt->base.insert_entries(&appgtt->base, vma, cache_level, + pte_flags); } if (flags & I915_VMA_GLOBAL_BIND) { intel_runtime_pm_get(i915); - vma->vm->insert_entries(vma->vm, - vma->pages, vma->node.start, - cache_level, pte_flags); + vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); intel_runtime_pm_put(i915); } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 1b2a56c3e5d3..b4e3aa7c0ce1 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -313,8 +313,7 @@ struct i915_address_space { enum i915_cache_level cache_level, u32 flags); void (*insert_entries)(struct i915_address_space *vm, - struct sg_table *st, - u64 start, + struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags); void (*cleanup)(struct i915_address_space *vm); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 50710e3f1caa..6b132caffa18 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -197,6 +197,9 @@ static int lowlevel_hole(struct drm_i915_private *i915, { I915_RND_STATE(seed_prng); unsigned int size; + struct i915_vma mock_vma; + + memset(&mock_vma, 0, sizeof(struct i915_vma)); /* Keep creating larger objects until one cannot fit into the hole */ for (size = 12; (hole_end - hole_start) >> size; size++) { @@ -255,8 +258,11 @@ static int lowlevel_hole(struct drm_i915_private *i915, vm->allocate_va_range(vm, addr, BIT_ULL(size))) break; - vm->insert_entries(vm, obj->mm.pages, addr, - I915_CACHE_NONE, 0); + mock_vma.pages = obj->mm.pages; + mock_vma.node.size = BIT_ULL(size); + mock_vma.node.start = addr; + + vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0); } count = n; diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index a61309c7cb3e..f2118cf535a0 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c @@ -33,8 +33,7 @@ static void mock_insert_page(struct i915_address_space *vm, } static void mock_insert_entries(struct i915_address_space *vm, - struct sg_table *st, - u64 start, + struct i915_vma *vma, enum i915_cache_level level, u32 flags) { } From b9eb89b21fa3f759d775fbd50236ece663771736 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Tue, 20 Jun 2017 16:03:06 +0300 Subject: [PATCH 0050/1795] drm/i915: Use HAS_PCH_CPT() everywhere MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We have a few cases comparing pch_type directly. Let's just replace them with HAS_PCH_CPT() since CPT/PPT is what they're looking for. 
Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/20170620130310.13245-2-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/intel_device_info.c | 2 +- drivers/gpu/drm/i915/intel_sdvo.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 77d3214e1a77..5f91ddc78c7a 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -363,7 +363,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) */ if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE || sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED || - (dev_priv->pch_type == PCH_CPT && + (HAS_PCH_CPT(dev_priv) && !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { DRM_INFO("Display fused off, disabling\n"); info->num_pipes = 0; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 3f8f30b412cd..f902922d4ae6 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1343,7 +1343,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; } - if (INTEL_PCH_TYPE(dev_priv) >= PCH_CPT) + if (HAS_PCH_CPT(dev_priv)) sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe); else sdvox |= SDVO_PIPE_SEL(crtc->pipe); From aa0321300651df92dad7925a1da7fcaaf2f78c57 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Tue, 20 Jun 2017 16:03:07 +0300 Subject: [PATCH 0051/1795] drm/i915: s/Couar/Cougar/ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix a typo in the PCH type debug message. Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/20170620130310.13245-3-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/i915_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index d35a114c46ed..7ef113d062f5 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -132,7 +132,7 @@ static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv) DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n"); } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) { ret = PCH_CPT; - DRM_DEBUG_KMS("Assuming CouarPoint PCH\n"); + DRM_DEBUG_KMS("Assuming CougarPoint PCH\n"); } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { ret = PCH_LPT; DRM_DEBUG_KMS("Assuming LynxPoint PCH\n"); From 243dec586fca7f9acd08c0e5a9d7884fe5e9c848 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Tue, 20 Jun 2017 16:03:08 +0300 Subject: [PATCH 0052/1795] drm/i915: Document that PPT==CPT and WPT==LPT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For our purposes PPT is equivalent to CPT, and WPT is equivalent to LPT. Document that fact. 
Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/20170620130310.13245-4-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/i915_drv.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 90cc63e5ced8..96372f6e9cac 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1150,8 +1150,8 @@ struct i915_psr { enum intel_pch { PCH_NONE = 0, /* No PCH present */ PCH_IBX, /* Ibexpeak PCH */ - PCH_CPT, /* Cougarpoint PCH */ - PCH_LPT, /* Lynxpoint PCH */ + PCH_CPT, /* Cougarpoint/Pantherpoint PCH */ + PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */ PCH_SPT, /* Sunrisepoint PCH */ PCH_KBP, /* Kabypoint PCH */ PCH_CNP, /* Cannonpoint PCH */ From d4cdbf0334c01ac2fa29238bd7b87142ea0f5740 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Tue, 20 Jun 2017 16:03:09 +0300 Subject: [PATCH 0053/1795] drm/i915: Clean up some expressions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Write the '!(SNB||IVB)' checks in the CPT/PPT detections as '!SNB && !IVB' to make it less messy looking, and clear out some useless parens from the virtualization PCH detection case. Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/20170620130310.13245-5-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/i915_drv.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 7ef113d062f5..80a6d4d13e6f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -185,15 +185,15 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv) dev_priv->pch_id = id; dev_priv->pch_type = PCH_CPT; DRM_DEBUG_KMS("Found CougarPoint PCH\n"); - WARN_ON(!(IS_GEN6(dev_priv) || - IS_IVYBRIDGE(dev_priv))); + WARN_ON(!IS_GEN6(dev_priv) && + !IS_IVYBRIDGE(dev_priv)); } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { /* PantherPoint is CPT compatible */ dev_priv->pch_id = id; dev_priv->pch_type = PCH_CPT; DRM_DEBUG_KMS("Found PantherPoint PCH\n"); - WARN_ON(!(IS_GEN6(dev_priv) || - IS_IVYBRIDGE(dev_priv))); + WARN_ON(!IS_GEN6(dev_priv) && + !IS_IVYBRIDGE(dev_priv)); } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { dev_priv->pch_id = id; dev_priv->pch_type = PCH_LPT; @@ -240,9 +240,9 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv) DRM_DEBUG_KMS("Found CannonPoint LP PCH\n") ; WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv)); - } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || - (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) || - ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && + } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE || + id == INTEL_PCH_P3X_DEVICE_ID_TYPE || + (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE && pch->subsystem_vendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET && pch->subsystem_device == PCI_SUBDEVICE_ID_QEMU)) { From c5e855d07834d67f30fab07e7d7316b090b8073f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 21 Jun 2017 20:49:44 +0300 Subject: [PATCH 0054/1795] drm/i915: Always use 9 bits of the LPC bridge device ID for PCH detection MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make the code less confusing by always using the top 9 bits of the LPC bridge device ID to detect the PCH type.
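As a worked example of why 8 bits is not enough, take the LPT-LP/WPT-LP pair from the ID table below (the low bits, 0x03 here, merely stand in for whatever a particular LPC bridge reports):

	0x9c03 & 0xff00 == 0x9c00	/* 8-bit mask: LPT-LP */
	0x9c83 & 0xff00 == 0x9c00	/* 8-bit mask: WPT-LP is indistinguishable */
	0x9c03 & 0xff80 == 0x9c00	/* 9-bit mask: LPT-LP */
	0x9c83 & 0xff80 == 0x9c80	/* 9-bit mask: WPT-LP gets its own ID */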
We need to add a bit of new code for WPT, and we need to adjust the KBP ID as well. All the other pre-CNP IDs are fine as is. The virtualization cases I think are fine. These P2X and P3X IDs actually just look like the old PIIX4 and PIIX3 IDs to me. Not sure why they're not called PIIX3/4 though. The qemu one has a comment saying the full ID is 0x2918 which is fine with 9 bits. v2: Keep the CNP ID as 0xa300 (DK) Cc: Dhinakaran Pandiyan Reviewed-by: Jani Nikula Reviewed-by: Dhinakaran Pandiyan Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/20170621174944.23306-1-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/i915_drv.c | 35 +++++++++++++++++++-------------- drivers/gpu/drm/i915/i915_drv.h | 13 +++++++----- 2 files changed, 28 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 80a6d4d13e6f..370429e2071f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -173,29 +173,25 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv) while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) { if (pch->vendor == PCI_VENDOR_ID_INTEL) { unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK; - unsigned short id_ext = pch->device & - INTEL_PCH_DEVICE_ID_MASK_EXT; + + dev_priv->pch_id = id; if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { - dev_priv->pch_id = id; dev_priv->pch_type = PCH_IBX; DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); WARN_ON(!IS_GEN5(dev_priv)); } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { - dev_priv->pch_id = id; dev_priv->pch_type = PCH_CPT; DRM_DEBUG_KMS("Found CougarPoint PCH\n"); WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv)); } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { /* PantherPoint is CPT compatible */ - dev_priv->pch_id = id; dev_priv->pch_type = PCH_CPT; DRM_DEBUG_KMS("Found PantherPoint PCH\n"); WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv)); } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { - dev_priv->pch_id = id; dev_priv->pch_type = PCH_LPT; DRM_DEBUG_KMS("Found LynxPoint PCH\n"); WARN_ON(!IS_HASWELL(dev_priv) && @@ -203,39 +199,49 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv) WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)); } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { - dev_priv->pch_id = id; dev_priv->pch_type = PCH_LPT; DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv)); + } else if (id == INTEL_PCH_WPT_DEVICE_ID_TYPE) { + /* WildcatPoint is LPT compatible */ + dev_priv->pch_type = PCH_LPT; + DRM_DEBUG_KMS("Found WildcatPoint PCH\n"); + WARN_ON(!IS_HASWELL(dev_priv) && + !IS_BROADWELL(dev_priv)); + WARN_ON(IS_HSW_ULT(dev_priv) || + IS_BDW_ULT(dev_priv)); + } else if (id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) { + /* WildcatPoint is LPT compatible */ + dev_priv->pch_type = PCH_LPT; + DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n"); + WARN_ON(!IS_HASWELL(dev_priv) && + !IS_BROADWELL(dev_priv)); + WARN_ON(!IS_HSW_ULT(dev_priv) && + !IS_BDW_ULT(dev_priv)); } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { - dev_priv->pch_id = id; dev_priv->pch_type = PCH_SPT; DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)); - } else if (id_ext == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) { - dev_priv->pch_id = id_ext; + } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_SPT; DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); 
WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)); } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) { - dev_priv->pch_id = id; dev_priv->pch_type = PCH_KBP; DRM_DEBUG_KMS("Found KabyPoint PCH\n"); WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)); } else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) { - dev_priv->pch_id = id; dev_priv->pch_type = PCH_CNP; DRM_DEBUG_KMS("Found CannonPoint PCH\n"); WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv)); - } else if (id_ext == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) { - dev_priv->pch_id = id_ext; + } else if (id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_CNP; DRM_DEBUG_KMS("Found CannonPoint LP PCH\n"); WARN_ON(!IS_CANNONLAKE(dev_priv) && @@ -247,7 +253,6 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv) PCI_SUBVENDOR_ID_REDHAT_QUMRANET && pch->subsystem_device == PCI_SUBDEVICE_ID_QEMU)) { - dev_priv->pch_id = id; dev_priv->pch_type = intel_virt_detect_pch(dev_priv); } else diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 96372f6e9cac..5524e6fc9476 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3011,16 +3011,17 @@ intel_info(const struct drm_i915_private *dev_priv) #define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu) -#define INTEL_PCH_DEVICE_ID_MASK 0xff00 -#define INTEL_PCH_DEVICE_ID_MASK_EXT 0xff80 +#define INTEL_PCH_DEVICE_ID_MASK 0xff80 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 +#define INTEL_PCH_WPT_DEVICE_ID_TYPE 0x8c80 +#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80 #define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 -#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200 +#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280 #define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300 #define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 @@ -3035,9 +3036,11 @@ intel_info(const struct drm_i915_private *dev_priv) #define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT) #define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT) #define HAS_PCH_LPT_LP(dev_priv) \ - ((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) + ((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \ + (dev_priv)->pch_id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) #define HAS_PCH_LPT_H(dev_priv) \ - ((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) + ((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE || \ + (dev_priv)->pch_id == INTEL_PCH_WPT_DEVICE_ID_TYPE) #define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT) #define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX) #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP) From 7d2818f54e38e40232c65866b58c8b627dbf5711 Mon Sep 17 00:00:00 2001 From: Mario Kleiner Date: Thu, 22 Jun 2017 03:28:11 +0200 Subject: [PATCH 0055/1795] drm/vc4: Allow vblank_disable_immediate on non-fw-kms. (v2) With instantaneous high precision vblank timestamping that updates at leading edge of vblank, the emulated "hw vblank counter" from vblank timestamping which increments at leading edge of vblank, and reliable page flip execution and completion at leading edge of vblank, we should meet the requirements for fast vblank irq disable/enable. 
Testing against rpi-4.12-rc5 Linux kernel with timing measurement equipment indicates this works fine, so allow immediate vblank disable for power saving. For debugging in case of unexpected trouble, booting with kernel cmdline option drm.vblankoffdelay=0 would keep vblank irqs on to approximate old behavior. v2: Respin onto drm-misc-next, per Eric's suggestion. Drop !vc4->firmware_kms check, as the firmware_kms implementation does not exist in upstream. Signed-off-by: Mario Kleiner Cc: Eric Anholt Reviewed-by: Eric Anholt Link: http://patchwork.freedesktop.org/patch/msgid/20170622012811.2139-1-mario.kleiner.de@gmail.com --- drivers/gpu/drm/vc4/vc4_kms.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index bc6ecdc6f104..c2c9f82b2df1 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -241,6 +241,9 @@ int vc4_kms_load(struct drm_device *dev) sema_init(&vc4->async_modeset, 1); + /* Set support for vblank irq fast disable, before drm_vblank_init() */ + dev->vblank_disable_immediate = true; + ret = drm_vblank_init(dev, dev->mode_config.num_crtc); if (ret < 0) { dev_err(dev->dev, "failed to initialize vblank\n"); From 334dbd69c604cb33d906a30195010d090e6eb80c Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 21 Jun 2017 11:49:59 -0700 Subject: [PATCH 0056/1795] drm/vc4: Hook up plane prepare_fb to lookup dma-buf reservations. This way drm_atomic_helper_wait_for_fences() will actually do something. The vc4_seqno_cb has been doing the fence waits on V3D manually, so far. Signed-off-by: Eric Anholt Link: http://patchwork.freedesktop.org/patch/msgid/20170621185002.28563-1-eric@anholt.net Acked-by: Daniel Vetter Reviewed-by: Boris Brezillon --- drivers/gpu/drm/vc4/vc4_plane.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index fa6809d8b0fe..8853e9a4f005 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -759,9 +759,26 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb) vc4_state->dlist[vc4_state->ptr0_offset] = addr; } +static int vc4_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct vc4_bo *bo; + struct dma_fence *fence; + + if ((plane->state->fb == state->fb) || !state->fb) + return 0; + + bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base); + fence = reservation_object_get_excl_rcu(bo->resv); + drm_atomic_set_fence_for_plane(state, fence); + + return 0; +} + static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = { .atomic_check = vc4_plane_atomic_check, .atomic_update = vc4_plane_atomic_update, + .prepare_fb = vc4_prepare_fb, }; static void vc4_plane_destroy(struct drm_plane *plane) From 53ad06949d3b18ffeb530ff876b1ae35e7640f39 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 21 Jun 2017 11:50:00 -0700 Subject: [PATCH 0057/1795] drm/vc4: Wait for fences interruptibly in blocking mode. We should allow SIGIO and things to interrupt us before we get to the no-error stage of the commit process. This code is effectively copied from drm_atomic_helper_commit(). 
Signed-off-by: Eric Anholt Link: http://patchwork.freedesktop.org/patch/msgid/20170621185002.28563-2-eric@anholt.net Acked-by: Daniel Vetter Reviewed-by: Boris Brezillon --- drivers/gpu/drm/vc4/vc4_kms.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index c2c9f82b2df1..86fcdb0bd75f 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -142,6 +142,16 @@ static int vc4_atomic_commit(struct drm_device *dev, return ret; } + if (!nonblock) { + ret = drm_atomic_helper_wait_for_fences(dev, state, true); + if (ret) { + drm_atomic_helper_cleanup_planes(dev, state); + kfree(c); + up(&vc4->async_modeset); + return ret; + } + } + for_each_plane_in_state(state, plane, new_state, i) { if ((plane->state->fb != new_state->fb) && new_state->fb) { struct drm_gem_cma_object *cma_bo = From cf1b372ec11dae9fe7dcc26f47cb4bc939db0c32 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 21 Jun 2017 11:50:01 -0700 Subject: [PATCH 0058/1795] drm/vc4: Use the atomic state's commit workqueue. Now that we're using the atomic helpers for fence waits, we can use the same codepath as drm_atomic_helper_commit() does for async, getting rid of our custom vc4_commit struct. Signed-off-by: Eric Anholt Link: http://patchwork.freedesktop.org/patch/msgid/20170621185002.28563-3-eric@anholt.net Acked-by: Daniel Vetter Reviewed-by: Boris Brezillon --- drivers/gpu/drm/vc4/vc4_kms.c | 71 +++++++---------------------------- 1 file changed, 13 insertions(+), 58 deletions(-) diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index 86fcdb0bd75f..27edae427025 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -29,16 +29,9 @@ static void vc4_output_poll_changed(struct drm_device *dev) drm_fbdev_cma_hotplug_event(vc4->fbdev); } -struct vc4_commit { - struct drm_device *dev; - struct drm_atomic_state *state; - struct vc4_seqno_cb cb; -}; - static void -vc4_atomic_complete_commit(struct vc4_commit *c) +vc4_atomic_complete_commit(struct drm_atomic_state *state) { - struct drm_atomic_state *state = c->state; struct drm_device *dev = state->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); @@ -72,28 +65,14 @@ vc4_atomic_complete_commit(struct vc4_commit *c) drm_atomic_state_put(state); up(&vc4->async_modeset); - - kfree(c); } -static void -vc4_atomic_complete_commit_seqno_cb(struct vc4_seqno_cb *cb) +static void commit_work(struct work_struct *work) { - struct vc4_commit *c = container_of(cb, struct vc4_commit, cb); - - vc4_atomic_complete_commit(c); -} - -static struct vc4_commit *commit_init(struct drm_atomic_state *state) -{ - struct vc4_commit *c = kzalloc(sizeof(*c), GFP_KERNEL); - - if (!c) - return NULL; - c->dev = state->dev; - c->state = state; - - return c; + struct drm_atomic_state *state = container_of(work, + struct drm_atomic_state, + commit_work); + vc4_atomic_complete_commit(state); } /** @@ -115,29 +94,19 @@ static int vc4_atomic_commit(struct drm_device *dev, { struct vc4_dev *vc4 = to_vc4_dev(dev); int ret; - int i; - uint64_t wait_seqno = 0; - struct vc4_commit *c; - struct drm_plane *plane; - struct drm_plane_state *new_state; - - c = commit_init(state); - if (!c) - return -ENOMEM; ret = drm_atomic_helper_setup_commit(state, nonblock); if (ret) return ret; + INIT_WORK(&state->commit_work, commit_work); + ret = down_interruptible(&vc4->async_modeset); - if (ret) { - kfree(c); + if (ret) return ret; - } ret = drm_atomic_helper_prepare_planes(dev, state); if (ret) { - 
kfree(c); up(&vc4->async_modeset); return ret; } @@ -146,22 +115,11 @@ static int vc4_atomic_commit(struct drm_device *dev, ret = drm_atomic_helper_wait_for_fences(dev, state, true); if (ret) { drm_atomic_helper_cleanup_planes(dev, state); - kfree(c); up(&vc4->async_modeset); return ret; } } - for_each_plane_in_state(state, plane, new_state, i) { - if ((plane->state->fb != new_state->fb) && new_state->fb) { - struct drm_gem_cma_object *cma_bo = - drm_fb_cma_get_gem_obj(new_state->fb, 0); - struct vc4_bo *bo = to_vc4_bo(&cma_bo->base); - - wait_seqno = max(bo->seqno, wait_seqno); - } - } - /* * This is the point of no return - everything below never fails except * when the hw goes bonghits. Which means we can commit the new state on @@ -187,13 +145,10 @@ static int vc4_atomic_commit(struct drm_device *dev, */ drm_atomic_state_get(state); - if (nonblock) { - vc4_queue_seqno_cb(dev, &c->cb, wait_seqno, - vc4_atomic_complete_commit_seqno_cb); - } else { - vc4_wait_for_seqno(dev, wait_seqno, ~0ull, false); - vc4_atomic_complete_commit(c); - } + if (nonblock) + queue_work(system_unbound_wq, &state->commit_work); + else + vc4_atomic_complete_commit(state); return 0; } From 55a0b9d70a6c571b7d4f2f7eecb451c73b2f94a0 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 21 Jun 2017 11:50:02 -0700 Subject: [PATCH 0059/1795] drm/vc4: Remove dead vc4_event_pending(). It is no longer used as of commit 34c8ea400ff6 ("drm/vc4: Mimic drm_atomic_helper_commit() behavior") Signed-off-by: Eric Anholt Link: http://patchwork.freedesktop.org/patch/msgid/20170621185002.28563-4-eric@anholt.net Acked-by: Daniel Vetter Reviewed-by: Boris Brezillon --- drivers/gpu/drm/vc4/vc4_crtc.c | 8 -------- drivers/gpu/drm/vc4/vc4_drv.h | 1 - 2 files changed, 9 deletions(-) diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 30a1df11e063..f20c01759c0d 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -674,14 +674,6 @@ static void vc4_disable_vblank(struct drm_crtc *crtc) CRTC_WRITE(PV_INTEN, 0); } -/* Must be called with the event lock held */ -bool vc4_event_pending(struct drm_crtc *crtc) -{ - struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); - - return !!vc4_crtc->event; -} - static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc) { struct drm_crtc *crtc = &vc4_crtc->base; diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index df22698d62ee..1047953216a8 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -491,7 +491,6 @@ int vc4_bo_stats_debugfs(struct seq_file *m, void *arg); /* vc4_crtc.c */ extern struct platform_driver vc4_crtc_driver; -bool vc4_event_pending(struct drm_crtc *crtc); int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg); bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id, bool in_vblank_irq, int *vpos, int *hpos, From ae7c59f0e7daf337d8ae95ecb42ae586883c29f4 Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Thu, 22 Jun 2017 07:03:10 +0200 Subject: [PATCH 0060/1795] drm: atmel-hlcdc: add missing .set_property helper to the crtc The default implementation should be used. 
Signed-off-by: Peter Rosin Signed-off-by: Boris Brezillon Link: http://patchwork.freedesktop.org/patch/msgid/1498107791-17450-2-git-send-email-peda@axentia.se --- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index 53489859997b..cc00ce3bbaa5 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -429,6 +429,7 @@ static const struct drm_crtc_funcs atmel_hlcdc_crtc_funcs = { .atomic_destroy_state = atmel_hlcdc_crtc_destroy_state, .enable_vblank = atmel_hlcdc_crtc_enable_vblank, .disable_vblank = atmel_hlcdc_crtc_disable_vblank, + .set_property = drm_atomic_helper_crtc_set_property, }; int atmel_hlcdc_crtc_create(struct drm_device *dev) From 364a7bf574ebbd7c001a1c3f79816d0480062f80 Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Thu, 22 Jun 2017 07:03:11 +0200 Subject: [PATCH 0061/1795] drm: atmel-hlcdc: add support for 8-bit color lookup table mode All layers of all supported chips support this, the only variable is the base address of the lookup table in the register map. Acked-by: Daniel Vetter Signed-off-by: Peter Rosin Signed-off-by: Boris Brezillon Link: http://patchwork.freedesktop.org/patch/msgid/1498107791-17450-3-git-send-email-peda@axentia.se --- .../gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 5 ++++ drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 14 +++++++++ drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h | 16 ++++++++++ .../gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 29 +++++++++++++++++++ 4 files changed, 64 insertions(+) diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index cc00ce3bbaa5..f6b8c5908a20 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -430,6 +430,7 @@ static const struct drm_crtc_funcs atmel_hlcdc_crtc_funcs = { .enable_vblank = atmel_hlcdc_crtc_enable_vblank, .disable_vblank = atmel_hlcdc_crtc_disable_vblank, .set_property = drm_atomic_helper_crtc_set_property, + .gamma_set = drm_atomic_helper_legacy_gamma_set, }; int atmel_hlcdc_crtc_create(struct drm_device *dev) @@ -485,6 +486,10 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev) drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs); drm_crtc_vblank_reset(&crtc->base); + drm_mode_crtc_set_gamma_size(&crtc->base, ATMEL_HLCDC_CLUT_SIZE); + drm_crtc_enable_color_mgmt(&crtc->base, 0, false, + ATMEL_HLCDC_CLUT_SIZE); + dc->crtc = &crtc->base; return 0; diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 51d22ad9a3a3..516d9547d331 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -42,6 +42,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9n12_layers[] = { .default_color = 3, .general_config = 4, }, + .clut_offset = 0x400, }, }; @@ -73,6 +74,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9x5_layers[] = { .disc_pos = 5, .disc_size = 6, }, + .clut_offset = 0x400, }, { .name = "overlay1", @@ -91,6 +93,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9x5_layers[] = { .chroma_key_mask = 8, .general_config = 9, }, + .clut_offset = 0x800, }, { .name = "high-end-overlay", @@ -112,6 +115,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9x5_layers[] = { .scaler_config = 13, .csc = 14, }, + .clut_offset = 
0x1000, }, { .name = "cursor", @@ -131,6 +135,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9x5_layers[] = { .chroma_key_mask = 8, .general_config = 9, }, + .clut_offset = 0x1400, }, }; @@ -162,6 +167,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = { .disc_pos = 5, .disc_size = 6, }, + .clut_offset = 0x600, }, { .name = "overlay1", @@ -180,6 +186,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = { .chroma_key_mask = 8, .general_config = 9, }, + .clut_offset = 0xa00, }, { .name = "overlay2", @@ -198,6 +205,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = { .chroma_key_mask = 8, .general_config = 9, }, + .clut_offset = 0xe00, }, { .name = "high-end-overlay", @@ -223,6 +231,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = { }, .csc = 14, }, + .clut_offset = 0x1200, }, { .name = "cursor", @@ -244,6 +253,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = { .general_config = 9, .scaler_config = 13, }, + .clut_offset = 0x1600, }, }; @@ -275,6 +285,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d4_layers[] = { .disc_pos = 5, .disc_size = 6, }, + .clut_offset = 0x600, }, { .name = "overlay1", @@ -293,6 +304,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d4_layers[] = { .chroma_key_mask = 8, .general_config = 9, }, + .clut_offset = 0xa00, }, { .name = "overlay2", @@ -311,6 +323,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d4_layers[] = { .chroma_key_mask = 8, .general_config = 9, }, + .clut_offset = 0xe00, }, { .name = "high-end-overlay", @@ -336,6 +349,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d4_layers[] = { }, .csc = 14, }, + .clut_offset = 0x1200, }, }; diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h index b0596a84c1b8..4237b0446721 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h @@ -88,6 +88,11 @@ #define ATMEL_HLCDC_YUV422SWP BIT(17) #define ATMEL_HLCDC_DSCALEOPT BIT(20) +#define ATMEL_HLCDC_C1_MODE ATMEL_HLCDC_CLUT_MODE(0) +#define ATMEL_HLCDC_C2_MODE ATMEL_HLCDC_CLUT_MODE(1) +#define ATMEL_HLCDC_C4_MODE ATMEL_HLCDC_CLUT_MODE(2) +#define ATMEL_HLCDC_C8_MODE ATMEL_HLCDC_CLUT_MODE(3) + #define ATMEL_HLCDC_XRGB4444_MODE ATMEL_HLCDC_RGB_MODE(0) #define ATMEL_HLCDC_ARGB4444_MODE ATMEL_HLCDC_RGB_MODE(1) #define ATMEL_HLCDC_RGBA4444_MODE ATMEL_HLCDC_RGB_MODE(2) @@ -142,6 +147,8 @@ #define ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE BIT(2) #define ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN BIT(3) +#define ATMEL_HLCDC_CLUT_SIZE 256 + #define ATMEL_HLCDC_MAX_LAYERS 6 /** @@ -259,6 +266,7 @@ struct atmel_hlcdc_layer_desc { int id; int regs_offset; int cfgs_offset; + int clut_offset; struct atmel_hlcdc_formats *formats; struct atmel_hlcdc_layer_cfg_layout layout; int max_width; @@ -414,6 +422,14 @@ static inline u32 atmel_hlcdc_layer_read_cfg(struct atmel_hlcdc_layer *layer, (cfgid * sizeof(u32))); } +static inline void atmel_hlcdc_layer_write_clut(struct atmel_hlcdc_layer *layer, + unsigned int c, u32 val) +{ + regmap_write(layer->regmap, + layer->desc->clut_offset + c * sizeof(u32), + val); +} + static inline void atmel_hlcdc_layer_init(struct atmel_hlcdc_layer *layer, const struct atmel_hlcdc_layer_desc *desc, struct regmap *regmap) diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index 
1124200bb280..b5bd9b005225 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c @@ -83,6 +83,7 @@ drm_plane_state_to_atmel_hlcdc_plane_state(struct drm_plane_state *s) #define SUBPIXEL_MASK 0xffff static uint32_t rgb_formats[] = { + DRM_FORMAT_C8, DRM_FORMAT_XRGB4444, DRM_FORMAT_ARGB4444, DRM_FORMAT_RGBA4444, @@ -100,6 +101,7 @@ struct atmel_hlcdc_formats atmel_hlcdc_plane_rgb_formats = { }; static uint32_t rgb_and_yuv_formats[] = { + DRM_FORMAT_C8, DRM_FORMAT_XRGB4444, DRM_FORMAT_ARGB4444, DRM_FORMAT_RGBA4444, @@ -128,6 +130,9 @@ struct atmel_hlcdc_formats atmel_hlcdc_plane_rgb_and_yuv_formats = { static int atmel_hlcdc_format_to_plane_mode(u32 format, u32 *mode) { switch (format) { + case DRM_FORMAT_C8: + *mode = ATMEL_HLCDC_C8_MODE; + break; case DRM_FORMAT_XRGB4444: *mode = ATMEL_HLCDC_XRGB4444_MODE; break; @@ -424,6 +429,29 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane, ATMEL_HLCDC_LAYER_FORMAT_CFG, cfg); } +static void atmel_hlcdc_plane_update_clut(struct atmel_hlcdc_plane *plane) +{ + struct drm_crtc *crtc = plane->base.crtc; + struct drm_color_lut *lut; + int idx; + + if (!crtc || !crtc->state) + return; + + if (!crtc->state->color_mgmt_changed || !crtc->state->gamma_lut) + return; + + lut = (struct drm_color_lut *)crtc->state->gamma_lut->data; + + for (idx = 0; idx < ATMEL_HLCDC_CLUT_SIZE; idx++, lut++) { + u32 val = ((lut->red << 8) & 0xff0000) | + (lut->green & 0xff00) | + (lut->blue >> 8); + + atmel_hlcdc_layer_write_clut(&plane->layer, idx, val); + } +} + static void atmel_hlcdc_plane_update_buffers(struct atmel_hlcdc_plane *plane, struct atmel_hlcdc_plane_state *state) { @@ -768,6 +796,7 @@ static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p, atmel_hlcdc_plane_update_pos_and_size(plane, state); atmel_hlcdc_plane_update_general_settings(plane, state); atmel_hlcdc_plane_update_format(plane, state); + atmel_hlcdc_plane_update_clut(plane); atmel_hlcdc_plane_update_buffers(plane, state); atmel_hlcdc_plane_update_disc_area(plane, state); From 6445e394c54b61974491fccbf2e4fcd990d2fa8b Mon Sep 17 00:00:00 2001 From: Mark Yao Date: Thu, 22 Jun 2017 15:17:24 +0800 Subject: [PATCH 0062/1795] drm/rockchip: dw_hdmi: add RK3399 HDMI support RK3399 and RK3288 shared the same HDMI IP controller, only some light difference with GRF configure. Signed-off-by: Yakir Yang Signed-off-by: Mark Yao Acked-by: Rob Herring --- .../display/rockchip/dw_hdmi-rockchip.txt | 4 +- drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 67 ++++++++++++++++--- 2 files changed, 59 insertions(+), 12 deletions(-) diff --git a/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt index 046076c6b277..7039a15b0d04 100644 --- a/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt +++ b/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt @@ -11,7 +11,9 @@ following device-specific properties. Required properties: -- compatible: Shall contain "rockchip,rk3288-dw-hdmi". +- compatible: should be one of the following: + "rockchip,rk3288-dw-hdmi" + "rockchip,rk3399-dw-hdmi" - reg: See dw_hdmi.txt. - reg-io-width: See dw_hdmi.txt. Shall be 4. 
- interrupts: HDMI interrupt number diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index f8208489724e..e7bab14134da 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -20,13 +20,30 @@ #include "rockchip_drm_drv.h" #include "rockchip_drm_vop.h" -#define GRF_SOC_CON6 0x025c -#define HDMI_SEL_VOP_LIT (1 << 4) +#define RK3288_GRF_SOC_CON6 0x025C +#define RK3288_HDMI_LCDC_SEL BIT(4) +#define RK3399_GRF_SOC_CON20 0x6250 +#define RK3399_HDMI_LCDC_SEL BIT(6) + +#define HIWORD_UPDATE(val, mask) (val | (mask) << 16) + +/** + * struct rockchip_hdmi_chip_data - splite the grf setting of kind of chips + * @lcdsel_grf_reg: grf register offset of lcdc select + * @lcdsel_big: reg value of selecting vop big for HDMI + * @lcdsel_lit: reg value of selecting vop little for HDMI + */ +struct rockchip_hdmi_chip_data { + u32 lcdsel_grf_reg; + u32 lcdsel_big; + u32 lcdsel_lit; +}; struct rockchip_hdmi { struct device *dev; struct regmap *regmap; struct drm_encoder encoder; + const struct rockchip_hdmi_chip_data *chip_data; }; #define to_rockchip_hdmi(x) container_of(x, struct rockchip_hdmi, x) @@ -198,17 +215,20 @@ static void dw_hdmi_rockchip_encoder_enable(struct drm_encoder *encoder) { struct rockchip_hdmi *hdmi = to_rockchip_hdmi(encoder); u32 val; - int mux; + int ret; - mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder); - if (mux) - val = HDMI_SEL_VOP_LIT | (HDMI_SEL_VOP_LIT << 16); + ret = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder); + if (ret) + val = hdmi->chip_data->lcdsel_lit; else - val = HDMI_SEL_VOP_LIT << 16; + val = hdmi->chip_data->lcdsel_big; + + ret = regmap_write(hdmi->regmap, hdmi->chip_data->lcdsel_grf_reg, val); + if (ret != 0) + dev_err(hdmi->dev, "Could not write to GRF: %d\n", ret); - regmap_write(hdmi->regmap, GRF_SOC_CON6, val); dev_dbg(hdmi->dev, "vop %s output to hdmi\n", - (mux) ? "LIT" : "BIG"); + ret ? 
"LIT" : "BIG"); } static int @@ -232,16 +252,40 @@ static const struct drm_encoder_helper_funcs dw_hdmi_rockchip_encoder_helper_fun .atomic_check = dw_hdmi_rockchip_encoder_atomic_check, }; -static const struct dw_hdmi_plat_data rockchip_hdmi_drv_data = { +static struct rockchip_hdmi_chip_data rk3288_chip_data = { + .lcdsel_grf_reg = RK3288_GRF_SOC_CON6, + .lcdsel_big = HIWORD_UPDATE(0, RK3288_HDMI_LCDC_SEL), + .lcdsel_lit = HIWORD_UPDATE(RK3288_HDMI_LCDC_SEL, RK3288_HDMI_LCDC_SEL), +}; + +static const struct dw_hdmi_plat_data rk3288_hdmi_drv_data = { .mode_valid = dw_hdmi_rockchip_mode_valid, .mpll_cfg = rockchip_mpll_cfg, .cur_ctr = rockchip_cur_ctr, .phy_config = rockchip_phy_config, + .phy_data = &rk3288_chip_data, +}; + +static struct rockchip_hdmi_chip_data rk3399_chip_data = { + .lcdsel_grf_reg = RK3399_GRF_SOC_CON20, + .lcdsel_big = HIWORD_UPDATE(0, RK3399_HDMI_LCDC_SEL), + .lcdsel_lit = HIWORD_UPDATE(RK3399_HDMI_LCDC_SEL, RK3399_HDMI_LCDC_SEL), +}; + +static const struct dw_hdmi_plat_data rk3399_hdmi_drv_data = { + .mode_valid = dw_hdmi_rockchip_mode_valid, + .mpll_cfg = rockchip_mpll_cfg, + .cur_ctr = rockchip_cur_ctr, + .phy_config = rockchip_phy_config, + .phy_data = &rk3399_chip_data, }; static const struct of_device_id dw_hdmi_rockchip_dt_ids[] = { { .compatible = "rockchip,rk3288-dw-hdmi", - .data = &rockchip_hdmi_drv_data + .data = &rk3288_hdmi_drv_data + }, + { .compatible = "rockchip,rk3399-dw-hdmi", + .data = &rk3399_hdmi_drv_data }, {}, }; @@ -268,6 +312,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, match = of_match_node(dw_hdmi_rockchip_dt_ids, pdev->dev.of_node); plat_data = match->data; hdmi->dev = &pdev->dev; + hdmi->chip_data = plat_data->phy_data; encoder = &hdmi->encoder; encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node); From 5e3bc6d1ab48a20cbce3d59d42937bc977ec61cf Mon Sep 17 00:00:00 2001 From: Mark Yao Date: Fri, 9 Jun 2017 15:10:41 +0800 Subject: [PATCH 0063/1795] drm/rockchip: dw_hdmi: introduce the VPLL clock setting For RK3399 HDMI, there is an external clock need for HDMI PHY, and it should keep the same clock rate with VOP DCLK. VPLL have supported the clock for HDMI PHY, but there is no clock divider bewteen VPLL and HDMI PHY. So we need to set the VPLL rate manually in HDMI driver. Signed-off-by: Yakir Yang Signed-off-by: Mark Yao Acked-by: Rob Herring --- .../display/rockchip/dw_hdmi-rockchip.txt | 2 +- drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 25 ++++++++++++++++++- 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt index 7039a15b0d04..122d4e8b879b 100644 --- a/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt +++ b/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt @@ -32,7 +32,7 @@ Optional properties I2C master controller. - clock-names: See dw_hdmi.txt. The "cec" clock is optional. - clock-names: May contain "cec" as defined in dw_hdmi.txt. - +- clock-names: May contain "vpll", external clock for some hdmi phy. Example: diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index e7bab14134da..c00d7e273e84 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -7,10 +7,12 @@ * (at your option) any later version. 
*/ +#include +#include #include #include -#include #include + #include #include #include @@ -44,6 +46,7 @@ struct rockchip_hdmi { struct regmap *regmap; struct drm_encoder encoder; const struct rockchip_hdmi_chip_data *chip_data; + struct clk *vpll_clk; }; #define to_rockchip_hdmi(x) container_of(x, struct rockchip_hdmi, x) @@ -160,6 +163,7 @@ static const struct dw_hdmi_phy_config rockchip_phy_config[] = { static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi) { struct device_node *np = hdmi->dev->of_node; + int ret; hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf"); if (IS_ERR(hdmi->regmap)) { @@ -167,6 +171,22 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi) return PTR_ERR(hdmi->regmap); } + hdmi->vpll_clk = devm_clk_get(hdmi->dev, "vpll"); + if (PTR_ERR(hdmi->vpll_clk) == -ENOENT) { + hdmi->vpll_clk = NULL; + } else if (PTR_ERR(hdmi->vpll_clk) == -EPROBE_DEFER) { + return -EPROBE_DEFER; + } else if (IS_ERR(hdmi->vpll_clk)) { + dev_err(hdmi->dev, "failed to get grf clock\n"); + return PTR_ERR(hdmi->vpll_clk); + } + + ret = clk_prepare_enable(hdmi->vpll_clk); + if (ret) { + dev_err(hdmi->dev, "Failed to enable HDMI vpll: %d\n", ret); + return ret; + } + return 0; } @@ -209,6 +229,9 @@ static void dw_hdmi_rockchip_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adj_mode) { + struct rockchip_hdmi *hdmi = to_rockchip_hdmi(encoder); + + clk_set_rate(hdmi->vpll_clk, adj_mode->clock * 1000); } static void dw_hdmi_rockchip_encoder_enable(struct drm_encoder *encoder) From 8814b40bf6b2293eede832d35957b4e9ba495ae3 Mon Sep 17 00:00:00 2001 From: Mark Yao Date: Fri, 9 Jun 2017 15:10:46 +0800 Subject: [PATCH 0064/1795] drm/rockchip: dw_hdmi: introduce the pclk for grf For RK3399's GRF module, if we want to operate the graphic related grf registers, we need to enable the pclk_vio_grf which supply power for VIO GRF IOs, so it's better to introduce an optional grf clock in driver. Signed-off-by: Yakir Yang Signed-off-by: Mark Yao Acked-by: Rob Herring --- .../display/rockchip/dw_hdmi-rockchip.txt | 1 + drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+) diff --git a/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt index 122d4e8b879b..fad8b7619647 100644 --- a/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt +++ b/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt @@ -32,6 +32,7 @@ Optional properties I2C master controller. - clock-names: See dw_hdmi.txt. The "cec" clock is optional. - clock-names: May contain "cec" as defined in dw_hdmi.txt. +- clock-names: May contain "grf", power for grf io. - clock-names: May contain "vpll", external clock for some hdmi phy. 
Example: diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index c00d7e273e84..ccd5d595ada7 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -47,6 +47,7 @@ struct rockchip_hdmi { struct drm_encoder encoder; const struct rockchip_hdmi_chip_data *chip_data; struct clk *vpll_clk; + struct clk *grf_clk; }; #define to_rockchip_hdmi(x) container_of(x, struct rockchip_hdmi, x) @@ -181,6 +182,16 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi) return PTR_ERR(hdmi->vpll_clk); } + hdmi->grf_clk = devm_clk_get(hdmi->dev, "grf"); + if (PTR_ERR(hdmi->grf_clk) == -ENOENT) { + hdmi->grf_clk = NULL; + } else if (PTR_ERR(hdmi->grf_clk) == -EPROBE_DEFER) { + return -EPROBE_DEFER; + } else if (IS_ERR(hdmi->grf_clk)) { + dev_err(hdmi->dev, "failed to get grf clock\n"); + return PTR_ERR(hdmi->grf_clk); + } + ret = clk_prepare_enable(hdmi->vpll_clk); if (ret) { dev_err(hdmi->dev, "Failed to enable HDMI vpll: %d\n", ret); @@ -246,10 +257,17 @@ static void dw_hdmi_rockchip_encoder_enable(struct drm_encoder *encoder) else val = hdmi->chip_data->lcdsel_big; + ret = clk_prepare_enable(hdmi->grf_clk); + if (ret < 0) { + dev_err(hdmi->dev, "failed to enable grfclk %d\n", ret); + return; + } + ret = regmap_write(hdmi->regmap, hdmi->chip_data->lcdsel_grf_reg, val); if (ret != 0) dev_err(hdmi->dev, "Could not write to GRF: %d\n", ret); + clk_disable_unprepare(hdmi->grf_clk); dev_dbg(hdmi->dev, "vop %s output to hdmi\n", ret ? "LIT" : "BIG"); } From 6545135a5ed2eac064f23bee3a19a81cfffbe573 Mon Sep 17 00:00:00 2001 From: Gerd Hoffmann Date: Tue, 20 Jun 2017 13:39:14 +0200 Subject: [PATCH 0065/1795] drm/qxl: fix __user annotations Drop them from u64 fields, tag local variables correctly instead. While being at it switch the code to use u64_to_user_ptr(). 
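A brief sketch of the idiom being adopted, with hypothetical struct and field names (the real ones are in the diff below): the ioctl struct carries plain __u64 fields, and the kernel side converts them to tagged user pointers only at the point of access.

	#include <linux/kernel.h>
	#include <linux/types.h>
	#include <linux/uaccess.h>

	struct example_reloc { __u32 handle; __u32 offset; };

	struct example_cmd {
		__u64 relocs;		/* userspace address carried as a u64 */
		__u32 relocs_num;
	};

	static int copy_one_reloc(const struct example_cmd *cmd,
				  unsigned int i, struct example_reloc *out)
	{
		/* u64_to_user_ptr() yields a __user-tagged pointer, so sparse
		 * is satisfied without annotating the u64 field itself. */
		struct example_reloc __user *u = u64_to_user_ptr(cmd->relocs);

		if (copy_from_user(out, u + i, sizeof(*out)))
			return -EFAULT;
		return 0;
	}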
Signed-off-by: Gerd Hoffmann Acked-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170620113916.6967-2-kraxel@redhat.com --- drivers/gpu/drm/qxl/qxl_ioctl.c | 17 +++++++++-------- include/uapi/drm/qxl_drm.h | 6 +++--- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 0b82a87916ae..31effed4a3c8 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -163,7 +163,7 @@ static int qxl_process_single_command(struct qxl_device *qdev, return -EINVAL; if (!access_ok(VERIFY_READ, - (void *)(unsigned long)cmd->command, + u64_to_user_ptr(cmd->command), cmd->command_size)) return -EFAULT; @@ -183,7 +183,9 @@ static int qxl_process_single_command(struct qxl_device *qdev, /* TODO copy slow path code from i915 */ fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); - unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size); + unwritten = __copy_from_user_inatomic_nocache + (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), + u64_to_user_ptr(cmd->command), cmd->command_size); { struct qxl_drawable *draw = fb_cmd; @@ -201,10 +203,9 @@ static int qxl_process_single_command(struct qxl_device *qdev, num_relocs = 0; for (i = 0; i < cmd->relocs_num; ++i) { struct drm_qxl_reloc reloc; + struct drm_qxl_reloc __user *u = u64_to_user_ptr(cmd->relocs); - if (copy_from_user(&reloc, - &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i], - sizeof(reloc))) { + if (copy_from_user(&reloc, u + i, sizeof(reloc))) { ret = -EFAULT; goto out_free_bos; } @@ -282,10 +283,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) { - struct drm_qxl_command *commands = - (struct drm_qxl_command *)(uintptr_t)execbuffer->commands; + struct drm_qxl_command __user *commands = + u64_to_user_ptr(execbuffer->commands); - if (copy_from_user(&user_cmd, &commands[cmd_num], + if (copy_from_user(&user_cmd, commands + cmd_num, sizeof(user_cmd))) return -EFAULT; diff --git a/include/uapi/drm/qxl_drm.h b/include/uapi/drm/qxl_drm.h index 7eef42213051..880999d2d863 100644 --- a/include/uapi/drm/qxl_drm.h +++ b/include/uapi/drm/qxl_drm.h @@ -80,8 +80,8 @@ struct drm_qxl_reloc { }; struct drm_qxl_command { - __u64 __user command; /* void* */ - __u64 __user relocs; /* struct drm_qxl_reloc* */ + __u64 command; /* void* */ + __u64 relocs; /* struct drm_qxl_reloc* */ __u32 type; __u32 command_size; __u32 relocs_num; @@ -91,7 +91,7 @@ struct drm_qxl_command { struct drm_qxl_execbuffer { __u32 flags; /* for future use */ __u32 commands_num; - __u64 __user commands; /* struct drm_qxl_command* */ + __u64 commands; /* struct drm_qxl_command* */ }; struct drm_qxl_update_area { From 45dfe57771b141c41d5b609b0a0aaf7f4ddb8942 Mon Sep 17 00:00:00 2001 From: Gerd Hoffmann Date: Tue, 20 Jun 2017 13:39:15 +0200 Subject: [PATCH 0066/1795] drm/qxl: declare a bunch of functions as static Flagged by sparse. 
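The change itself is mechanical: sparse flags functions that are used only within one file but have no declaration, and the fix is to mark them static so the symbol stays local to that translation unit. A trivial illustration (function name hypothetical):

	/* before: sparse complains the symbol has no declaration */
	int example_helper(void) { return 0; }

	/* after: local to this file, warning gone */
	static int example_helper(void) { return 0; }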
Signed-off-by: Gerd Hoffmann Acked-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170620113916.6967-3-kraxel@redhat.com --- drivers/gpu/drm/qxl/qxl_display.c | 18 +++++++++--------- drivers/gpu/drm/qxl/qxl_object.c | 4 ++-- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index f5ef81595f5a..0059ea42736d 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -437,7 +437,7 @@ static void qxl_monitors_config_set(struct qxl_device *qdev, } -void qxl_mode_set_nofb(struct drm_crtc *crtc) +static void qxl_mode_set_nofb(struct drm_crtc *crtc) { struct qxl_device *qdev = crtc->dev->dev_private; struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); @@ -475,8 +475,8 @@ static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = { .atomic_flush = qxl_crtc_atomic_flush, }; -int qxl_primary_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) +static int qxl_primary_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) { struct qxl_device *qdev = plane->dev->dev_private; struct qxl_framebuffer *qfb; @@ -547,8 +547,8 @@ static void qxl_primary_atomic_disable(struct drm_plane *plane, } } -int qxl_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) +static int qxl_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) { return 0; } @@ -647,8 +647,8 @@ out_free_release: } -void qxl_cursor_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) +static void qxl_cursor_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) { struct qxl_device *qdev = plane->dev->dev_private; struct qxl_release *release; @@ -675,8 +675,8 @@ void qxl_cursor_atomic_disable(struct drm_plane *plane, qxl_release_fence_buffer_objects(release); } -int qxl_plane_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *new_state) +static int qxl_plane_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state) { struct drm_gem_object *obj; struct qxl_bo *user_bo; diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c index 9a7eef7dd604..0a67ddf19c3d 100644 --- a/drivers/gpu/drm/qxl/qxl_object.c +++ b/drivers/gpu/drm/qxl/qxl_object.c @@ -221,7 +221,7 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo) return bo; } -int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) +static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) { struct drm_device *ddev = bo->gem_base.dev; int r; @@ -244,7 +244,7 @@ int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) return r; } -int __qxl_bo_unpin(struct qxl_bo *bo) +static int __qxl_bo_unpin(struct qxl_bo *bo) { struct drm_device *ddev = bo->gem_base.dev; int r, i; From 747fddd4e85369630c212e04a78caf90e2ec8f48 Mon Sep 17 00:00:00 2001 From: Gerd Hoffmann Date: Tue, 20 Jun 2017 13:39:16 +0200 Subject: [PATCH 0067/1795] drm/qxl: move extern variable declaration header file Flagged by sparse. 
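The underlying idiom, sketched with a hypothetical variable: the header holds the one extern declaration, a single .c file holds the definition, and every user (including the defining file) includes the header so the compiler and sparse can check that the two agree.

	/* example.h */
	extern int example_max_ioctls;

	/* example_main.c - the one definition */
	#include "example.h"
	int example_max_ioctls = 16;

	/* example_drv.c - no local extern needed */
	#include "example.h"
	int example_limit(void) { return example_max_ioctls; }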
Signed-off-by: Gerd Hoffmann Acked-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170620113916.6967-4-kraxel@redhat.com --- drivers/gpu/drm/qxl/qxl_drv.c | 1 - drivers/gpu/drm/qxl/qxl_drv.h | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 15c97b16ee21..403e135895bf 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -37,7 +37,6 @@ #include "qxl_drv.h" #include "qxl_object.h" -extern int qxl_max_ioctls; static const struct pci_device_id pciidlist[] = { { 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0 }, diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 3591d2330a09..3397a1907336 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -64,6 +64,7 @@ extern int qxl_log_level; extern int qxl_num_crtc; +extern int qxl_max_ioctls; enum { QXL_INFO_LEVEL = 1, From 91e976777d4ad068ef61b005a192457a879c81dc Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 21 Jun 2017 10:28:38 +0200 Subject: [PATCH 0068/1795] drm/amd|radeon: Drop drm_vblank_cleanup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Both drivers shut down all crtc beforehand already, which will shut up any pending vblank (the only thing vblank_cleanup really does is disable the disable timer). Hence we don't need this here and can remove it. Cc: Alex Deucher Cc: Christian König Acked-by: Alex Deucher Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170621082850.13224-2-daniel.vetter@ffwll.ch --- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 1 - drivers/gpu/drm/radeon/radeon_irq_kms.c | 1 - 2 files changed, 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index a6b7e367a860..4a15be658935 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -262,7 +262,6 @@ void amdgpu_irq_fini(struct amdgpu_device *adev) { unsigned i, j; - drm_vblank_cleanup(adev->ddev); if (adev->irq.installed) { drm_irq_uninstall(adev->ddev); adev->irq.installed = false; diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 1b7528df7f7f..3efe07f3e3db 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -324,7 +324,6 @@ int radeon_irq_kms_init(struct radeon_device *rdev) */ void radeon_irq_kms_fini(struct radeon_device *rdev) { - drm_vblank_cleanup(rdev->ddev); if (rdev->irq.installed) { drm_irq_uninstall(rdev->ddev); rdev->irq.installed = false; From 8f6ece97da008f05fb01ab6964f58dedf860877e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 21 Jun 2017 10:28:43 +0200 Subject: [PATCH 0069/1795] drm/mxsfb: Drop drm_vblank_cleanup Almost right but still racy, it's called before the interrupts are uninstalled. So let's just drop it. 
Cc: Marek Vasut Reviewed-by: Marek Vasut Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170621082850.13224-7-daniel.vetter@ffwll.ch --- drivers/gpu/drm/mxsfb/mxsfb_drv.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index d1b9c34c7c00..1853557fef46 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -256,7 +256,6 @@ static void mxsfb_unload(struct drm_device *drm) drm_kms_helper_poll_fini(drm); drm_mode_config_cleanup(drm); - drm_vblank_cleanup(drm); pm_runtime_get_sync(drm->dev); drm_irq_uninstall(drm); From c03e53b030025e9243b6e5cb717ad143f3a442ce Mon Sep 17 00:00:00 2001 From: Puthikorn Voravootivat Date: Thu, 22 Jun 2017 12:03:37 -0700 Subject: [PATCH 0070/1795] drm/i915: Set PWM divider to match desired frequency in vbt Read desired PWM frequency from panel vbt and calculate the value for divider in DPCD address 0x724 and 0x728 to have as many bits as possible for PWM duty cyle for granularity of brightness adjustment while the frequency divisor is still within 25% of the desired value. Signed-off-by: Puthikorn Voravootivat Reviewed-by: Dhinakaran Pandiyan Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170622190339.142671-2-puthik@chromium.org --- drivers/gpu/drm/i915/intel_dp_aux_backlight.c | 98 +++++++++++++++++-- 1 file changed, 90 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c index 228ca06d9f0b..d2830ba3162e 100644 --- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c @@ -98,13 +98,87 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev } } +/* + * Set PWM Frequency divider to match desired frequency in vbt. + * The PWM Frequency is calculated as 27Mhz / (F x P). + * - Where F = PWM Frequency Pre-Divider value programmed by field 7:0 of the + * EDP_BACKLIGHT_FREQ_SET register (DPCD Address 00728h) + * - Where P = 2^Pn, where Pn is the value programmed by field 4:0 of the + * EDP_PWMGEN_BIT_COUNT register (DPCD Address 00724h) + */ +static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1; + u8 pn, pn_min, pn_max; + + /* Find desired value of (F x P) + * Note that, if F x P is out of supported range, the maximum value or + * minimum value will applied automatically. So no need to check that. + */ + freq = dev_priv->vbt.backlight.pwm_freq_hz; + DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", freq); + if (!freq) { + DRM_DEBUG_KMS("Use panel default backlight frequency\n"); + return false; + } + + fxp = DIV_ROUND_CLOSEST(KHz(DP_EDP_BACKLIGHT_FREQ_BASE_KHZ), freq); + + /* Use highest possible value of Pn for more granularity of brightness + * adjustment while satifying the conditions below. + * - Pn is in the range of Pn_min and Pn_max + * - F is in the range of 1 and 255 + * - FxP is within 25% of desired value. + * Note: 25% is arbitrary value and may need some tweak. 
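+ * A worked example with hypothetical numbers: for a VBT frequency of
+ * 200 Hz, F x P = 27,000,000 / 200 = 135,000. If the panel reports
+ * Pn_max = 10, the very first try fits: F = 135,000 / 2^10 ~= 132, so
+ * F x P = 132 * 1024 = 135,168, which is within 25% of the target,
+ * and Pn = 10, F = 132 get programmed.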
+ */ + if (drm_dp_dpcd_readb(&intel_dp->aux, + DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min) != 1) { + DRM_DEBUG_KMS("Failed to read pwmgen bit count cap min\n"); + return false; + } + if (drm_dp_dpcd_readb(&intel_dp->aux, + DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max) != 1) { + DRM_DEBUG_KMS("Failed to read pwmgen bit count cap max\n"); + return false; + } + pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK; + pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK; + + fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4); + fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4); + if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) { + DRM_DEBUG_KMS("VBT defined backlight frequency out of range\n"); + return false; + } + + for (pn = pn_max; pn >= pn_min; pn--) { + f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255); + fxp_actual = f << pn; + if (fxp_min <= fxp_actual && fxp_actual <= fxp_max) + break; + } + + if (drm_dp_dpcd_writeb(&intel_dp->aux, + DP_EDP_PWMGEN_BIT_COUNT, pn) < 0) { + DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n"); + return false; + } + if (drm_dp_dpcd_writeb(&intel_dp->aux, + DP_EDP_BACKLIGHT_FREQ_SET, (u8) f) < 0) { + DRM_DEBUG_KMS("Failed to write aux backlight freq\n"); + return false; + } + return true; +} + static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); - uint8_t dpcd_buf = 0; - uint8_t edp_backlight_mode = 0; + uint8_t dpcd_buf, new_dpcd_buf, edp_backlight_mode; if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) { @@ -113,18 +187,15 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st return; } + new_dpcd_buf = dpcd_buf; edp_backlight_mode = dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; switch (edp_backlight_mode) { case DP_EDP_BACKLIGHT_CONTROL_MODE_PWM: case DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET: case DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT: - dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; - dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD; - if (drm_dp_dpcd_writeb(&intel_dp->aux, - DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf) < 0) { - DRM_DEBUG_KMS("Failed to write aux backlight mode\n"); - } + new_dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; + new_dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD; break; /* Do nothing when it is already DPCD mode */ @@ -133,6 +204,17 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st break; } + if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP) + if (intel_dp_aux_set_pwm_freq(connector)) + new_dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE; + + if (new_dpcd_buf != dpcd_buf) { + if (drm_dp_dpcd_writeb(&intel_dp->aux, + DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf) < 0) { + DRM_DEBUG_KMS("Failed to write aux backlight mode\n"); + } + } + set_aux_backlight_enable(intel_dp, true); intel_dp_aux_set_backlight(conn_state, connector->panel.backlight.level); } From 560a758d39c616f83ac25ff6e0816a49ebe6401c Mon Sep 17 00:00:00 2001 From: Puthikorn Voravootivat Date: Thu, 22 Jun 2017 12:03:38 -0700 Subject: [PATCH 0071/1795] drm/i915: Add heuristic to determine better way to adjust brightness Add heuristic to decide that AUX or PWM pin should use for backlight brightness adjustment and modify i915 param description to have auto, force disable, and force enable. 
The heuristic to determine that using AUX pin is better than using PWM pin is that the panel support any of the feature list here. - Regional backlight brightness adjustment - Backlight PWM frequency set - More than 8 bits resolution of brightness level - Backlight enablement via AUX and not by BL_ENABLE pin Signed-off-by: Puthikorn Voravootivat Reviewed-by: Dhinakaran Pandiyan Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170622190339.142671-3-puthik@chromium.org --- drivers/gpu/drm/i915/i915_params.c | 7 ++- drivers/gpu/drm/i915/i915_params.h | 2 +- drivers/gpu/drm/i915/intel_dp_aux_backlight.c | 61 ++++++++++++++++++- 3 files changed, 63 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 14e2c2e57f96..5b5ab15d191f 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -63,7 +63,7 @@ struct i915_params i915 __read_mostly = { .huc_firmware_path = NULL, .enable_dp_mst = true, .inject_load_failure = 0, - .enable_dpcd_backlight = false, + .enable_dpcd_backlight = -1, .enable_gvt = false, }; @@ -246,9 +246,10 @@ MODULE_PARM_DESC(enable_dp_mst, module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400); MODULE_PARM_DESC(inject_load_failure, "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)"); -module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600); +module_param_named_unsafe(enable_dpcd_backlight, i915.enable_dpcd_backlight, int, 0600); MODULE_PARM_DESC(enable_dpcd_backlight, - "Enable support for DPCD backlight control (default:false)"); + "Enable support for DPCD backlight control " + "(-1:auto (default), 0:force disable, 1:force enabled if supported"); module_param_named(enable_gvt, i915.enable_gvt, bool, 0400); MODULE_PARM_DESC(enable_gvt, diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index febbfdbd30bd..0d6cf9138dc4 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -53,6 +53,7 @@ func(int, edp_vswing); \ func(int, reset); \ func(unsigned int, inject_load_failure); \ + func(int, enable_dpcd_backlight); \ /* leave bools at the end to not create holes */ \ func(bool, alpha_support); \ func(bool, enable_cmd_parser); \ @@ -66,7 +67,6 @@ func(bool, verbose_state_checks); \ func(bool, nuclear_pageflip); \ func(bool, enable_dp_mst); \ - func(bool, enable_dpcd_backlight); \ func(bool, enable_gvt) #define MEMBER(T, member) T member diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c index d2830ba3162e..fea161727c6e 100644 --- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c @@ -251,15 +251,66 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector) /* Check the eDP Display control capabilities registers to determine if * the panel can support backlight control over the aux channel */ - if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP && - (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP) && - !(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) { + if ((intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP) && + (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP)) { DRM_DEBUG_KMS("AUX Backlight Control Supported!\n"); return true; } return false; } +/* + * 
Heuristic function whether we should use AUX for backlight adjustment or not. + * + * We should use AUX for backlight brightness adjustment if panel doesn't this + * via PWM pin or using AUX is better than using PWM pin. + * + * The heuristic to determine that using AUX pin is better than using PWM pin is + * that the panel support any of the feature list here. + * - Regional backlight brightness adjustment + * - Backlight PWM frequency set + * - More than 8 bits resolution of brightness level + * - Backlight enablement via AUX and not by BL_ENABLE pin + * + * If all above are not true, assume that using PWM pin is better. + */ +static bool +intel_dp_aux_display_control_heuristic(struct intel_connector *connector) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + uint8_t reg_val; + + /* Panel doesn't support adjusting backlight brightness via PWN pin */ + if (!(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) + return true; + + /* Panel supports regional backlight brightness adjustment */ + if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_GENERAL_CAP_3, + ®_val) != 1) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_GENERAL_CAP_3); + return false; + } + if (reg_val > 0) + return true; + + /* Panel supports backlight PWM frequency set */ + if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP) + return true; + + /* Panel supports more than 8 bits resolution of brightness level */ + if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) + return true; + + /* Panel supports enabling backlight via AUX but not by BL_ENABLE pin */ + if ((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) && + !(intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_PIN_ENABLE_CAP)) + return true; + + return false; + +} + int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector) { struct intel_panel *panel = &intel_connector->panel; @@ -270,6 +321,10 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector) if (!intel_dp_aux_display_control_capable(intel_connector)) return -ENODEV; + if (i915.enable_dpcd_backlight == -1 && + !intel_dp_aux_display_control_heuristic(intel_connector)) + return -ENODEV; + panel->backlight.setup = intel_dp_aux_setup_backlight; panel->backlight.enable = intel_dp_aux_enable_backlight; panel->backlight.disable = intel_dp_aux_disable_backlight; From ae25eceab616d16a07bcaa434b84463d58a3bdc3 Mon Sep 17 00:00:00 2001 From: Puthikorn Voravootivat Date: Thu, 22 Jun 2017 12:03:39 -0700 Subject: [PATCH 0072/1795] drm/i915: Add option to support dynamic backlight via DPCD This patch adds option to enable dynamic backlight for eDP panel that supports this feature via DPCD register and set minimum / maximum brightness to 0% and 100% of the normal brightness. 
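In the 5% units used by the dynamic backlight control registers (see the helper added below), that range encodes as DIV_ROUND_CLOSEST(0, 5) = 0 and DIV_ROUND_CLOSEST(100, 5) = 20, i.e. the two bytes {0x00, 0x14} written consecutively starting at DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET.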
Signed-off-by: Puthikorn Voravootivat Reviewed-by: Dhinakaran Pandiyan Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170622190339.142671-4-puthik@chromium.org --- drivers/gpu/drm/i915/i915_params.c | 5 ++++ drivers/gpu/drm/i915/i915_params.h | 3 ++- drivers/gpu/drm/i915/intel_dp_aux_backlight.c | 26 +++++++++++++++++++ 3 files changed, 33 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 5b5ab15d191f..88b9d3e6713a 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -65,6 +65,7 @@ struct i915_params i915 __read_mostly = { .inject_load_failure = 0, .enable_dpcd_backlight = -1, .enable_gvt = false, + .enable_dbc = true, }; module_param_named(modeset, i915.modeset, int, 0400); @@ -254,3 +255,7 @@ MODULE_PARM_DESC(enable_dpcd_backlight, module_param_named(enable_gvt, i915.enable_gvt, bool, 0400); MODULE_PARM_DESC(enable_gvt, "Enable support for Intel GVT-g graphics virtualization host support(default:false)"); + +module_param_named_unsafe(enable_dbc, i915.enable_dbc, bool, 0600); +MODULE_PARM_DESC(enable_dbc, + "Enable support for dynamic backlight control (default:true)"); diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 0d6cf9138dc4..057e203e6bda 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -67,7 +67,8 @@ func(bool, verbose_state_checks); \ func(bool, nuclear_pageflip); \ func(bool, enable_dp_mst); \ - func(bool, enable_gvt) + func(bool, enable_gvt); \ + func(bool, enable_dbc) #define MEMBER(T, member) T member struct i915_params { diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c index fea161727c6e..b25cd88fc1c5 100644 --- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c @@ -173,6 +173,24 @@ static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector) return true; } +/* +* Set minimum / maximum dynamic brightness percentage. This value is expressed +* as the percentage of normal brightness in 5% increments. 
+*/ +static bool +intel_dp_aux_set_dynamic_backlight_percent(struct intel_dp *intel_dp, + u32 min, u32 max) +{ + u8 dbc[] = { DIV_ROUND_CLOSEST(min, 5), DIV_ROUND_CLOSEST(max, 5) }; + + if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, + dbc, sizeof(dbc)) < 0) { + DRM_DEBUG_KMS("Failed to write aux DBC brightness level\n"); + return false; + } + return true; +} + static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { @@ -208,6 +226,14 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st if (intel_dp_aux_set_pwm_freq(connector)) new_dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE; + if (i915.enable_dbc && + (intel_dp->edp_dpcd[2] & DP_EDP_DYNAMIC_BACKLIGHT_CAP)) { + if(intel_dp_aux_set_dynamic_backlight_percent(intel_dp, 0, 100)) { + new_dpcd_buf |= DP_EDP_DYNAMIC_BACKLIGHT_ENABLE; + DRM_DEBUG_KMS("Enable dynamic brightness.\n"); + } + } + if (new_dpcd_buf != dpcd_buf) { if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf) < 0) { From 2bf5ccc25c5bc3c8eaf30c589f7d3a5e7490d3b9 Mon Sep 17 00:00:00 2001 From: kbuild test robot Date: Fri, 23 Jun 2017 17:54:18 +0800 Subject: [PATCH 0073/1795] drm: arcpgu: arc_pgu_crtc_mode_valid() can be static Signed-off-by: Fengguang Wu Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170623095418.GA68865@lkp-sbx04 --- drivers/gpu/drm/arc/arcpgu_crtc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c index 99fbdaecf100..611af74a31c0 100644 --- a/drivers/gpu/drm/arc/arcpgu_crtc.c +++ b/drivers/gpu/drm/arc/arcpgu_crtc.c @@ -64,8 +64,8 @@ static const struct drm_crtc_funcs arc_pgu_crtc_funcs = { .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, }; -enum drm_mode_status arc_pgu_crtc_mode_valid(struct drm_crtc *crtc, - const struct drm_display_mode *mode) +static enum drm_mode_status arc_pgu_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) { struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc); long rate, clk_rate = mode->clock * 1000; From 71bb23c707c141b176bc084179ca5ee58d5fd26a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 22 Jun 2017 14:46:17 +0100 Subject: [PATCH 0074/1795] drm/vgem: Pin our pages for dmabuf exports When the caller maps their dmabuf and we return an sg_table, the caller doesn't expect the pages beneath that sg_table to vanish on a whim (i.e. under mempressure). The contract is that the pages are pinned for the duration of the mapping (from dma_buf_map_attachment() to dma_buf_unmap_attachment). To comply, we need to introduce our own vgem_object.pages_pin_count and elevate it across the mapping. However, the drm_prime interface we use calls drv->prime_pin on dma_buf_attach and drv->prime_unpin on dma_buf_detach, which while that does cover the mapping is much broader than is desired -- but it will do for now. 
v2: also hold the pin across prime_vmap/vunmap Reported-by: Tomi Sarvela Testcase: igt/gem_concurrent_blit/*swap*vgem* Fixes: 5ba6c9ff961a ("drm/vgem: Fix mmaping") Signed-off-by: Chris Wilson Cc: Tomi Sarvela Cc: Laura Abbott Cc: Sean Paul Cc: Matthew Auld Cc: Daniel Vetter Cc: # needs a backport Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170622134617.17912-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/vgem/vgem_drv.c | 83 ++++++++++++++++++++++++--------- drivers/gpu/drm/vgem/vgem_drv.h | 4 ++ 2 files changed, 65 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 18f401b442c2..c938af8c40cf 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -52,6 +52,7 @@ static void vgem_gem_free_object(struct drm_gem_object *obj) struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj); kvfree(vgem_obj->pages); + mutex_destroy(&vgem_obj->pages_lock); if (obj->import_attach) drm_prime_gem_destroy(obj, vgem_obj->table); @@ -76,11 +77,15 @@ static int vgem_gem_fault(struct vm_fault *vmf) if (page_offset > num_pages) return VM_FAULT_SIGBUS; + ret = -ENOENT; + mutex_lock(&obj->pages_lock); if (obj->pages) { get_page(obj->pages[page_offset]); vmf->page = obj->pages[page_offset]; ret = 0; - } else { + } + mutex_unlock(&obj->pages_lock); + if (ret) { struct page *page; page = shmem_read_mapping_page( @@ -161,6 +166,8 @@ static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev, return ERR_PTR(ret); } + mutex_init(&obj->pages_lock); + return obj; } @@ -274,37 +281,66 @@ static const struct file_operations vgem_driver_fops = { .release = drm_release, }; +static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo) +{ + mutex_lock(&bo->pages_lock); + if (bo->pages_pin_count++ == 0) { + struct page **pages; + + pages = drm_gem_get_pages(&bo->base); + if (IS_ERR(pages)) { + bo->pages_pin_count--; + mutex_unlock(&bo->pages_lock); + return pages; + } + + bo->pages = pages; + } + mutex_unlock(&bo->pages_lock); + + return bo->pages; +} + +static void vgem_unpin_pages(struct drm_vgem_gem_object *bo) +{ + mutex_lock(&bo->pages_lock); + if (--bo->pages_pin_count == 0) { + drm_gem_put_pages(&bo->base, bo->pages, true, true); + bo->pages = NULL; + } + mutex_unlock(&bo->pages_lock); +} + static int vgem_prime_pin(struct drm_gem_object *obj) { + struct drm_vgem_gem_object *bo = to_vgem_bo(obj); long n_pages = obj->size >> PAGE_SHIFT; struct page **pages; + pages = vgem_pin_pages(bo); + if (IS_ERR(pages)) + return PTR_ERR(pages); + /* Flush the object from the CPU cache so that importers can rely * on coherent indirect access via the exported dma-address. 
*/ - pages = drm_gem_get_pages(obj); - if (IS_ERR(pages)) - return PTR_ERR(pages); - drm_clflush_pages(pages, n_pages); - drm_gem_put_pages(obj, pages, true, false); return 0; } +static void vgem_prime_unpin(struct drm_gem_object *obj) +{ + struct drm_vgem_gem_object *bo = to_vgem_bo(obj); + + vgem_unpin_pages(bo); +} + static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj) { - struct sg_table *st; - struct page **pages; + struct drm_vgem_gem_object *bo = to_vgem_bo(obj); - pages = drm_gem_get_pages(obj); - if (IS_ERR(pages)) - return ERR_CAST(pages); - - st = drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT); - drm_gem_put_pages(obj, pages, false, false); - - return st; + return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT); } static struct drm_gem_object* vgem_prime_import(struct drm_device *dev, @@ -333,6 +369,8 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev, __vgem_gem_destroy(obj); return ERR_PTR(-ENOMEM); } + + obj->pages_pin_count++; /* perma-pinned */ drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL, npages); return &obj->base; @@ -340,23 +378,23 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev, static void *vgem_prime_vmap(struct drm_gem_object *obj) { + struct drm_vgem_gem_object *bo = to_vgem_bo(obj); long n_pages = obj->size >> PAGE_SHIFT; struct page **pages; - void *addr; - pages = drm_gem_get_pages(obj); + pages = vgem_pin_pages(bo); if (IS_ERR(pages)) return NULL; - addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL)); - drm_gem_put_pages(obj, pages, false, false); - - return addr; + return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL)); } static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) { + struct drm_vgem_gem_object *bo = to_vgem_bo(obj); + vunmap(vaddr); + vgem_unpin_pages(bo); } static int vgem_prime_mmap(struct drm_gem_object *obj, @@ -409,6 +447,7 @@ static struct drm_driver vgem_driver = { .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_pin = vgem_prime_pin, + .gem_prime_unpin = vgem_prime_unpin, .gem_prime_import = vgem_prime_import, .gem_prime_export = drm_gem_prime_export, .gem_prime_import_sg_table = vgem_prime_import_sg_table, diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h index 1aae01419112..5c8f6d619ff3 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.h +++ b/drivers/gpu/drm/vgem/vgem_drv.h @@ -43,7 +43,11 @@ struct vgem_file { #define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base) struct drm_vgem_gem_object { struct drm_gem_object base; + struct page **pages; + unsigned int pages_pin_count; + struct mutex pages_lock; + struct sg_table *table; }; From 36703e79a982c8ce5a8e43833291f2719e92d0d1 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 22 Jun 2017 11:56:25 +0100 Subject: [PATCH 0075/1795] drm/i915: Break modeset deadlocks on reset Trying to do a modeset from within a reset is fraught with danger. We can fall into a cyclic deadlock where the modeset is waiting on a previous modeset that is waiting on a request, and since the GPU hung that request completion is waiting on the reset. As modesetting doesn't allow its locks to be broken and restarted, or for its *own* reset mechanism to take over the display, we have to do something very evil instead. 
If we detect that we are stuck waiting to prepare the display reset (by using a very simple timeout), resort to cancelling all in-flight requests and throwing the user data into /dev/null, which is marginally better than the driver locking up and keeping that data to itself. This is not a fix; this is just a workaround that unbreaks machines until we can resolve the deadlock in a way that doesn't lose data! v2: Move the retirement from set-wegded to the i915_reset() error path, after which we no longer any delayed worker cleanup for i915_handle_error() v3: C abuse for syntactic sugar v4: Cover all waits with the timeout to catch more driver breakage References: https://bugs.freedesktop.org/show_bug.cgi?id=99093 Signed-off-by: Chris Wilson Cc: Maarten Lankhorst Cc: Mika Kuoppala Cc: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20170622105625.16952-1-chris@chris-wilson.co.uk Reviewed-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/i915_drv.c | 1 + drivers/gpu/drm/i915/i915_gem.c | 18 ++------ drivers/gpu/drm/i915/i915_irq.c | 80 ++++++++++++++++++++++++--------- 3 files changed, 65 insertions(+), 34 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 370429e2071f..43e925933688 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1919,6 +1919,7 @@ wakeup: error: i915_gem_set_wedged(dev_priv); + i915_gem_retire_requests(dev_priv); goto finish; } diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ae3ce1314bd1..36d838677982 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3062,7 +3062,8 @@ static void engine_set_wedged(struct intel_engine_cs *engine) /* Mark all executing requests as skipped */ spin_lock_irqsave(&engine->timeline->lock, flags); list_for_each_entry(request, &engine->timeline->requests, link) - dma_fence_set_error(&request->fence, -EIO); + if (!i915_gem_request_completed(request)) + dma_fence_set_error(&request->fence, -EIO); spin_unlock_irqrestore(&engine->timeline->lock, flags); /* Mark all pending requests as complete so that any concurrent @@ -3108,6 +3109,7 @@ static int __i915_gem_set_wedged_BKL(void *data) struct intel_engine_cs *engine; enum intel_engine_id id; + set_bit(I915_WEDGED, &i915->gpu_error.flags); for_each_engine(engine, i915, id) engine_set_wedged(engine); @@ -3116,20 +3118,7 @@ static int __i915_gem_set_wedged_BKL(void *data) void i915_gem_set_wedged(struct drm_i915_private *dev_priv) { - lockdep_assert_held(&dev_priv->drm.struct_mutex); - set_bit(I915_WEDGED, &dev_priv->gpu_error.flags); - - /* Retire completed requests first so the list of inflight/incomplete - * requests is accurate and we don't try and mark successful requests - * as in error during __i915_gem_set_wedged_BKL(). - */ - i915_gem_retire_requests(dev_priv); - stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL); - - i915_gem_contexts_lost(dev_priv); - - mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0); } bool i915_gem_unset_wedged(struct drm_i915_private *i915) @@ -3184,6 +3173,7 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915) * context and do not require stop_machine(). 
*/ intel_engines_reset_default_submission(i915); + i915_gem_contexts_lost(i915); smp_mb__before_atomic(); /* complete takeover before enabling execbuf */ clear_bit(I915_WEDGED, &i915->gpu_error.flags); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index f25e73fe567c..e4934d5adc9e 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2599,6 +2599,46 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) return ret; } +struct wedge_me { + struct delayed_work work; + struct drm_i915_private *i915; + const char *name; +}; + +static void wedge_me(struct work_struct *work) +{ + struct wedge_me *w = container_of(work, typeof(*w), work.work); + + dev_err(w->i915->drm.dev, + "%s timed out, cancelling all in-flight rendering.\n", + w->name); + i915_gem_set_wedged(w->i915); +} + +static void __init_wedge(struct wedge_me *w, + struct drm_i915_private *i915, + long timeout, + const char *name) +{ + w->i915 = i915; + w->name = name; + + INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me); + schedule_delayed_work(&w->work, timeout); +} + +static void __fini_wedge(struct wedge_me *w) +{ + cancel_delayed_work_sync(&w->work); + destroy_delayed_work_on_stack(&w->work); + w->i915 = NULL; +} + +#define i915_wedge_on_timeout(W, DEV, TIMEOUT) \ + for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \ + (W)->i915; \ + __fini_wedge((W))) + /** * i915_reset_device - do process context error handling work * @dev_priv: i915 device private @@ -2612,36 +2652,36 @@ static void i915_reset_device(struct drm_i915_private *dev_priv) char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; + struct wedge_me w; kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); DRM_DEBUG_DRIVER("resetting chip\n"); kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); - intel_prepare_reset(dev_priv); + /* Use a watchdog to ensure that our reset completes */ + i915_wedge_on_timeout(&w, dev_priv, 5*HZ) { + intel_prepare_reset(dev_priv); - set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags); - wake_up_all(&dev_priv->gpu_error.wait_queue); + /* Signal that locked waiters should reset the GPU */ + set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags); + wake_up_all(&dev_priv->gpu_error.wait_queue); - do { - /* - * All state reset _must_ be completed before we update the - * reset counter, for otherwise waiters might miss the reset - * pending state and not properly drop locks, resulting in - * deadlocks with the reset work. + /* Wait for anyone holding the lock to wakeup, without + * blocking indefinitely on struct_mutex. 
*/ - if (mutex_trylock(&dev_priv->drm.struct_mutex)) { - i915_reset(dev_priv); - mutex_unlock(&dev_priv->drm.struct_mutex); - } + do { + if (mutex_trylock(&dev_priv->drm.struct_mutex)) { + i915_reset(dev_priv); + mutex_unlock(&dev_priv->drm.struct_mutex); + } + } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags, + I915_RESET_HANDOFF, + TASK_UNINTERRUPTIBLE, + 1)); - /* We need to wait for anyone holding the lock to wakeup */ - } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags, - I915_RESET_HANDOFF, - TASK_UNINTERRUPTIBLE, - HZ)); - - intel_finish_reset(dev_priv); + intel_finish_reset(dev_priv); + } if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags)) kobject_uevent_env(kobj, From b3df5e65cc03696b0624a877d03a3ddf3ef43f52 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 21 Jun 2017 10:28:39 +0200 Subject: [PATCH 0076/1795] drm/hibmc: Drop drm_vblank_cleanup So this seems to be the first driver that does it the right way round, so fix it up by calling drm_atomic_helper_shutdown instead. We need to do that before the last kms user is gone (fbdev emulation), but before we start shutting down hw stuff like interrupts. Cc: Xinliang Liu Cc: Rongrong Zou Cc: Xinwei Kong Cc: Chen Feng Reviewed-by: Sean Paul Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170621082850.13224-3-daniel.vetter@ffwll.ch --- drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index 2ffdbf9801bd..4d018ca98581 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -276,11 +276,12 @@ static int hibmc_unload(struct drm_device *dev) hibmc_fbdev_fini(priv); + drm_atomic_helper_shutdown(dev); + if (dev->irq_enabled) drm_irq_uninstall(dev); if (priv->msi_enabled) pci_disable_msi(dev->pdev); - drm_vblank_cleanup(dev); hibmc_kms_fini(priv); hibmc_mm_fini(priv); From 20db9a2a5a4cefdb41439c575dd74061354796b9 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 21 Jun 2017 10:28:40 +0200 Subject: [PATCH 0077/1795] drm/kirin: Drop drm_vblank_cleanup Again we probably want a drm_atomic_helper_shutdown somewhere, but that's a bit more analysis. Cc: Xinliang Liu Cc: Rongrong Zou Cc: Xinwei Kong Cc: Chen Feng Reviewed-by: Sean Paul Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170621082850.13224-4-daniel.vetter@ffwll.ch --- drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c index 9c903672f582..8065d6cb1d7f 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c @@ -41,7 +41,6 @@ static int kirin_drm_kms_cleanup(struct drm_device *dev) } #endif drm_kms_helper_poll_fini(dev); - drm_vblank_cleanup(dev); dc_ops->cleanup(to_platform_device(dev->dev)); drm_mode_config_cleanup(dev); devm_kfree(dev->dev, priv); From baf54385af7856eab151edb0822dc95e7f5c8b14 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 21 Jun 2017 10:28:41 +0200 Subject: [PATCH 0078/1795] drm/i915: Drop drm_vblank_cleanup On the load error path we can't have pending vblank interrupts, and on unload we already call drm_atomic_helper_shutdown beforehand! So all good to nuke it. 
Reviewed-by: Sean Paul Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170621082850.13224-5-daniel.vetter@ffwll.ch --- drivers/gpu/drm/i915/i915_drv.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 6033355d9469..8d583a2fbf46 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1264,7 +1264,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) ret = i915_load_modeset_init(&dev_priv->drm); if (ret < 0) - goto out_cleanup_vblank; + goto out_cleanup_hw; i915_driver_register(dev_priv); @@ -1285,8 +1285,6 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; -out_cleanup_vblank: - drm_vblank_cleanup(&dev_priv->drm); out_cleanup_hw: i915_driver_cleanup_hw(dev_priv); out_cleanup_mmio: @@ -1322,8 +1320,6 @@ void i915_driver_unload(struct drm_device *dev) i915_driver_unregister(dev_priv); - drm_vblank_cleanup(dev); - intel_modeset_cleanup(dev); /* From ed173e55c4948582b0d7800395875a5d4b9633fb Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 21 Jun 2017 10:28:42 +0200 Subject: [PATCH 0079/1795] drm/mtk: Drop drm_vblank_cleanup Seems entirely cargo-culted. Cc: CK Hu Cc: Philipp Zabel Reviewed-by: Sean Paul Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170621082850.13224-6-daniel.vetter@ffwll.ch Link: http://patchwork.freedesktop.org/patch/msgid/20170621082850.13224-7-daniel.vetter@ffwll.ch --- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index f6c8ec4c7dbc..56f802d0a51c 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -266,7 +266,6 @@ static void mtk_drm_kms_deinit(struct drm_device *drm) { drm_kms_helper_poll_fini(drm); - drm_vblank_cleanup(drm); component_unbind_all(drm->dev, drm); drm_mode_config_cleanup(drm); } From 0265ac99ede927277627c85ef2bca4940014f5c1 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 21 Jun 2017 10:28:44 +0200 Subject: [PATCH 0080/1795] drm/nouveau: Drop drm_vblank_cleanup nouveau_display_vblank_fini is called in the load error path (where it doesn't matter) and module unload (where vblanks have been shut down correctly already through drm_vblank_off), we can drop it. 
Cc: Ben Skeggs Cc: nouveau@lists.freedesktop.org Reviewed-by: Sean Paul Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170621082850.13224-8-daniel.vetter@ffwll.ch --- drivers/gpu/drm/nouveau/nouveau_display.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 6718c84fb862..0f3af939f5aa 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -159,8 +159,6 @@ nouveau_display_vblank_fini(struct drm_device *dev) { struct drm_crtc *crtc; - drm_vblank_cleanup(dev); - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); nvif_notify_fini(&nv_crtc->vblank); From 3f5857fc6284da371868c2216a96d6d392185b5e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 21 Jun 2017 10:28:45 +0200 Subject: [PATCH 0081/1795] drm/rockchip: Drop drm_vblank_cleanup Either not relevant (in the load error paths) or done better already (in the unload code, by calling drm_atomic_helper_shutdown). Drop it. Cc: Mark Yao Reviewed-by: Sean Paul Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170621082850.13224-9-daniel.vetter@ffwll.ch --- drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index c6b1b7f3a2a3..b9fbf4b1e8f0 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -177,7 +177,6 @@ err_fbdev_fini: rockchip_drm_fbdev_fini(drm_dev); err_kms_helper_poll_fini: drm_kms_helper_poll_fini(drm_dev); - drm_vblank_cleanup(drm_dev); err_unbind_all: component_unbind_all(dev, drm_dev); err_mode_config_cleanup: @@ -200,7 +199,6 @@ static void rockchip_drm_unbind(struct device *dev) drm_kms_helper_poll_fini(drm_dev); drm_atomic_helper_shutdown(drm_dev); - drm_vblank_cleanup(drm_dev); component_unbind_all(dev, drm_dev); drm_mode_config_cleanup(drm_dev); rockchip_iommu_cleanup(drm_dev); From 5cbdaccb0df9a57936ba3608e97433fc469954cb Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 21 Jun 2017 10:28:47 +0200 Subject: [PATCH 0082/1795] drm/udl: Drop drm_vblank_cleanup udl doesn't shut down the display, so stopping the vblank isn't going to do much good either. Just drop it. Cc: Dave Airlie Reviewed-by: Sean Paul Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170621082850.13224-11-daniel.vetter@ffwll.ch --- drivers/gpu/drm/udl/udl_main.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index a9d93b871a15..0328b2c7b210 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -371,8 +371,6 @@ void udl_driver_unload(struct drm_device *dev) { struct udl_device *udl = dev->dev_private; - drm_vblank_cleanup(dev); - if (udl->urbs.count) udl_free_urb_list(dev); From 5f58e9742df3f1cdf1034a26569649f5f4fd776b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 26 Jun 2017 18:19:48 +0200 Subject: [PATCH 0083/1795] drm/vmwgfx: Drop drm_vblank_cleanup Again stopping the vblank before uninstalling the irq handler is kinda the wrong way round, but the fb_off stuff should take care of disabling the dsiplay at least in most cases. So drop the drm_vblank_cleanup code since it's not really doing anything, it looks all cargo-culted. v2: Appease gcc better. 
v3: Simplify code (Sean Paul) Cc: Sinclair Yeh Cc: Thomas Hellstrom Reviewed-by: Sean Paul Reviewed-by: Thomas Hellstrom Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170626161949.25629-1-daniel.vetter@ffwll.ch --- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 8 ++------ drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 2 -- drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 4 ---- drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 9 --------- drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 27 +-------------------------- 5 files changed, 3 insertions(+), 47 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index a8876b070168..5fab9edf359c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1771,7 +1771,7 @@ int vmw_kms_init(struct vmw_private *dev_priv) int vmw_kms_close(struct vmw_private *dev_priv) { - int ret; + int ret = 0; /* * Docs says we should take the lock before calling this function @@ -1779,11 +1779,7 @@ int vmw_kms_close(struct vmw_private *dev_priv) * drm_encoder_cleanup which takes the lock we deadlock. */ drm_mode_config_cleanup(dev_priv->dev); - if (dev_priv->active_display_unit == vmw_du_screen_object) - ret = vmw_kms_sou_close_display(dev_priv); - else if (dev_priv->active_display_unit == vmw_du_screen_target) - ret = vmw_kms_stdu_close_display(dev_priv); - else + if (dev_priv->active_display_unit == vmw_du_legacy) ret = vmw_kms_ldu_close_display(dev_priv); return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 13f2f1d2818a..f94b4ca38ab2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -405,7 +405,6 @@ int vmw_kms_update_proxy(struct vmw_resource *res, * Screen Objects display functions - vmwgfx_scrn.c */ int vmw_kms_sou_init_display(struct vmw_private *dev_priv); -int vmw_kms_sou_close_display(struct vmw_private *dev_priv); int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, struct vmw_framebuffer *framebuffer, struct drm_clip_rect *clips, @@ -433,7 +432,6 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv, * Screen Target Display Unit functions - vmwgfx_stdu.c */ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv); -int vmw_kms_stdu_close_display(struct vmw_private *dev_priv); int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv, struct vmw_framebuffer *framebuffer, struct drm_clip_rect *clips, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index d3987bcf53f8..449ed4fba0f2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -582,13 +582,9 @@ err_free: int vmw_kms_ldu_close_display(struct vmw_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; - if (!dev_priv->ldu_priv) return -ENOSYS; - drm_vblank_cleanup(dev); - BUG_ON(!list_empty(&dev_priv->ldu_priv->active)); kfree(dev_priv->ldu_priv); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 8d7dc9def7c2..3b917c9b0c21 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -746,15 +746,6 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv) return 0; } -int vmw_kms_sou_close_display(struct vmw_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - - drm_vblank_cleanup(dev); - - return 0; -} - static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv, struct vmw_framebuffer *framebuffer) { diff --git 
a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index bad31bdf09b6..b4d4074c0ae0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -1634,36 +1634,11 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv) if (unlikely(ret != 0)) { DRM_ERROR("Failed to initialize STDU %d", i); - goto err_vblank_cleanup; + return ret; } } DRM_INFO("Screen Target Display device initialized\n"); - return 0; - -err_vblank_cleanup: - drm_vblank_cleanup(dev); - return ret; -} - - - -/** - * vmw_kms_stdu_close_display - Cleans up after vmw_kms_stdu_init_display - * - * @dev_priv: VMW DRM device - * - * Frees up any resources allocated by vmw_kms_stdu_init_display - * - * RETURNS: - * 0 on success - */ -int vmw_kms_stdu_close_display(struct vmw_private *dev_priv) -{ - struct drm_device *dev = dev_priv->dev; - - drm_vblank_cleanup(dev); - return 0; } From 770a17a5713af26d1490d4f669194ed959b88241 Mon Sep 17 00:00:00 2001 From: Manasi Navare Date: Mon, 26 Jun 2017 12:21:44 -0700 Subject: [PATCH 0084/1795] drm/i915/dp: Fix the t11_t12 panel power cycle delay from VBT read MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When we read the VBT t11_t12 value for panel power cycle delay, it is a zero based value so we need to add 100ms to that. And then it needs to be multiplied by 10 to store it in 100usec units, the same as the SW VBT. v3: * Add it as part of series v2: * Change the VBT value instead of HW readout and pp div (Ville Syrjala) Reviewed-by: Ville Syrjala Signed-off-by: Manasi Navare Cc: Ville Syrjala Cc: Clint Taylor Link: http://patchwork.freedesktop.org/patch/msgid/1498504905-21067-1-git-send-email-manasi.d.navare@intel.com Signed-off-by: Ville Syrjälä --- drivers/gpu/drm/i915/intel_dp.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 2eb6e0ff143a..95ff5d678472 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -5259,6 +5259,11 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, intel_pps_dump_state("cur", &cur); vbt = dev_priv->vbt.edp.pps; + /* T11_T12 delay is special and actually in units of 100ms, but zero + * based in the hw (so we need to add 100 ms). But the sw vbt + * table multiplies it with 1000 to make it in units of 100usec, + * too. */ + vbt.t11_t12 += 100 * 10; /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of * our hw here, which are all in 100usec. */ From 12c8ca9cf9bf5d53918068b1ebbd17ac6ef1d2f3 Mon Sep 17 00:00:00 2001 From: Manasi Navare Date: Mon, 26 Jun 2017 12:21:45 -0700 Subject: [PATCH 0085/1795] drm/i915/dp: Remove -1/+1 from t11_t12 for Gen9_LP/CNP case MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now the VBT.seq->t11_t12 value adds 100ms to both Gen9_LP as well as non Gen9_LP cases so no need to special case and do -1 during HW readout and +1 during pp_div write for Gen9_LP/CNP case.
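As a worked example (illustrative numbers, not taken from a real VBT):

    raw VBT t11_t12 = 4       -> actual delay = (4 + 1) * 100ms = 500ms
    parsed value    = 4 * 1000 = 4000        (100usec units, 100ms short)
    after the fix   = 4000 + 100 * 10 = 5000 (100usec units = 500ms)

so the Gen9_LP/CNP write side can simply use DIV_ROUND_UP(seq->t11_t12, 1000) and the readout just multiplies by 1000, with no -1/+1 special casing left.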
Reviewed-by: Ville Syrjala Signed-off-by: Manasi Navare Suggested-by: Ville Syrjala Cc: Ville Syrjala Cc: Clint Taylor Link: http://patchwork.freedesktop.org/patch/msgid/1498504905-21067-2-git-send-email-manasi.d.navare@intel.com Signed-off-by: Ville Syrjälä --- drivers/gpu/drm/i915/intel_dp.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 95ff5d678472..0445d11224d4 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -5203,12 +5203,8 @@ intel_pps_readout_hw_state(struct drm_i915_private *dev_priv, PANEL_POWER_DOWN_DELAY_SHIFT; if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) { - u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >> - BXT_POWER_CYCLE_DELAY_SHIFT; - if (tmp > 0) - seq->t11_t12 = (tmp - 1) * 1000; - else - seq->t11_t12 = 0; + seq->t11_t12 = ((pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >> + BXT_POWER_CYCLE_DELAY_SHIFT) * 1000; } else { seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; @@ -5367,7 +5363,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) { pp_div = I915_READ(regs.pp_ctrl); pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK; - pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000) + pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000) << BXT_POWER_CYCLE_DELAY_SHIFT); } else { pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT; From 7141fd3e5ba90d09d2138ff1bbefd7cc43a82e94 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 21 Jun 2017 11:16:27 +0200 Subject: [PATCH 0086/1795] drm/atomic-helper: Simplify commit tracking locking The crtc->commit_lock only protects commit_list and commit_entry. If we chase the pointer from the drm_atomic_state update structure, then we don't need any locks (since we hold a reference already). Simplify the locking accordingly. Noticed while reviewing a patch from Boris. Cc: Boris Brezillon Reviewed-by: Sean Paul Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170621091627.30837-1-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_atomic_helper.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 45b4f34bebcd..c4e4e8b5caeb 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1715,9 +1715,7 @@ void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state) /* backend must have consumed any event by now */ WARN_ON(new_crtc_state->event); - spin_lock(&crtc->commit_lock); complete_all(&commit->hw_done); - spin_unlock(&crtc->commit_lock); } } EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done); @@ -1746,7 +1744,6 @@ void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state) if (WARN_ON(!commit)) continue; - spin_lock(&crtc->commit_lock); complete_all(&commit->cleanup_done); WARN_ON(!try_wait_for_completion(&commit->hw_done)); @@ -1756,8 +1753,6 @@ void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state) if (try_wait_for_completion(&commit->flip_done)) goto del_commit; - spin_unlock(&crtc->commit_lock); - /* We must wait for the vblank event to signal our completion * before releasing our reference, since the vblank work does * not hold a reference of its own. 
*/ @@ -1767,8 +1762,8 @@ void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state) DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n", crtc->base.id, crtc->name); - spin_lock(&crtc->commit_lock); del_commit: + spin_lock(&crtc->commit_lock); list_del(&commit->commit_entry); spin_unlock(&crtc->commit_lock); } From e38c2da01f76cca82b59ca612529b81df82a7cc7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Mon, 26 Jun 2017 23:30:51 +0300 Subject: [PATCH 0087/1795] drm/i915: Disable MSI for all pre-gen5 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We have pretty clear evidence that MSIs are getting lost on g4x and somehow the interrupt logic doesn't seem to recover from that state even if we try hard to clear the IIR. Disabling IER around the normal IIR clearing in the irq handler isn't sufficient to avoid this, so the problem really seems to be further up the interrupt chain. This should guarantee that there's always an edge if any IIR bits are set after the interrupt handler is done, which should normally guarantee that the CPU interrupt is generated. That approach seems to work perfectly on VLV/CHV, but apparently not on g4x. MSI is documented to be broken on 965gm at least. The chipset spec says MSI is defeatured because interrupts can be delayed or lost, which fits well with what we're seeing on g4x. Previously we've already disabled GMBUS interrupts on g4x because somehow GMBUS manages to raise legacy interrupts even when MSI is enabled. Since there's such widespread MSI breakage all over in the pre-gen5 land let's just give up on MSI on these platforms. Seqno reporting might be negatively affected by this since the legacy interrupts aren't guaranteed to be ordered with the seqno writes, whereas MSI interrupts may be? But an occasionally missed seqno seems like a small price to pay for generally working interrupts. Cc: stable@vger.kernel.org Cc: Diego Viola Tested-by: Diego Viola Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=101261 Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/20170626203051.28480-1-ville.syrjala@linux.intel.com Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 43e925933688..a6bef9ee8703 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1141,10 +1141,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) * and the registers being closely associated. * * According to chipset errata, on the 965GM, MSI interrupts may - * be lost or delayed, but we use them anyways to avoid - * stuck interrupts on some machines. + * be lost or delayed, and was defeatured. MSI interrupts seem to + * get lost on g4x as well, and interrupt delivery seems to stay + * properly dead afterwards. So we'll just disable them for all + * pre-gen5 chipsets.
*/ - if (!IS_I945G(dev_priv) && !IS_I945GM(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 5) { if (pci_enable_msi(pdev) < 0) DRM_DEBUG_DRIVER("can't enable MSI"); } From 774eed4a408185d7a2cde16f40b1806d4fb1d531 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 23 Jun 2017 14:19:07 +0100 Subject: [PATCH 0088/1795] drm/i915/selftests: Fix mutex imbalance for igt_render_engine_reset_fallback Smatch spots: drivers/gpu/drm/i915/selftests/intel_hangcheck.c:669 igt_render_engine_reset_fallback() error: double unlock 'mutex:&i915->drm.struct_mutex' Signed-off-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/20170623131907.24236-1-chris@chris-wilson.co.uk Reviewed-by: Michel Thierry --- .../gpu/drm/i915/selftests/intel_hangcheck.c | 21 ++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index af475189bd52..7096c3911cd3 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -597,12 +597,12 @@ static int igt_render_engine_reset_fallback(void *arg) err = hang_init(&h, i915); if (err) - goto unlock; + goto err_unlock; rq = hang_create_request(&h, engine, i915->kernel_context); if (IS_ERR(rq)) { err = PTR_ERR(rq); - goto fini; + goto err_fini; } i915_gem_request_get(rq); @@ -614,7 +614,7 @@ static int igt_render_engine_reset_fallback(void *arg) if (!wait_for_hang(&h, rq)) { pr_err("Failed to start request %x\n", rq->fence.seqno); err = -EIO; - goto out_rq; + goto err_request; } reset_engine_count = i915_reset_engine_count(&i915->gpu_error, engine); @@ -646,13 +646,14 @@ static int igt_render_engine_reset_fallback(void *arg) */ if (i915_terminally_wedged(&i915->gpu_error)) { set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags); - mutex_lock(&i915->drm.struct_mutex); rq->fence.error = 0; + mutex_lock(&i915->drm.struct_mutex); set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags); i915_reset(i915); GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags)); + mutex_unlock(&i915->drm.struct_mutex); if (i915_reset_count(&i915->gpu_error) == reset_count) { pr_err("No full GPU reset recorded!\n"); @@ -663,10 +664,8 @@ static int igt_render_engine_reset_fallback(void *arg) out_rq: i915_gem_request_put(rq); -fini: hang_fini(&h); -unlock: - mutex_unlock(&i915->drm.struct_mutex); +out_backoff: clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags); wake_up_all(&i915->gpu_error.reset_queue); @@ -674,6 +673,14 @@ unlock: return -EIO; return err; + +err_request: + i915_gem_request_put(rq); +err_fini: + hang_fini(&h); +err_unlock: + mutex_unlock(&i915->drm.struct_mutex); + goto out_backoff; } int intel_hangcheck_live_selftests(struct drm_i915_private *i915) From 7c3f5317b8c2828ab10e8cf87c8ab5232d1966d0 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Tue, 27 Jun 2017 07:38:54 +0200 Subject: [PATCH 0089/1795] drm/i915: Fix an error checking test 'dma_buf_vmap' returns NULL on error, not an error pointer. 
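The corrected idiom is therefore a plain NULL test rather than IS_ERR()/PTR_ERR() (a minimal sketch of the check, mirroring the hunk below):

    void *ptr = dma_buf_vmap(dmabuf);

    if (!ptr) {
        /* NULL, not an ERR_PTR(), signals the mapping failure */
        err = -ENOMEM;
        goto out;
    }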
Fixes: 6cca22ede8a4 ("drm/i915: Add some mock tests for dmabuf interop") Signed-off-by: Christophe JAILLET Link: http://patchwork.freedesktop.org/patch/msgid/20170627053854.21152-1-christophe.jaillet@wanadoo.fr Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c index d15cc9d3a5cd..89dc25a5a53b 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c @@ -246,9 +246,9 @@ static int igt_dmabuf_export_vmap(void *arg) i915_gem_object_put(obj); ptr = dma_buf_vmap(dmabuf); - if (IS_ERR(ptr)) { - err = PTR_ERR(ptr); - pr_err("dma_buf_vmap failed with err=%d\n", err); + if (!ptr) { + pr_err("dma_buf_vmap failed\n"); + err = -ENOMEM; goto out; } From ceb26569af5bc33a8f47b755fd64f12c81801ce8 Mon Sep 17 00:00:00 2001 From: Sebastian Sanchez Date: Fri, 12 May 2017 09:19:36 -0700 Subject: [PATCH 0090/1795] IB/hfi1: Remove unnecessary initialization from tx request The tx request is unnecessarily initialized with memset() in the hot code path even though most fields are initialized later on. This initialization shows up as costly in profiling. Remove the unnecessary initialization from the tx request and make sure all variables are initialized properly. Reviewed-by: Mike Marciniszyn Signed-off-by: Sebastian Sanchez Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/user_sdma.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index d55339f5d73b..16fd519216dc 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -607,12 +607,19 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, hfi1_cdbg(SDMA, "[%u:%u:%u] Using req/comp entry %u\n", dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx); req = pq->reqs + info.comp_idx; - memset(req, 0, sizeof(*req)); req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */ + req->data_len = 0; req->pq = pq; req->cq = cq; req->status = -1; req->ahg_idx = -1; + req->iov_idx = 0; + req->sent = 0; + req->seqnum = 0; + req->seqcomp = 0; + req->seqsubmitted = 0; + req->flags = 0; + req->tids = NULL; INIT_LIST_HEAD(&req->txps); memcpy(&req->info, &info, sizeof(info)); @@ -701,12 +708,14 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, /* Save all the IO vector structures */ for (i = 0; i < req->data_iovs; i++) { + req->iovs[i].offset = 0; INIT_LIST_HEAD(&req->iovs[i].list); memcpy(&req->iovs[i].iov, iovec + idx++, sizeof(req->iovs[i].iov)); ret = pin_vector_pages(req, &req->iovs[i]); if (ret) { + req->data_iovs = i; req->status = ret; goto free_req; } @@ -749,6 +758,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, } req->tids = tmp; req->n_tids = ntids; + req->tididx = 0; idx++; } From aa560df381f7199fafa4e7f71382de28f0400270 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Fri, 12 May 2017 09:19:47 -0700 Subject: [PATCH 0091/1795] IB/hfi1: Remove unused mk_qpn function Leftover function that is not used. Remove it.
Reviewed-by: Dennis Dalessandro Signed-off-by: Ira Weiny Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/qp.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c index 650305cc0373..e91be05062e6 100644 --- a/drivers/infiniband/hw/hfi1/qp.c +++ b/drivers/infiniband/hw/hfi1/qp.c @@ -73,12 +73,6 @@ static void iowait_wakeup(struct iowait *wait, int reason); static void iowait_sdma_drained(struct iowait *wait); static void qp_pio_drain(struct rvt_qp *qp); -static inline unsigned mk_qpn(struct rvt_qpn_table *qpt, - struct rvt_qpn_map *map, unsigned off) -{ - return (map - qpt->map) * RVT_BITS_PER_PAGE + off; -} - const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = { [IB_WR_RDMA_WRITE] = { .length = sizeof(struct ib_rdma_wr), From 7dafbab3753fcf59bc81748e5b2c5bf04e1c62c7 Mon Sep 17 00:00:00 2001 From: Don Hiatt Date: Fri, 12 May 2017 09:19:55 -0700 Subject: [PATCH 0092/1795] IB/hfi1: Add functions to parse BTH/IB headers Improve code readability by adding inline functions to read specific BTH/IB fields without knowledge of byte offsets. Reviewed-by: Brian Welty Reviewed-by: Dasaratharaman Chandramouli Reviewed-by: Dennis Dalessandro Signed-off-by: Don Hiatt Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/driver.c | 6 +-- drivers/infiniband/hw/hfi1/rc.c | 8 +-- drivers/infiniband/hw/hfi1/uc.c | 2 +- drivers/infiniband/hw/hfi1/ud.c | 6 +-- drivers/infiniband/hw/hfi1/verbs.c | 6 +-- include/rdma/ib_hdrs.h | 84 +++++++++++++++++++++++++++++ include/rdma/ib_verbs.h | 2 + include/rdma/rdmavt_qp.h | 2 +- 8 files changed, 101 insertions(+), 15 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c index a50870e455a3..0583479f2576 100644 --- a/drivers/infiniband/hw/hfi1/driver.c +++ b/drivers/infiniband/hw/hfi1/driver.c @@ -286,7 +286,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, goto drop; } /* Get the destination QP number.
*/ - qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK; + qp_num = ib_bth_get_qpn(ohdr); if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) { struct rvt_qp *qp; unsigned long flags; @@ -438,7 +438,7 @@ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt, case IB_QPT_GSI: case IB_QPT_UD: rlid = ib_get_slid(hdr); - rqpn = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK; + rqpn = ib_get_sqpn(ohdr); svc_type = IB_CC_SVCTYPE_UD; is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) && (dlid != be16_to_cpu(IB_LID_PERMISSIVE)); @@ -461,7 +461,7 @@ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt, bth1 = be32_to_cpu(ohdr->bth[1]); if (do_cnp && (bth1 & IB_FECN_SMASK)) { - u16 pkey = (u16)be32_to_cpu(ohdr->bth[0]); + u16 pkey = ib_bth_get_pkey(ohdr); return_cnp(ibp, qp, rqpn, pkey, dlid, rlid, sc, grh); } diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index 1080778a1f7c..66e6843aab48 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c @@ -765,7 +765,7 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp, ohdr->u.aeth = rvt_compute_aeth(qp); sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)]; /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */ - pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT); + pbc_flags |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT); lrh0 |= (sc5 & 0xf) << 12 | (rdma_ah_get_sl(&qp->remote_ah_attr) & 0xf) << 4; hdr.lrh[0] = cpu_to_be16(lrh0); @@ -1009,7 +1009,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr) return; } - psn = be32_to_cpu(ohdr->bth[2]); + psn = ib_bth_get_psn(ohdr); reset_sending_psn(qp, psn); /* @@ -1943,7 +1943,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet) is_fecn = process_ecn(qp, packet, false); - psn = be32_to_cpu(ohdr->bth[2]); + psn = ib_bth_get_psn(ohdr); opcode = ib_bth_get_opcode(ohdr); /* @@ -2388,7 +2388,7 @@ void hfi1_rc_hdrerr( if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0)) return; - psn = be32_to_cpu(ohdr->bth[2]); + psn = ib_bth_get_psn(ohdr); opcode = ib_bth_get_opcode(ohdr); /* Only deal with RDMA Writes for now */ diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c index 5da1e4546543..2a5650f8aee0 100644 --- a/drivers/infiniband/hw/hfi1/uc.c +++ b/drivers/infiniband/hw/hfi1/uc.c @@ -319,7 +319,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet) process_ecn(qp, packet, true); - psn = be32_to_cpu(ohdr->bth[2]); + psn = ib_bth_get_psn(ohdr); opcode = ib_bth_get_opcode(ohdr); /* Compare the PSN verses the expected PSN. 
*/ diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c index 6a4e95cefae5..49fe179ad3ae 100644 --- a/drivers/infiniband/hw/hfi1/ud.c +++ b/drivers/infiniband/hw/hfi1/ud.c @@ -549,7 +549,7 @@ void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn, hdr.lrh[3] = cpu_to_be16(slid); plen = 2 /* PBC */ + hwords; - pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT; + pbc_flags |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT); vl = sc_to_vlt(ppd->dd, sc5); pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen); if (ctxt) { @@ -689,8 +689,8 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) u16 slid; u8 extra_bytes; - qkey = be32_to_cpu(ohdr->u.ud.deth[0]); - src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK; + qkey = ib_get_qkey(ohdr); + src_qp = ib_get_sqpn(ohdr); dlid = ib_get_dlid(hdr); bth1 = be32_to_cpu(ohdr->bth[1]); slid = ib_get_slid(hdr); diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 90e7b77d68e8..128d2917a2d9 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -595,7 +595,7 @@ void hfi1_ib_rcv(struct hfi1_packet *packet) inc_opstats(tlen, &rcd->opstats->stats[opcode]); /* Get the destination QP number. */ - qp_num = be32_to_cpu(packet->ohdr->bth[1]) & RVT_QPN_MASK; + qp_num = ib_bth_get_qpn(packet->ohdr); lid = ib_get_dlid(hdr); if (unlikely((lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) && (lid != be16_to_cpu(IB_LID_PERMISSIVE)))) { @@ -863,7 +863,7 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps, /* No vl15 here */ /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */ - pbc |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT; + pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT); if (unlikely(hfi1_dbg_fault_opcode(qp, opcode, false))) pbc = hfi1_fault_tx(qp, opcode, pbc); @@ -999,7 +999,7 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, u8 opcode = get_opcode(&tx->phdr.hdr); /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */ - pbc |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT; + pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT); if (unlikely(hfi1_dbg_fault_opcode(qp, opcode, false))) pbc = hfi1_fault_tx(qp, opcode, pbc); pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen); diff --git a/include/rdma/ib_hdrs.h b/include/rdma/ib_hdrs.h index 5519f31f043a..c124d515f7d5 100644 --- a/include/rdma/ib_hdrs.h +++ b/include/rdma/ib_hdrs.h @@ -193,8 +193,12 @@ static inline void put_ib_ateth_compare(u64 val, struct ib_atomic_eth *ateth) #define IB_LNH_MASK 3 #define IB_SC_MASK 0xf #define IB_SC_SHIFT 12 +#define IB_SC5_MASK 0x10 #define IB_SL_MASK 0xf #define IB_SL_SHIFT 4 +#define IB_SL_SHIFT 4 +#define IB_LVER_MASK 0xf +#define IB_LVER_SHIFT 8 static inline u8 ib_get_lnh(struct ib_header *hdr) { @@ -206,6 +210,11 @@ static inline u8 ib_get_sc(struct ib_header *hdr) return ((be16_to_cpu(hdr->lrh[0]) >> IB_SC_SHIFT) & IB_SC_MASK); } +static inline bool ib_is_sc5(u16 sc5) +{ + return !!(sc5 & IB_SC5_MASK); +} + static inline u8 ib_get_sl(struct ib_header *hdr) { return ((be16_to_cpu(hdr->lrh[0]) >> IB_SL_SHIFT) & IB_SL_MASK); @@ -221,6 +230,27 @@ static inline u16 ib_get_slid(struct ib_header *hdr) return (be16_to_cpu(hdr->lrh[3])); } +static inline u8 ib_get_lver(struct ib_header *hdr) +{ + return (u8)((be16_to_cpu(hdr->lrh[0]) >> IB_LVER_SHIFT) & + IB_LVER_MASK); +} + +static inline u16 ib_get_len(struct ib_header *hdr) +{ + return (u16)(be16_to_cpu(hdr->lrh[2])); +} + +static inline u32 ib_get_qkey(struct ib_other_headers *ohdr) +{ + return 
be32_to_cpu(ohdr->u.ud.deth[0]); +} + +static inline u32 ib_get_sqpn(struct ib_other_headers *ohdr) +{ + return ((be32_to_cpu(ohdr->u.ud.deth[1])) & IB_QPN_MASK); +} + /* * BTH */ @@ -229,6 +259,14 @@ static inline u16 ib_get_slid(struct ib_header *hdr) #define IB_BTH_PAD_MASK 3 #define IB_BTH_PKEY_MASK 0xffff #define IB_BTH_PAD_SHIFT 20 +#define IB_BTH_A_MASK 1 +#define IB_BTH_A_SHIFT 31 +#define IB_BTH_M_MASK 1 +#define IB_BTH_M_SHIFT 22 +#define IB_BTH_SE_MASK 1 +#define IB_BTH_SE_SHIFT 23 +#define IB_BTH_TVER_MASK 0xf +#define IB_BTH_TVER_SHIFT 16 static inline u8 ib_bth_get_pad(struct ib_other_headers *ohdr) { @@ -247,4 +285,50 @@ static inline u8 ib_bth_get_opcode(struct ib_other_headers *ohdr) IB_BTH_OPCODE_MASK); } +static inline u8 ib_bth_get_ackreq(struct ib_other_headers *ohdr) +{ + return (u8)((be32_to_cpu(ohdr->bth[2]) >> IB_BTH_A_SHIFT) & + IB_BTH_A_MASK); +} + +static inline u8 ib_bth_get_migreq(struct ib_other_headers *ohdr) +{ + return (u8)((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_M_SHIFT) & + IB_BTH_M_MASK); +} + +static inline u8 ib_bth_get_se(struct ib_other_headers *ohdr) +{ + return (u8)((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_SE_SHIFT) & + IB_BTH_SE_MASK); +} + +static inline u32 ib_bth_get_psn(struct ib_other_headers *ohdr) +{ + return (u32)(be32_to_cpu(ohdr->bth[2])); +} + +static inline u32 ib_bth_get_qpn(struct ib_other_headers *ohdr) +{ + return (u32)((be32_to_cpu(ohdr->bth[1])) & IB_QPN_MASK); +} + +static inline u8 ib_bth_get_becn(struct ib_other_headers *ohdr) +{ + return (u8)((be32_to_cpu(ohdr->bth[1]) >> IB_BECN_SHIFT) & + IB_BECN_MASK); +} + +static inline u8 ib_bth_get_fecn(struct ib_other_headers *ohdr) +{ + return (u8)((be32_to_cpu(ohdr->bth[1]) >> IB_FECN_SHIFT) & + IB_FECN_MASK); +} + +static inline u8 ib_bth_get_tver(struct ib_other_headers *ohdr) +{ + return (u8)((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_TVER_SHIFT) & + IB_BTH_TVER_MASK); +} + #endif /* IB_HDRS_H */ diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index ba8314ec5768..8f1ce4e27bbd 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -664,6 +664,8 @@ union rdma_network_hdr { }; }; +#define IB_QPN_MASK 0xFFFFFF + enum { IB_MULTICAST_QPN = 0xffffff }; diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index be6472e5b06b..13f43b3527a8 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -396,7 +396,7 @@ struct rvt_srq { #define RVT_QPNMAP_ENTRIES (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE) #define RVT_BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE) #define RVT_BITS_PER_PAGE_MASK (RVT_BITS_PER_PAGE - 1) -#define RVT_QPN_MASK 0xFFFFFF +#define RVT_QPN_MASK IB_QPN_MASK /* * QPN-map pages start out as NULL, they get allocated upon From 228d2af1b723deedee38f03d144b7d25b39f6f86 Mon Sep 17 00:00:00 2001 From: Don Hiatt Date: Fri, 12 May 2017 09:20:08 -0700 Subject: [PATCH 0093/1795] IB/hfi1: Separate input/output header tracing Calls to trace incoming packets will now receive the packet context as parameter. This enables trace support for future packet types. Header trace output is in the format <field>:<value>, which makes parsing easier. input_ibhdr trace before change: <idle>-0 [001] d.h. 5904.250925: input_ibhdr: [0000:05:00.0] vl 0 lver 0 sl 0 lnh 2,LRH_BTH dlid 0002 len 18 slid 0001 op 0x64,UD_SEND_ONLY se 0 m 0 pad 0 tver 0 pkey 0xffff f 0 b 0 qpn 0x000001 a 0 psn 0x000001b2 deth qkey 0x80010000 sqpn 0x000001 input_ibhdr trace after change: <idle>-0 [001] d.h.
6655.714488: input_ibhdr: [0000:05:00.0] (IB) len:124 sc:0 dlid:0x0001 slid:0x0002 lnh:2,LRH_BTH lver:0 sl:0 age:0 becn:0 fecn:0 l4:0 rc:0 entropy:0 op:0x64,UD_SEND_ONLY se:0 m:0 pad:0 tver:0 pkey:0x7fff f:0 b:0 qpn:0x000001 a:0 psn:0x00000036 hlen:8 deth qkey:0x80010000 sqpn:0x000001 Reviewed-by: Dasaratharaman Chandramouli Reviewed-by: Dennis Dalessandro Signed-off-by: Don Hiatt Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/hfi.h | 49 ---- drivers/infiniband/hw/hfi1/rc.c | 3 +- drivers/infiniband/hw/hfi1/trace.c | 58 +++- drivers/infiniband/hw/hfi1/trace_ibhdrs.h | 324 ++++++++++++++-------- drivers/infiniband/hw/hfi1/trace_rx.h | 9 + drivers/infiniband/hw/hfi1/verbs.c | 7 +- 6 files changed, 280 insertions(+), 170 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index 414a04a481c2..3b76631cbcbd 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -2086,53 +2086,4 @@ int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp); #define DD_DEV_ENTRY(dd) __string(dev, dev_name(&(dd)->pcidev->dev)) #define DD_DEV_ASSIGN(dd) __assign_str(dev, dev_name(&(dd)->pcidev->dev)) - -#define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype } -#define show_packettype(etype) \ -__print_symbolic(etype, \ - packettype_name(EXPECTED), \ - packettype_name(EAGER), \ - packettype_name(IB), \ - packettype_name(ERROR), \ - packettype_name(BYPASS)) - -#define ib_opcode_name(opcode) { IB_OPCODE_##opcode, #opcode } -#define show_ib_opcode(opcode) \ -__print_symbolic(opcode, \ - ib_opcode_name(RC_SEND_FIRST), \ - ib_opcode_name(RC_SEND_MIDDLE), \ - ib_opcode_name(RC_SEND_LAST), \ - ib_opcode_name(RC_SEND_LAST_WITH_IMMEDIATE), \ - ib_opcode_name(RC_SEND_ONLY), \ - ib_opcode_name(RC_SEND_ONLY_WITH_IMMEDIATE), \ - ib_opcode_name(RC_RDMA_WRITE_FIRST), \ - ib_opcode_name(RC_RDMA_WRITE_MIDDLE), \ - ib_opcode_name(RC_RDMA_WRITE_LAST), \ - ib_opcode_name(RC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \ - ib_opcode_name(RC_RDMA_WRITE_ONLY), \ - ib_opcode_name(RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \ - ib_opcode_name(RC_RDMA_READ_REQUEST), \ - ib_opcode_name(RC_RDMA_READ_RESPONSE_FIRST), \ - ib_opcode_name(RC_RDMA_READ_RESPONSE_MIDDLE), \ - ib_opcode_name(RC_RDMA_READ_RESPONSE_LAST), \ - ib_opcode_name(RC_RDMA_READ_RESPONSE_ONLY), \ - ib_opcode_name(RC_ACKNOWLEDGE), \ - ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \ - ib_opcode_name(RC_COMPARE_SWAP), \ - ib_opcode_name(RC_FETCH_ADD), \ - ib_opcode_name(UC_SEND_FIRST), \ - ib_opcode_name(UC_SEND_MIDDLE), \ - ib_opcode_name(UC_SEND_LAST), \ - ib_opcode_name(UC_SEND_LAST_WITH_IMMEDIATE), \ - ib_opcode_name(UC_SEND_ONLY), \ - ib_opcode_name(UC_SEND_ONLY_WITH_IMMEDIATE), \ - ib_opcode_name(UC_RDMA_WRITE_FIRST), \ - ib_opcode_name(UC_RDMA_WRITE_MIDDLE), \ - ib_opcode_name(UC_RDMA_WRITE_LAST), \ - ib_opcode_name(UC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \ - ib_opcode_name(UC_RDMA_WRITE_ONLY), \ - ib_opcode_name(UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \ - ib_opcode_name(UD_SEND_ONLY), \ - ib_opcode_name(UD_SEND_ONLY_WITH_IMMEDIATE), \ - ib_opcode_name(CNP)) #endif /* _HFI1_KERNEL_H */ diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index 66e6843aab48..b443c1e01543 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c @@ -798,7 +798,8 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp, goto queue_ack; } - trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &hdr); + 
trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device), + &hdr, ib_is_sc5(sc5)); /* write the pbc and data */ ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc, &hdr, hwords); diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c index eafae487face..b80b74d0c252 100644 --- a/drivers/infiniband/hw/hfi1/trace.c +++ b/drivers/infiniband/hw/hfi1/trace.c @@ -47,7 +47,7 @@ #define CREATE_TRACE_POINTS #include "trace.h" -u8 ibhdr_exhdr_len(struct ib_header *hdr) +u8 hfi1_trace_ib_hdr_len(struct ib_header *hdr) { struct ib_other_headers *ohdr; u8 opcode; @@ -61,13 +61,18 @@ u8 ibhdr_exhdr_len(struct ib_header *hdr) 0 : hdr_len_by_opcode[opcode] - (12 + 8); } -#define IMM_PRN "imm %d" -#define RETH_PRN "reth vaddr 0x%.16llx rkey 0x%.8x dlen 0x%.8x" -#define AETH_PRN "aeth syn 0x%.2x %s msn 0x%.8x" -#define DETH_PRN "deth qkey 0x%.8x sqpn 0x%.6x" -#define IETH_PRN "ieth rkey 0x%.8x" -#define ATOMICACKETH_PRN "origdata %llx" -#define ATOMICETH_PRN "vaddr 0x%llx rkey 0x%.8x sdata %llx cdata %llx" +const char *hfi1_trace_get_packet_str(struct hfi1_packet *packet) +{ + return "IB"; +} + +#define IMM_PRN "imm:%d" +#define RETH_PRN "reth vaddr:0x%.16llx rkey:0x%.8x dlen:0x%.8x" +#define AETH_PRN "aeth syn:0x%.2x %s msn:0x%.8x" +#define DETH_PRN "deth qkey:0x%.8x sqpn:0x%.6x" +#define IETH_PRN "ieth rkey:0x%.8x" +#define ATOMICACKETH_PRN "origdata:%llx" +#define ATOMICETH_PRN "vaddr:0x%llx rkey:0x%.8x sdata:%llx cdata:%llx" #define OP(transport, op) IB_OPCODE_## transport ## _ ## op @@ -84,6 +89,43 @@ static const char *parse_syndrome(u8 syndrome) return ""; } +void hfi1_trace_parse_bth(struct ib_other_headers *ohdr, + u8 *ack, u8 *becn, u8 *fecn, u8 *mig, + u8 *se, u8 *pad, u8 *opcode, u8 *tver, + u16 *pkey, u32 *psn, u32 *qpn) +{ + *ack = ib_bth_get_ackreq(ohdr); + *becn = ib_bth_get_becn(ohdr); + *fecn = ib_bth_get_fecn(ohdr); + *mig = ib_bth_get_migreq(ohdr); + *se = ib_bth_get_se(ohdr); + *pad = ib_bth_get_pad(ohdr); + *opcode = ib_bth_get_opcode(ohdr); + *tver = ib_bth_get_tver(ohdr); + *pkey = ib_bth_get_pkey(ohdr); + *psn = ib_bth_get_psn(ohdr); + *qpn = ib_bth_get_qpn(ohdr); +} + +void hfi1_trace_parse_9b_hdr(struct ib_header *hdr, bool sc5, + struct ib_other_headers **ohdr, + u8 *lnh, u8 *lver, u8 *sl, u8 *sc, + u16 *len, u32 *dlid, u32 *slid) +{ + *lnh = ib_get_lnh(hdr); + *lver = ib_get_lver(hdr); + *sl = ib_get_sl(hdr); + *sc = ib_get_sc(hdr) | (sc5 << 4); + *len = ib_get_len(hdr); + *dlid = ib_get_dlid(hdr); + *slid = ib_get_slid(hdr); + + if (*lnh == HFI1_LRH_BTH) + *ohdr = &hdr->u.oth; + else + *ohdr = &hdr->u.l.oth; +} + const char *parse_everbs_hdrs( struct trace_seq *p, u8 opcode, diff --git a/drivers/infiniband/hw/hfi1/trace_ibhdrs.h b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h index 090f6b506953..0f2d2da057ec 100644 --- a/drivers/infiniband/hw/hfi1/trace_ibhdrs.h +++ b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h @@ -55,8 +55,57 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM hfi1_ibhdrs -u8 ibhdr_exhdr_len(struct ib_header *hdr); +#define ib_opcode_name(opcode) { IB_OPCODE_##opcode, #opcode } +#define show_ib_opcode(opcode) \ +__print_symbolic(opcode, \ + ib_opcode_name(RC_SEND_FIRST), \ + ib_opcode_name(RC_SEND_MIDDLE), \ + ib_opcode_name(RC_SEND_LAST), \ + ib_opcode_name(RC_SEND_LAST_WITH_IMMEDIATE), \ + ib_opcode_name(RC_SEND_ONLY), \ + ib_opcode_name(RC_SEND_ONLY_WITH_IMMEDIATE), \ + ib_opcode_name(RC_RDMA_WRITE_FIRST), \ + ib_opcode_name(RC_RDMA_WRITE_MIDDLE), \ + ib_opcode_name(RC_RDMA_WRITE_LAST), \ + ib_opcode_name(RC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \ + 
ib_opcode_name(RC_RDMA_WRITE_ONLY), \ + ib_opcode_name(RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \ + ib_opcode_name(RC_RDMA_READ_REQUEST), \ + ib_opcode_name(RC_RDMA_READ_RESPONSE_FIRST), \ + ib_opcode_name(RC_RDMA_READ_RESPONSE_MIDDLE), \ + ib_opcode_name(RC_RDMA_READ_RESPONSE_LAST), \ + ib_opcode_name(RC_RDMA_READ_RESPONSE_ONLY), \ + ib_opcode_name(RC_ACKNOWLEDGE), \ + ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \ + ib_opcode_name(RC_COMPARE_SWAP), \ + ib_opcode_name(RC_FETCH_ADD), \ + ib_opcode_name(UC_SEND_FIRST), \ + ib_opcode_name(UC_SEND_MIDDLE), \ + ib_opcode_name(UC_SEND_LAST), \ + ib_opcode_name(UC_SEND_LAST_WITH_IMMEDIATE), \ + ib_opcode_name(UC_SEND_ONLY), \ + ib_opcode_name(UC_SEND_ONLY_WITH_IMMEDIATE), \ + ib_opcode_name(UC_RDMA_WRITE_FIRST), \ + ib_opcode_name(UC_RDMA_WRITE_MIDDLE), \ + ib_opcode_name(UC_RDMA_WRITE_LAST), \ + ib_opcode_name(UC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \ + ib_opcode_name(UC_RDMA_WRITE_ONLY), \ + ib_opcode_name(UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \ + ib_opcode_name(UD_SEND_ONLY), \ + ib_opcode_name(UD_SEND_ONLY_WITH_IMMEDIATE), \ + ib_opcode_name(CNP)) + const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode, void *ehdrs); +u8 hfi1_trace_ib_hdr_len(struct ib_header *hdr); +const char *hfi1_trace_get_packet_str(struct hfi1_packet *packet); +void hfi1_trace_parse_bth(struct ib_other_headers *ohdr, + u8 *ack, u8 *becn, u8 *fecn, u8 *mig, + u8 *se, u8 *pad, u8 *opcode, u8 *tver, + u16 *pkey, u32 *psn, u32 *qpn); +void hfi1_trace_parse_9b_hdr(struct ib_header *hdr, bool sc5, + struct ib_other_headers **ohdr, + u8 *lnh, u8 *lver, u8 *sl, u8 *sc, + u16 *len, u32 *dlid, u32 *slid); #define __parse_ib_ehdrs(op, ehdrs) parse_everbs_hdrs(p, op, ehdrs) @@ -66,139 +115,198 @@ __print_symbolic(lrh, \ lrh_name(LRH_BTH), \ lrh_name(LRH_GRH)) -#define LRH_PRN "vl %d lver %d sl %d lnh %d,%s dlid %.4x len %d slid %.4x" +#define LRH_PRN "len:%d sc:%d dlid:0x%.4x slid:0x%.4x" +#define LRH_9B_PRN "lnh:%d,%s lver:%d sl:%d " #define BTH_PRN \ - "op 0x%.2x,%s se %d m %d pad %d tver %d pkey 0x%.4x " \ - "f %d b %d qpn 0x%.6x a %d psn 0x%.8x" -#define EHDR_PRN "%s" + "op:0x%.2x,%s se:%d m:%d pad:%d tver:%d pkey:0x%.4x " \ + "f:%d b:%d qpn:0x%.6x a:%d psn:0x%.8x" +#define EHDR_PRN "hlen:%d %s" -DECLARE_EVENT_CLASS(hfi1_ibhdr_template, +DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template, TP_PROTO(struct hfi1_devdata *dd, - struct ib_header *hdr), - TP_ARGS(dd, hdr), + struct hfi1_packet *packet, + bool sc5), + TP_ARGS(dd, packet, sc5), TP_STRUCT__entry( DD_DEV_ENTRY(dd) - /* LRH */ - __field(u8, vl) + __field(u8, lnh) __field(u8, lver) __field(u8, sl) - __field(u8, lnh) - __field(u16, dlid) __field(u16, len) - __field(u16, slid) - /* BTH */ + __field(u32, dlid) + __field(u8, sc) + __field(u32, slid) __field(u8, opcode) __field(u8, se) - __field(u8, m) + __field(u8, mig) __field(u8, pad) __field(u8, tver) __field(u16, pkey) - __field(u8, f) - __field(u8, b) + __field(u8, fecn) + __field(u8, becn) __field(u32, qpn) - __field(u8, a) + __field(u8, ack) __field(u32, psn) /* extended headers */ - __dynamic_array(u8, ehdrs, ibhdr_exhdr_len(hdr)) + __dynamic_array(u8, ehdrs, + hfi1_trace_ib_hdr_len(packet->hdr)) ), - TP_fast_assign( + TP_fast_assign( + struct ib_other_headers *ohdr; + + DD_DEV_ASSIGN(dd); + + hfi1_trace_parse_9b_hdr(packet->hdr, sc5, + &ohdr, + &__entry->lnh, + &__entry->lver, + &__entry->sl, + &__entry->sc, + &__entry->len, + &__entry->dlid, + &__entry->slid); + + hfi1_trace_parse_bth(ohdr, &__entry->ack, + &__entry->becn, &__entry->fecn, + &__entry->mig, &__entry->se, + 
&__entry->pad, &__entry->opcode, + &__entry->tver, &__entry->pkey, + &__entry->psn, &__entry->qpn); + /* extended headers */ + memcpy(__get_dynamic_array(ehdrs), &ohdr->u, + __get_dynamic_array_len(ehdrs)); + ), + TP_printk("[%s] (IB) " LRH_PRN " " LRH_9B_PRN " " + BTH_PRN " " EHDR_PRN, + __get_str(dev), + __entry->len, + __entry->sc, + __entry->dlid, + __entry->slid, + __entry->lnh, show_lnh(__entry->lnh), + __entry->lver, + __entry->sl, + /* BTH */ + __entry->opcode, show_ib_opcode(__entry->opcode), + __entry->se, + __entry->mig, + __entry->pad, + __entry->tver, + __entry->pkey, + __entry->fecn, + __entry->becn, + __entry->qpn, + __entry->ack, + __entry->psn, + /* extended headers */ + __get_dynamic_array_len(ehdrs), + __parse_ib_ehdrs( + __entry->opcode, + (void *)__get_dynamic_array(ehdrs)) + ) +); + +DEFINE_EVENT(hfi1_input_ibhdr_template, input_ibhdr, + TP_PROTO(struct hfi1_devdata *dd, + struct hfi1_packet *packet, bool sc5), + TP_ARGS(dd, packet, sc5)); + +DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template, + TP_PROTO(struct hfi1_devdata *dd, + struct ib_header *hdr, + bool sc5), + TP_ARGS(dd, hdr, sc5), + TP_STRUCT__entry( + DD_DEV_ENTRY(dd) + __field(u8, lnh) + __field(u8, lver) + __field(u8, sl) + __field(u16, len) + __field(u32, dlid) + __field(u8, sc) + __field(u32, slid) + __field(u8, opcode) + __field(u8, se) + __field(u8, mig) + __field(u8, pad) + __field(u8, tver) + __field(u16, pkey) + __field(u8, fecn) + __field(u8, becn) + __field(u32, qpn) + __field(u8, ack) + __field(u32, psn) + /* extended headers */ + __dynamic_array(u8, ehdrs, + hfi1_trace_ib_hdr_len(hdr)) + ), + TP_fast_assign( struct ib_other_headers *ohdr; DD_DEV_ASSIGN(dd); - /* LRH */ - __entry->vl = - (u8)(be16_to_cpu(hdr->lrh[0]) >> 12); - __entry->lver = - (u8)(be16_to_cpu(hdr->lrh[0]) >> 8) & 0xf; - __entry->sl = - (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf; - __entry->lnh = - (u8)(be16_to_cpu(hdr->lrh[0]) & 3); - __entry->dlid = - be16_to_cpu(hdr->lrh[1]); - /* allow for larger len */ - __entry->len = - be16_to_cpu(hdr->lrh[2]); - __entry->slid = - be16_to_cpu(hdr->lrh[3]); - /* BTH */ - if (__entry->lnh == HFI1_LRH_BTH) - ohdr = &hdr->u.oth; - else - ohdr = &hdr->u.l.oth; - __entry->opcode = - (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff; - __entry->se = - (be32_to_cpu(ohdr->bth[0]) >> 23) & 1; - __entry->m = - (be32_to_cpu(ohdr->bth[0]) >> 22) & 1; - __entry->pad = - (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; - __entry->tver = - (be32_to_cpu(ohdr->bth[0]) >> 16) & 0xf; - __entry->pkey = - be32_to_cpu(ohdr->bth[0]) & 0xffff; - __entry->f = - (be32_to_cpu(ohdr->bth[1]) >> IB_FECN_SHIFT) & - IB_FECN_MASK; - __entry->b = - (be32_to_cpu(ohdr->bth[1]) >> IB_BECN_SHIFT) & - IB_BECN_MASK; - __entry->qpn = - be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK; - __entry->a = - (be32_to_cpu(ohdr->bth[2]) >> 31) & 1; - /* allow for larger PSN */ - __entry->psn = - be32_to_cpu(ohdr->bth[2]) & 0x7fffffff; + + hfi1_trace_parse_9b_hdr(hdr, sc5, + &ohdr, &__entry->lnh, + &__entry->lver, &__entry->sl, + &__entry->sc, &__entry->len, + &__entry->dlid, &__entry->slid); + + hfi1_trace_parse_bth(ohdr, &__entry->ack, + &__entry->becn, &__entry->fecn, + &__entry->mig, &__entry->se, + &__entry->pad, &__entry->opcode, + &__entry->tver, &__entry->pkey, + &__entry->psn, &__entry->qpn); + /* extended headers */ - memcpy(__get_dynamic_array(ehdrs), &ohdr->u, - ibhdr_exhdr_len(hdr)); - ), - TP_printk("[%s] " LRH_PRN " " BTH_PRN " " EHDR_PRN, - __get_str(dev), - /* LRH */ - __entry->vl, - __entry->lver, - __entry->sl, - __entry->lnh, show_lnh(__entry->lnh), 
- __entry->dlid, - __entry->len, - __entry->slid, - /* BTH */ - __entry->opcode, show_ib_opcode(__entry->opcode), - __entry->se, - __entry->m, - __entry->pad, - __entry->tver, - __entry->pkey, - __entry->f, - __entry->b, - __entry->qpn, - __entry->a, - __entry->psn, - /* extended headers */ - __parse_ib_ehdrs( - __entry->opcode, - (void *)__get_dynamic_array(ehdrs)) - ) + memcpy(__get_dynamic_array(ehdrs), + &ohdr->u, __get_dynamic_array_len(ehdrs)); + ), + TP_printk("[%s] (IB) " LRH_PRN " " LRH_9B_PRN " " + BTH_PRN " " EHDR_PRN, + __get_str(dev), + __entry->len, + __entry->sc, + __entry->dlid, + __entry->slid, + __entry->lnh, show_lnh(__entry->lnh), + __entry->lver, + __entry->sl, + /* BTH */ + __entry->opcode, show_ib_opcode(__entry->opcode), + __entry->se, + __entry->mig, + __entry->pad, + __entry->tver, + __entry->pkey, + __entry->fecn, + __entry->becn, + __entry->qpn, + __entry->ack, + __entry->psn, + /* extended headers */ + __get_dynamic_array_len(ehdrs), + __parse_ib_ehdrs( + __entry->opcode, + (void *)__get_dynamic_array(ehdrs)) + ) ); -DEFINE_EVENT(hfi1_ibhdr_template, input_ibhdr, - TP_PROTO(struct hfi1_devdata *dd, struct ib_header *hdr), - TP_ARGS(dd, hdr)); +DEFINE_EVENT(hfi1_output_ibhdr_template, pio_output_ibhdr, + TP_PROTO(struct hfi1_devdata *dd, + struct ib_header *hdr, bool sc5), + TP_ARGS(dd, hdr, sc5)); -DEFINE_EVENT(hfi1_ibhdr_template, pio_output_ibhdr, - TP_PROTO(struct hfi1_devdata *dd, struct ib_header *hdr), - TP_ARGS(dd, hdr)); +DEFINE_EVENT(hfi1_output_ibhdr_template, ack_output_ibhdr, + TP_PROTO(struct hfi1_devdata *dd, + struct ib_header *hdr, bool sc5), + TP_ARGS(dd, hdr, sc5)); -DEFINE_EVENT(hfi1_ibhdr_template, ack_output_ibhdr, - TP_PROTO(struct hfi1_devdata *dd, struct ib_header *hdr), - TP_ARGS(dd, hdr)); +DEFINE_EVENT(hfi1_output_ibhdr_template, sdma_output_ibhdr, + TP_PROTO(struct hfi1_devdata *dd, + struct ib_header *hdr, bool sc5), + TP_ARGS(dd, hdr, sc5)); -DEFINE_EVENT(hfi1_ibhdr_template, sdma_output_ibhdr, - TP_PROTO(struct hfi1_devdata *dd, struct ib_header *hdr), - TP_ARGS(dd, hdr)); #endif /* __HFI1_TRACE_IBHDRS_H */ diff --git a/drivers/infiniband/hw/hfi1/trace_rx.h b/drivers/infiniband/hw/hfi1/trace_rx.h index f77e59fb43fe..05fc6d68ffe8 100644 --- a/drivers/infiniband/hw/hfi1/trace_rx.h +++ b/drivers/infiniband/hw/hfi1/trace_rx.h @@ -55,6 +55,15 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM hfi1_rx +#define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype } +#define show_packettype(etype) \ +__print_symbolic(etype, \ + packettype_name(EXPECTED), \ + packettype_name(EAGER), \ + packettype_name(IB), \ + packettype_name(ERROR), \ + packettype_name(BYPASS)) + TRACE_EVENT(hfi1_rcvhdr, TP_PROTO(struct hfi1_devdata *dd, u32 ctxt, diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 128d2917a2d9..5f4be35f31b6 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -589,8 +589,7 @@ void hfi1_ib_rcv(struct hfi1_packet *packet) goto drop; } - trace_input_ibhdr(rcd->dd, hdr); - + trace_input_ibhdr(rcd->dd, packet, !!(packet->rhf & RHF_DC_INFO_SMASK)); opcode = ib_bth_get_opcode(packet->ohdr); inc_opstats(tlen, &rcd->opstats->stats[opcode]); @@ -885,7 +884,7 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps, return ret; } trace_sdma_output_ibhdr(dd_from_ibdev(qp->ibqp.device), - &ps->s_txreq->phdr.hdr); + &ps->s_txreq->phdr.hdr, ib_is_sc5(sc5)); return ret; bail_ecomm: @@ -1058,7 +1057,7 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state 
*ps, } trace_pio_output_ibhdr(dd_from_ibdev(qp->ibqp.device), - &ps->s_txreq->phdr.hdr); + &ps->s_txreq->phdr.hdr, ib_is_sc5(sc5)); pio_bail: if (qp->s_wqe) { From 9039746cdf39dcbf2ddfcc4a68f729cbbbc853df Mon Sep 17 00:00:00 2001 From: Don Hiatt Date: Fri, 12 May 2017 09:20:20 -0700 Subject: [PATCH 0094/1795] IB/hfi1: Setup common IB fields in hfi1_packet struct We move many common IB fields into the hfi1_packet structure and set them up in a single function. This allows us to set the fields in a single place and not deal with them throughout the driver. Reviewed-by: Brian Welty Reviewed-by: Dasaratharaman Chandramouli Reviewed-by: Dennis Dalessandro Signed-off-by: Don Hiatt Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/chip.c | 9 -- drivers/infiniband/hw/hfi1/chip.h | 2 - drivers/infiniband/hw/hfi1/common.h | 1 + drivers/infiniband/hw/hfi1/driver.c | 158 +++++++++++++++++++--------- drivers/infiniband/hw/hfi1/hfi.h | 21 +++- drivers/infiniband/hw/hfi1/rc.c | 33 ++---- drivers/infiniband/hw/hfi1/ruc.c | 87 ++++++++------- drivers/infiniband/hw/hfi1/uc.c | 16 +-- drivers/infiniband/hw/hfi1/ud.c | 23 ++-- drivers/infiniband/hw/hfi1/verbs.c | 89 +++++++--------- drivers/infiniband/hw/hfi1/verbs.h | 6 +- 11 files changed, 235 insertions(+), 210 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 2ba00b89df6a..5dbee3c1bd45 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -9810,15 +9810,6 @@ void hfi1_clear_tids(struct hfi1_ctxtdata *rcd) hfi1_put_tid(dd, i, PT_INVALID, 0, 0); } -struct ib_header *hfi1_get_msgheader( - struct hfi1_devdata *dd, __le32 *rhf_addr) -{ - u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr)); - - return (struct ib_header *) - (rhf_addr - dd->rhf_offset + offset); -} - static const char * const ib_cfg_name_strings[] = { "HFI1_IB_CFG_LIDLMC", "HFI1_IB_CFG_LWID_DG_ENB", diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h index cbe455d9ab8b..0b4f418ba0ac 100644 --- a/drivers/infiniband/hw/hfi1/chip.h +++ b/drivers/infiniband/hw/hfi1/chip.h @@ -1347,8 +1347,6 @@ enum { u64 get_all_cpu_total(u64 __percpu *cntr); void hfi1_start_cleanup(struct hfi1_devdata *dd); void hfi1_clear_tids(struct hfi1_ctxtdata *rcd); -struct ib_header *hfi1_get_msgheader( - struct hfi1_devdata *dd, __le32 *rhf_addr); void hfi1_init_ctxt(struct send_context *sc); void hfi1_put_tid(struct hfi1_devdata *dd, u32 index, u32 type, unsigned long pa, u16 order); diff --git a/drivers/infiniband/hw/hfi1/common.h b/drivers/infiniband/hw/hfi1/common.h index 995d62c7f9a7..ba9ab971ced9 100644 --- a/drivers/infiniband/hw/hfi1/common.h +++ b/drivers/infiniband/hw/hfi1/common.h @@ -325,6 +325,7 @@ struct diag_pkt { #define HFI1_LRH_BTH 0x0002 /* 1. word of IB LRH - next header: BTH */ /* misc. 
*/ +#define SC15_PACKET 0xF #define SIZE_OF_CRC 1 #define LIM_MGMT_P_KEY 0x7FFF diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c index 0583479f2576..2a1022e374a5 100644 --- a/drivers/infiniband/hw/hfi1/driver.c +++ b/drivers/infiniband/hw/hfi1/driver.c @@ -224,6 +224,20 @@ static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf, (offset * RCV_BUF_BLOCK_SIZE)); } +static inline void *hfi1_get_header(struct hfi1_devdata *dd, + __le32 *rhf_addr) +{ + u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr)); + + return (void *)(rhf_addr - dd->rhf_offset + offset); +} + +static inline struct ib_header *hfi1_get_msgheader(struct hfi1_devdata *dd, + __le32 *rhf_addr) +{ + return (struct ib_header *)hfi1_get_header(dd, rhf_addr); +} + /* * Validate and encode the a given RcvArray Buffer size. * The function will check whether the given size falls within @@ -249,7 +263,8 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, { struct ib_header *rhdr = packet->hdr; u32 rte = rhf_rcv_type_err(packet->rhf); - int lnh = ib_get_lnh(rhdr); + u8 lnh = ib_get_lnh(rhdr); + bool has_grh = false; struct hfi1_ibport *ibp = rcd_to_iport(rcd); struct hfi1_devdata *dd = ppd->dd; struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; @@ -257,37 +272,42 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, if (packet->rhf & (RHF_VCRC_ERR | RHF_ICRC_ERR)) return; + if (lnh == HFI1_LRH_BTH) { + packet->ohdr = &rhdr->u.oth; + } else if (lnh == HFI1_LRH_GRH) { + has_grh = true; + packet->ohdr = &rhdr->u.l.oth; + packet->grh = &rhdr->u.l.grh; + } else { + goto drop; + } + if (packet->rhf & RHF_TID_ERR) { /* For TIDERR and RC QPs preemptively schedule a NAK */ - struct ib_other_headers *ohdr = NULL; u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */ - u16 lid = ib_get_dlid(rhdr); + u32 dlid = ib_get_dlid(rhdr); u32 qp_num; - u32 rcv_flags = 0; + u32 mlid_base = be16_to_cpu(IB_MULTICAST_LID_BASE); /* Sanity check packet */ if (tlen < 24) goto drop; /* Check for GRH */ - if (lnh == HFI1_LRH_BTH) { - ohdr = &rhdr->u.oth; - } else if (lnh == HFI1_LRH_GRH) { + if (has_grh) { u32 vtf; + struct ib_grh *grh = packet->grh; - ohdr = &rhdr->u.l.oth; - if (rhdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR) + if (grh->next_hdr != IB_GRH_NEXT_HDR) goto drop; - vtf = be32_to_cpu(rhdr->u.l.grh.version_tclass_flow); + vtf = be32_to_cpu(grh->version_tclass_flow); if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION) goto drop; - rcv_flags |= HFI1_HAS_GRH; - } else { - goto drop; } + /* Get the destination QP number. 
*/ - qp_num = ib_bth_get_qpn(ohdr); - if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) { + qp_num = ib_bth_get_qpn(packet->ohdr); + if (dlid < mlid_base) { struct rvt_qp *qp; unsigned long flags; @@ -312,11 +332,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, switch (qp->ibqp.qp_type) { case IB_QPT_RC: - hfi1_rc_hdrerr( - rcd, - rhdr, - rcv_flags, - qp); + hfi1_rc_hdrerr(rcd, packet, qp); break; default: /* For now don't handle any other QP types */ @@ -332,9 +348,8 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, switch (rte) { case RHF_RTE_ERROR_OP_CODE_ERR: { - u32 opcode; void *ebuf = NULL; - __be32 *bth = NULL; + u8 opcode; if (rhf_use_egr_bfr(packet->rhf)) ebuf = packet->ebuf; @@ -342,16 +357,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, if (!ebuf) goto drop; /* this should never happen */ - if (lnh == HFI1_LRH_BTH) - bth = (__be32 *)ebuf; - else if (lnh == HFI1_LRH_GRH) - bth = (__be32 *)((char *)ebuf + sizeof(struct ib_grh)); - else - goto drop; - - opcode = be32_to_cpu(bth[0]) >> 24; - opcode &= 0xff; - + opcode = ib_bth_get_opcode(packet->ohdr); if (opcode == IB_OPCODE_CNP) { /* * Only in pre-B0 h/w is the CNP_OPCODE handled @@ -365,7 +371,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, sc5 = hfi1_9B_get_sc5(rhdr, packet->rhf); sl = ibp->sc_to_sl[sc5]; - lqpn = be32_to_cpu(bth[1]) & RVT_QPN_MASK; + lqpn = ib_bth_get_qpn(packet->ohdr); rcu_read_lock(); qp = rvt_lookup_qpn(rdi, &ibp->rvp, lqpn); if (!qp) { @@ -415,7 +421,6 @@ static inline void init_packet(struct hfi1_ctxtdata *rcd, packet->rhf = rhf_to_cpu(packet->rhf_addr); packet->rhqoff = rcd->head; packet->numpkt = 0; - packet->rcv_flags = 0; } void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt, @@ -424,15 +429,12 @@ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt, struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct ib_header *hdr = pkt->hdr; struct ib_other_headers *ohdr = pkt->ohdr; - struct ib_grh *grh = NULL; + struct ib_grh *grh = pkt->grh; u32 rqpn = 0, bth1; u16 rlid, dlid = ib_get_dlid(hdr); u8 sc, svc_type; bool is_mcast = false; - if (pkt->rcv_flags & HFI1_HAS_GRH) - grh = &hdr->u.l.grh; - switch (qp->ibqp.qp_type) { case IB_QPT_SMI: case IB_QPT_GSI: @@ -591,9 +593,10 @@ static void __prescan_rxq(struct hfi1_packet *packet) if (lnh == HFI1_LRH_BTH) { packet->ohdr = &hdr->u.oth; + packet->grh = NULL; } else if (lnh == HFI1_LRH_GRH) { packet->ohdr = &hdr->u.l.oth; - packet->rcv_flags |= HFI1_HAS_GRH; + packet->grh = &hdr->u.l.grh; } else { goto next; /* just in case */ } @@ -698,10 +701,9 @@ static inline int process_rcv_packet(struct hfi1_packet *packet, int thread) { int ret; - packet->hdr = hfi1_get_msgheader(packet->rcd->dd, - packet->rhf_addr); - packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr; packet->etype = rhf_rcv_type(packet->rhf); + + packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr; /* total length */ packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */ /* retrieve eager buffer details */ @@ -759,7 +761,7 @@ static inline void process_rcv_update(int last, struct hfi1_packet *packet) packet->etail, 0, 0); packet->updegr = 0; } - packet->rcv_flags = 0; + packet->grh = NULL; } static inline void finish_packet(struct hfi1_packet *packet) @@ -896,12 +898,15 @@ static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd, struct hfi1_devdata *dd) { struct work_struct *lsaw = 
&rcd->ppd->linkstate_active_work; - struct ib_header *hdr = hfi1_get_msgheader(packet->rcd->dd, - packet->rhf_addr); u8 etype = rhf_rcv_type(packet->rhf); + u8 sc = SC15_PACKET; - if (etype == RHF_RCV_TYPE_IB && - hfi1_9B_get_sc5(hdr, packet->rhf) != 0xf) { + if (etype == RHF_RCV_TYPE_IB) { + struct ib_header *hdr = hfi1_get_msgheader(packet->rcd->dd, + packet->rhf_addr); + sc = hfi1_9B_get_sc5(hdr, packet->rhf); + } + if (sc != SC15_PACKET) { int hwstate = read_logical_state(dd); if (hwstate != LSTATE_ACTIVE) { @@ -1321,6 +1326,58 @@ bail: return ret; } +static inline void hfi1_setup_ib_header(struct hfi1_packet *packet) +{ + packet->hdr = (struct hfi1_ib_message_header *) + hfi1_get_msgheader(packet->rcd->dd, + packet->rhf_addr); + packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr; +} + +static int hfi1_setup_9B_packet(struct hfi1_packet *packet) +{ + struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd); + struct ib_header *hdr; + u8 lnh; + + hfi1_setup_ib_header(packet); + hdr = packet->hdr; + + lnh = ib_get_lnh(hdr); + if (lnh == HFI1_LRH_BTH) { + packet->ohdr = &hdr->u.oth; + packet->grh = NULL; + } else if (lnh == HFI1_LRH_GRH) { + u32 vtf; + + packet->ohdr = &hdr->u.l.oth; + packet->grh = &hdr->u.l.grh; + if (packet->grh->next_hdr != IB_GRH_NEXT_HDR) + goto drop; + vtf = be32_to_cpu(packet->grh->version_tclass_flow); + if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION) + goto drop; + } else { + goto drop; + } + + /* Query commonly used fields from packet header */ + packet->opcode = ib_bth_get_opcode(packet->ohdr); + packet->slid = ib_get_slid(hdr); + packet->dlid = ib_get_dlid(hdr); + packet->sl = ib_get_sl(hdr); + packet->sc = hfi1_9B_get_sc5(hdr, packet->rhf); + packet->pad = ib_bth_get_pad(packet->ohdr); + packet->extra_byte = 0; + packet->fecn = ib_bth_get_fecn(packet->ohdr); + packet->becn = ib_bth_get_becn(packet->ohdr); + + return 0; +drop: + ibp->rvp.n_pkt_drops++; + return -EINVAL; +} + void handle_eflags(struct hfi1_packet *packet) { struct hfi1_ctxtdata *rcd = packet->rcd; @@ -1351,6 +1408,9 @@ int process_receive_ib(struct hfi1_packet *packet) if (unlikely(hfi1_dbg_fault_packet(packet))) return RHF_RCV_CONTINUE; + if (hfi1_setup_9B_packet(packet)) + return RHF_RCV_CONTINUE; + trace_hfi1_rcvhdr(packet->rcd->ppd->dd, packet->rcd->ctxt, rhf_err_flags(packet->rhf), @@ -1422,6 +1482,7 @@ int process_receive_error(struct hfi1_packet *packet) rhf_rcv_type_err(packet->rhf) == 3)) return RHF_RCV_CONTINUE; + hfi1_setup_ib_header(packet); handle_eflags(packet); if (unlikely(rhf_err_flags(packet->rhf))) @@ -1435,6 +1496,8 @@ int kdeth_process_expected(struct hfi1_packet *packet) { if (unlikely(hfi1_dbg_fault_packet(packet))) return RHF_RCV_CONTINUE; + + hfi1_setup_ib_header(packet); if (unlikely(rhf_err_flags(packet->rhf))) handle_eflags(packet); @@ -1445,6 +1508,7 @@ int kdeth_process_expected(struct hfi1_packet *packet) int kdeth_process_eager(struct hfi1_packet *packet) { + hfi1_setup_ib_header(packet); if (unlikely(rhf_err_flags(packet->rhf))) handle_eflags(packet); if (unlikely(hfi1_dbg_fault_packet(packet))) diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index 3b76631cbcbd..9c6c73448461 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -356,17 +356,26 @@ struct hfi1_packet { __le32 *rhf_addr; struct rvt_qp *qp; struct ib_other_headers *ohdr; + struct ib_grh *grh; u64 rhf; u32 maxcnt; u32 rhqoff; + u32 dlid; + u32 slid; u16 tlen; s16 etail; u8 hlen; u8 numpkt; u8 rsize; u8 updegr; - u8 rcv_flags; 
u8 etype; + u8 extra_byte; + u8 pad; + u8 sc; + u8 sl; + u8 opcode; + bool becn; + bool fecn; }; struct rvt_sge_state; @@ -2086,4 +2095,14 @@ int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp); #define DD_DEV_ENTRY(dd) __string(dev, dev_name(&(dd)->pcidev->dev)) #define DD_DEV_ASSIGN(dd) __assign_str(dev, dev_name(&(dd)->pcidev->dev)) + +/* + * hfi1_check_mcast- Check if the given lid is + * in the IB multicast range. + */ +static inline bool hfi1_check_mcast(u16 lid) +{ + return ((lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) && + (lid != be16_to_cpu(IB_LID_PERMISSIVE))); +} #endif /* _HFI1_KERNEL_H */ diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index b443c1e01543..baa67bf0772b 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c @@ -1916,17 +1916,16 @@ void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn, void hfi1_rc_rcv(struct hfi1_packet *packet) { struct hfi1_ctxtdata *rcd = packet->rcd; - struct ib_header *hdr = packet->hdr; - u32 rcv_flags = packet->rcv_flags; void *data = packet->ebuf; u32 tlen = packet->tlen; struct rvt_qp *qp = packet->qp; struct hfi1_ibport *ibp = rcd_to_iport(rcd); struct ib_other_headers *ohdr = packet->ohdr; - u32 bth0, opcode; + u32 bth0; + u32 opcode = packet->opcode; u32 hdrsize = packet->hlen; u32 psn; - u32 pad; + u32 pad = packet->pad; struct ib_wc wc; u32 pmtu = qp->pmtu; int diff; @@ -1938,14 +1937,13 @@ void hfi1_rc_rcv(struct hfi1_packet *packet) u32 rkey; lockdep_assert_held(&qp->r_lock); + bth0 = be32_to_cpu(ohdr->bth[0]); - if (hfi1_ruc_check_hdr(ibp, hdr, rcv_flags & HFI1_HAS_GRH, qp, bth0)) + if (hfi1_ruc_check_hdr(ibp, packet)) return; is_fecn = process_ecn(qp, packet, false); - psn = ib_bth_get_psn(ohdr); - opcode = ib_bth_get_opcode(ohdr); /* * Process responses (ACKs) before anything else. Note that the @@ -2075,8 +2073,6 @@ no_immediate_data: wc.wc_flags = 0; wc.ex.imm_data = 0; send_last: - /* Get the number of bytes the message was padded by. */ - pad = ib_bth_get_pad(ohdr); /* Check for invalid length. */ /* LAST len should be >= 1 */ if (unlikely(tlen < (hdrsize + pad + 4))) @@ -2369,28 +2365,19 @@ send_ack: void hfi1_rc_hdrerr( struct hfi1_ctxtdata *rcd, - struct ib_header *hdr, - u32 rcv_flags, + struct hfi1_packet *packet, struct rvt_qp *qp) { - int has_grh = rcv_flags & HFI1_HAS_GRH; - struct ib_other_headers *ohdr; struct hfi1_ibport *ibp = rcd_to_iport(rcd); int diff; u32 opcode; - u32 psn, bth0; + u32 psn; - /* Check for GRH */ - ohdr = &hdr->u.oth; - if (has_grh) - ohdr = &hdr->u.l.oth; - - bth0 = be32_to_cpu(ohdr->bth[0]); - if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0)) + if (hfi1_ruc_check_hdr(ibp, packet)) return; - psn = ib_bth_get_psn(ohdr); - opcode = ib_bth_get_opcode(ohdr); + psn = ib_bth_get_psn(packet->ohdr); + opcode = ib_bth_get_opcode(packet->ohdr); /* Only deal with RDMA Writes for now */ if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) { diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c index 3a17daba28a9..9cc9c7be9dd4 100644 --- a/drivers/infiniband/hw/hfi1/ruc.c +++ b/drivers/infiniband/hw/hfi1/ruc.c @@ -214,100 +214,95 @@ static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id) * * The s_lock will be acquired around the hfi1_migrate_qp() call. 
*/ -int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct ib_header *hdr, - int has_grh, struct rvt_qp *qp, u32 bth0) +int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet) { __be64 guid; unsigned long flags; + struct rvt_qp *qp = packet->qp; u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)]; + u32 dlid = packet->dlid; + u32 slid = packet->slid; + u32 sl = packet->sl; + int migrated; + u32 bth0, bth1; - if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) { - if (!has_grh) { + bth0 = be32_to_cpu(packet->ohdr->bth[0]); + bth1 = be32_to_cpu(packet->ohdr->bth[1]); + migrated = bth0 & IB_BTH_MIG_REQ; + + if (qp->s_mig_state == IB_MIG_ARMED && migrated) { + if (!packet->grh) { if (rdma_ah_get_ah_flags(&qp->alt_ah_attr) & IB_AH_GRH) - goto err; + return 1; } else { const struct ib_global_route *grh; if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) & IB_AH_GRH)) - goto err; + return 1; grh = rdma_ah_read_grh(&qp->alt_ah_attr); guid = get_sguid(ibp, grh->sgid_index); - if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix, + if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix, guid)) - goto err; + return 1; if (!gid_ok( - &hdr->u.l.grh.sgid, + &packet->grh->sgid, grh->dgid.global.subnet_prefix, grh->dgid.global.interface_id)) - goto err; + return 1; } - if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0, sc5, - ib_get_slid(hdr)))) { - hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY, - (u16)bth0, - ib_get_sl(hdr), - 0, qp->ibqp.qp_num, - ib_get_slid(hdr), - ib_get_dlid(hdr)); - goto err; + if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0, + sc5, slid))) { + hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY, (u16)bth0, sl, + 0, qp->ibqp.qp_num, slid, dlid); + return 1; } /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */ - if (ib_get_slid(hdr) != - rdma_ah_get_dlid(&qp->alt_ah_attr) || + if (slid != rdma_ah_get_dlid(&qp->alt_ah_attr) || ppd_from_ibp(ibp)->port != rdma_ah_get_port_num(&qp->alt_ah_attr)) - goto err; + return 1; spin_lock_irqsave(&qp->s_lock, flags); hfi1_migrate_qp(qp); spin_unlock_irqrestore(&qp->s_lock, flags); } else { - if (!has_grh) { + if (!packet->grh) { if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) - goto err; + return 1; } else { const struct ib_global_route *grh; if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) - goto err; + return 1; grh = rdma_ah_read_grh(&qp->remote_ah_attr); guid = get_sguid(ibp, grh->sgid_index); - if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix, + if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix, guid)) - goto err; + return 1; if (!gid_ok( - &hdr->u.l.grh.sgid, + &packet->grh->sgid, grh->dgid.global.subnet_prefix, grh->dgid.global.interface_id)) - goto err; + return 1; } - if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0, sc5, - ib_get_slid(hdr)))) { - hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY, - (u16)bth0, - ib_get_sl(hdr), - 0, qp->ibqp.qp_num, - ib_get_slid(hdr), - ib_get_dlid(hdr)); - goto err; + if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0, + sc5, slid))) { + hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY, (u16)bth0, sl, + 0, qp->ibqp.qp_num, slid, dlid); + return 1; } /* Validate the SLID. See Ch. 
9.6.1.5 */ - if (ib_get_slid(hdr) != - rdma_ah_get_dlid(&qp->remote_ah_attr) || + if ((slid != rdma_ah_get_dlid(&qp->remote_ah_attr)) || ppd_from_ibp(ibp)->port != qp->port_num) - goto err; - if (qp->s_mig_state == IB_MIG_REARM && - !(bth0 & IB_BTH_MIG_REQ)) + return 1; + if (qp->s_mig_state == IB_MIG_REARM && !migrated) qp->s_mig_state = IB_MIG_ARMED; } return 0; - -err: - return 1; } /** diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c index 2a5650f8aee0..76c2451a53d7 100644 --- a/drivers/infiniband/hw/hfi1/uc.c +++ b/drivers/infiniband/hw/hfi1/uc.c @@ -297,31 +297,25 @@ bail_no_tx: void hfi1_uc_rcv(struct hfi1_packet *packet) { struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd); - struct ib_header *hdr = packet->hdr; - u32 rcv_flags = packet->rcv_flags; void *data = packet->ebuf; u32 tlen = packet->tlen; struct rvt_qp *qp = packet->qp; struct ib_other_headers *ohdr = packet->ohdr; - u32 bth0, opcode; + u32 opcode = packet->opcode; u32 hdrsize = packet->hlen; u32 psn; - u32 pad; + u32 pad = packet->pad; struct ib_wc wc; u32 pmtu = qp->pmtu; struct ib_reth *reth; - int has_grh = rcv_flags & HFI1_HAS_GRH; int ret; - bth0 = be32_to_cpu(ohdr->bth[0]); - if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0)) + if (hfi1_ruc_check_hdr(ibp, packet)) return; process_ecn(qp, packet, true); psn = ib_bth_get_psn(ohdr); - opcode = ib_bth_get_opcode(ohdr); - /* Compare the PSN verses the expected PSN. */ if (unlikely(cmp_psn(psn, qp->r_psn) != 0)) { /* @@ -432,8 +426,6 @@ no_immediate_data: wc.ex.imm_data = 0; wc.wc_flags = 0; send_last: - /* Get the number of bytes the message was padded by. */ - pad = ib_bth_get_pad(ohdr); /* Check for invalid length. */ /* LAST len should be >= 1 */ if (unlikely(tlen < (hdrsize + pad + 4))) @@ -527,8 +519,6 @@ rdma_first: rdma_last_imm: wc.wc_flags = IB_WC_WITH_IMM; - /* Get the number of bytes the message was padded by. */ - pad = ib_bth_get_pad(ohdr); /* Check for invalid length. 
*/ /* LAST len should be >= 1 */ if (unlikely(tlen < (hdrsize + pad + 4))) diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c index 49fe179ad3ae..c995aa58c36a 100644 --- a/drivers/infiniband/hw/hfi1/ud.c +++ b/drivers/infiniband/hw/hfi1/ud.c @@ -668,36 +668,31 @@ static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5, void hfi1_ud_rcv(struct hfi1_packet *packet) { struct ib_other_headers *ohdr = packet->ohdr; - int opcode; u32 hdrsize = packet->hlen; struct ib_wc wc; u32 qkey; u32 src_qp; - u16 dlid, pkey; + u16 pkey; int mgmt_pkey_idx = -1; struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct ib_header *hdr = packet->hdr; - u32 rcv_flags = packet->rcv_flags; void *data = packet->ebuf; u32 tlen = packet->tlen; struct rvt_qp *qp = packet->qp; - bool has_grh = rcv_flags & HFI1_HAS_GRH; u8 sc5 = hfi1_9B_get_sc5(hdr, packet->rhf); u32 bth1; - u8 sl_from_sc, sl; - u16 slid; - u8 extra_bytes; + u8 sl_from_sc; + u8 extra_bytes = packet->pad; + u8 opcode = packet->opcode; + u8 sl = packet->sl; + u32 dlid = packet->dlid; + u32 slid = packet->slid; + bth1 = be32_to_cpu(ohdr->bth[1]); qkey = ib_get_qkey(ohdr); src_qp = ib_get_sqpn(ohdr); - dlid = ib_get_dlid(hdr); - bth1 = be32_to_cpu(ohdr->bth[1]); - slid = ib_get_slid(hdr); pkey = ib_bth_get_pkey(ohdr); - opcode = ib_bth_get_opcode(ohdr); - sl = ib_get_sl(hdr); - extra_bytes = ib_bth_get_pad(ohdr); extra_bytes += (SIZE_OF_CRC << 2); sl_from_sc = ibp->sc_to_sl[sc5]; @@ -811,7 +806,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) qp->r_flags |= RVT_R_REUSE_SGE; goto drop; } - if (has_grh) { + if (packet->grh) { hfi1_copy_sge(&qp->r_sge, &hdr->u.l.grh, sizeof(struct ib_grh), true, false); wc.wc_flags |= IB_WC_GRH; diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 5f4be35f31b6..af54d3f4696a 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -508,13 +508,14 @@ again: /* * Make sure the QP is ready and able to accept the given opcode. */ -static inline opcode_handler qp_ok(int opcode, struct hfi1_packet *packet) +static inline opcode_handler qp_ok(struct hfi1_packet *packet) { if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK)) return NULL; - if (((opcode & RVT_OPCODE_QP_MASK) == packet->qp->allowed_ops) || - (opcode == IB_OPCODE_CNP)) - return opcode_handler_tbl[opcode]; + if (((packet->opcode & RVT_OPCODE_QP_MASK) == + packet->qp->allowed_ops) || + (packet->opcode == IB_OPCODE_CNP)) + return opcode_handler_tbl[packet->opcode]; return NULL; } @@ -548,68 +549,34 @@ static u64 hfi1_fault_tx(struct rvt_qp *qp, u8 opcode, u64 pbc) return pbc; } -/** - * hfi1_ib_rcv - process an incoming packet - * @packet: data packet information - * - * This is called to process an incoming packet at interrupt level. - * - * Tlen is the length of the header + data + CRC in bytes. 
- */ -void hfi1_ib_rcv(struct hfi1_packet *packet) +static inline void hfi1_handle_packet(struct hfi1_packet *packet, + bool is_mcast) { + u32 qp_num; struct hfi1_ctxtdata *rcd = packet->rcd; - struct ib_header *hdr = packet->hdr; - u32 tlen = packet->tlen; struct hfi1_pportdata *ppd = rcd->ppd; struct hfi1_ibport *ibp = rcd_to_iport(rcd); struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi; opcode_handler packet_handler; unsigned long flags; - u32 qp_num; - int lnh; - u8 opcode; - u16 lid; - /* Check for GRH */ - lnh = ib_get_lnh(hdr); - if (lnh == HFI1_LRH_BTH) { - packet->ohdr = &hdr->u.oth; - } else if (lnh == HFI1_LRH_GRH) { - u32 vtf; + inc_opstats(packet->tlen, &rcd->opstats->stats[packet->opcode]); - packet->ohdr = &hdr->u.l.oth; - if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR) - goto drop; - vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow); - if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION) - goto drop; - packet->rcv_flags |= HFI1_HAS_GRH; - } else { - goto drop; - } - - trace_input_ibhdr(rcd->dd, packet, !!(packet->rhf & RHF_DC_INFO_SMASK)); - opcode = ib_bth_get_opcode(packet->ohdr); - inc_opstats(tlen, &rcd->opstats->stats[opcode]); - - /* Get the destination QP number. */ - qp_num = ib_bth_get_qpn(packet->ohdr); - lid = ib_get_dlid(hdr); - if (unlikely((lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) && - (lid != be16_to_cpu(IB_LID_PERMISSIVE)))) { + if (unlikely(is_mcast)) { struct rvt_mcast *mcast; struct rvt_mcast_qp *p; - if (lnh != HFI1_LRH_GRH) + if (!packet->grh) goto drop; - mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid, lid); + mcast = rvt_mcast_find(&ibp->rvp, + &packet->grh->dgid, + packet->dlid); if (!mcast) goto drop; list_for_each_entry_rcu(p, &mcast->qp_list, list) { packet->qp = p->qp; spin_lock_irqsave(&packet->qp->r_lock, flags); - packet_handler = qp_ok(opcode, packet); + packet_handler = qp_ok(packet); if (likely(packet_handler)) packet_handler(packet); else @@ -623,19 +590,21 @@ void hfi1_ib_rcv(struct hfi1_packet *packet) if (atomic_dec_return(&mcast->refcount) <= 1) wake_up(&mcast->wait); } else { + /* Get the destination QP number. */ + qp_num = ib_bth_get_qpn(packet->ohdr); rcu_read_lock(); packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num); if (!packet->qp) { rcu_read_unlock(); goto drop; } - if (unlikely(hfi1_dbg_fault_opcode(packet->qp, opcode, + if (unlikely(hfi1_dbg_fault_opcode(packet->qp, packet->opcode, true))) { rcu_read_unlock(); goto drop; } spin_lock_irqsave(&packet->qp->r_lock, flags); - packet_handler = qp_ok(opcode, packet); + packet_handler = qp_ok(packet); if (likely(packet_handler)) packet_handler(packet); else @@ -644,11 +613,29 @@ void hfi1_ib_rcv(struct hfi1_packet *packet) rcu_read_unlock(); } return; - drop: ibp->rvp.n_pkt_drops++; } +/** + * hfi1_ib_rcv - process an incoming packet + * @packet: data packet information + * + * This is called to process an incoming packet at interrupt level. + */ +void hfi1_ib_rcv(struct hfi1_packet *packet) +{ + struct hfi1_ctxtdata *rcd = packet->rcd; + bool is_mcast = false; + + if (unlikely(hfi1_check_mcast(packet->dlid))) + is_mcast = true; + + trace_input_ibhdr(rcd->dd, packet, + !!(packet->rhf & RHF_DC_INFO_SMASK)); + hfi1_handle_packet(packet, is_mcast); +} + /* * This is called from a timer to check for QPs * which need kernel memory in order to send a packet. 
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h index cd635d0c1d3b..17b38cd5f654 100644 --- a/drivers/infiniband/hw/hfi1/verbs.h +++ b/drivers/infiniband/hw/hfi1/verbs.h @@ -307,8 +307,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet); void hfi1_rc_hdrerr( struct hfi1_ctxtdata *rcd, - struct ib_header *hdr, - u32 rcv_flags, + struct hfi1_packet *packet, struct rvt_qp *qp); u8 ah_to_sc(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr); @@ -346,8 +345,7 @@ static inline u8 get_opcode(struct ib_header *h) return be32_to_cpu(h->u.l.oth.bth[0]) >> 24; } -int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct ib_header *hdr, - int has_grh, struct rvt_qp *qp, u32 bth0); +int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet); u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr, const struct ib_global_route *grh, u32 hwords, u32 nwords); From 14fe13fcd3afb96b06809f280b586be1c998332c Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Fri, 12 May 2017 09:20:31 -0700 Subject: [PATCH 0095/1795] IB/rdmavt: Compress adjacent SGEs in rvt_lkey_ok() SGEs that are contiguous needlessly consume driver dependent TX resources. The lkey validation logic is enhanced to compress the SGE that ends up in the send wqe when consecutive addresses are detected. The lkey validation API used to return 1 (success) or 0 (fail). The return value is now an -errno, 0 (compressed), or 1 (uncompressed). An additional argument is added to pass the last SGE for the compression. Loopback callers always pass a NULL to last_sge since the optimization is of little benefit in that situation. Reviewed-by: Dennis Dalessandro Signed-off-by: Brian Welty Signed-off-by: Venkata Sandeep Dhanalakota Signed-off-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/ruc.c | 2 +- drivers/infiniband/hw/qib/qib_ruc.c | 2 +- drivers/infiniband/sw/rdmavt/mr.c | 51 +++++++++++++++++--- drivers/infiniband/sw/rdmavt/qp.c | 23 +++++---- drivers/infiniband/sw/rdmavt/trace_mr.h | 62 +++++++++++++++++++++++++ drivers/infiniband/sw/rdmavt/trace_tx.h | 11 +++-- include/rdma/rdma_vt.h | 3 +- 7 files changed, 130 insertions(+), 24 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c index 9cc9c7be9dd4..476fe5da2992 100644 --- a/drivers/infiniband/hw/hfi1/ruc.c +++ b/drivers/infiniband/hw/hfi1/ruc.c @@ -75,7 +75,7 @@ static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe) continue; /* Check LKEY */ if (!rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge, - &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE)) + NULL, &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE)) goto bad_lkey; qp->r_len += wqe->sg_list[i].length; j++; diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c index bd09de7c6e56..88d84cbf7e5a 100644 --- a/drivers/infiniband/hw/qib/qib_ruc.c +++ b/drivers/infiniband/hw/qib/qib_ruc.c @@ -59,7 +59,7 @@ static int qib_init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe) continue; /* Check LKEY */ if (!rvt_lkey_ok(rkt, pd, j ?
&ss->sg_list[j - 1] : &ss->sge, - &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE)) + NULL, &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE)) goto bad_lkey; qp->r_len += wqe->sg_list[i].length; j++; diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c index aa5f9ea318e4..ea95672d9675 100644 --- a/drivers/infiniband/sw/rdmavt/mr.c +++ b/drivers/infiniband/sw/rdmavt/mr.c @@ -777,24 +777,55 @@ out: return ret; } +/** + * rvt_sge_adjacent - is isge compressible + * @isge: outgoing internal SGE + * @last_sge: last outgoing SGE written + * @sge: SGE to check + * + * If adjacent will update last_sge to add length. + * + * Return: true if isge is adjacent to last sge + */ +static inline bool rvt_sge_adjacent(struct rvt_sge *isge, + struct rvt_sge *last_sge, + struct ib_sge *sge) +{ + if (last_sge && sge->lkey == last_sge->mr->lkey && + ((uint64_t)(last_sge->vaddr + last_sge->length) == sge->addr)) { + if (sge->lkey) { + if (unlikely((sge->addr - last_sge->mr->user_base + + sge->length > last_sge->mr->length))) + return false; /* overrun, caller will catch */ + } else { + last_sge->length += sge->length; + } + last_sge->sge_length += sge->length; + trace_rvt_sge_adjacent(last_sge, sge); + return true; + } + return false; +} + /** * rvt_lkey_ok - check IB SGE for validity and initialize * @rkt: table containing lkey to check SGE against * @pd: protection domain * @isge: outgoing internal SGE + * @last_sge: last outgoing SGE written * @sge: SGE to check * @acc: access flags * * Check the IB SGE for validity and initialize our internal version * of it. * - * Return: 1 if valid and successful, otherwise returns 0. - * - * increments the reference count upon success + * Increments the reference count when a new sge is stored. * + * Return: 0 if compressed, 1 if added , otherwise returns -errno. */ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd, - struct rvt_sge *isge, struct ib_sge *sge, int acc) + struct rvt_sge *isge, struct rvt_sge *last_sge, + struct ib_sge *sge, int acc) { struct rvt_mregion *mr; unsigned n, m; @@ -804,12 +835,14 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd, * We use LKEY == zero for kernel virtual addresses * (see rvt_get_dma_mr() and dma_virt_ops). 
*/ - rcu_read_lock(); if (sge->lkey == 0) { struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device); if (pd->user) - goto bail; + return -EINVAL; + if (rvt_sge_adjacent(isge, last_sge, sge)) + return 0; + rcu_read_lock(); mr = rcu_dereference(dev->dma_mr); if (!mr) goto bail; @@ -824,6 +857,9 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd, isge->n = 0; goto ok; } + if (rvt_sge_adjacent(isge, last_sge, sge)) + return 0; + rcu_read_lock(); mr = rcu_dereference(rkt->table[sge->lkey >> rkt->shift]); if (!mr) goto bail; @@ -874,12 +910,13 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd, isge->m = m; isge->n = n; ok: + trace_rvt_sge_new(isge, sge); return 1; bail_unref: rvt_put_mr(mr); bail: rcu_read_unlock(); - return 0; + return -EINVAL; } EXPORT_SYMBOL(rvt_lkey_ok); diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 727e81cc2c8f..a3dd1e536860 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -1646,7 +1646,7 @@ static int rvt_post_one_wr(struct rvt_qp *qp, struct rvt_pd *pd; struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); u8 log_pmtu; - int ret; + int ret, incr; size_t cplen; bool reserved_op; int local_ops_delayed = 0; @@ -1719,22 +1719,23 @@ static int rvt_post_one_wr(struct rvt_qp *qp, wqe->length = 0; j = 0; if (wr->num_sge) { + struct rvt_sge *last_sge = NULL; + acc = wr->opcode >= IB_WR_RDMA_READ ? IB_ACCESS_LOCAL_WRITE : 0; for (i = 0; i < wr->num_sge; i++) { u32 length = wr->sg_list[i].length; - int ok; if (length == 0) continue; - ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], - &wr->sg_list[i], acc); - if (!ok) { - ret = -EINVAL; - goto bail_inval_free; - } + incr = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], last_sge, + &wr->sg_list[i], acc); + if (unlikely(incr < 0)) + goto bail_lkey_error; wqe->length += length; - j++; + if (incr) + last_sge = &wqe->sg_list[j]; + j += incr; } wqe->wr.num_sge = j; } @@ -1781,12 +1782,14 @@ static int rvt_post_one_wr(struct rvt_qp *qp, wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED; qp->s_avail--; } - trace_rvt_post_one_wr(qp, wqe); + trace_rvt_post_one_wr(qp, wqe, wr->num_sge); smp_wmb(); /* see request builders */ qp->s_head = next; return 0; +bail_lkey_error: + ret = incr; bail_inval_free: /* release mr holds */ while (j) { diff --git a/drivers/infiniband/sw/rdmavt/trace_mr.h b/drivers/infiniband/sw/rdmavt/trace_mr.h index 3318a6c36373..976e482930a3 100644 --- a/drivers/infiniband/sw/rdmavt/trace_mr.h +++ b/drivers/infiniband/sw/rdmavt/trace_mr.h @@ -103,6 +103,68 @@ DEFINE_EVENT( TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len), TP_ARGS(mr, m, n, v, len)); +DECLARE_EVENT_CLASS( + rvt_sge_template, + TP_PROTO(struct rvt_sge *sge, struct ib_sge *isge), + TP_ARGS(sge, isge), + TP_STRUCT__entry( + RDI_DEV_ENTRY(ib_to_rvt(sge->mr->pd->device)) + __field(struct rvt_mregion *, mr) + __field(struct rvt_sge *, sge) + __field(struct ib_sge *, isge) + __field(void *, vaddr) + __field(u64, ivaddr) + __field(u32, lkey) + __field(u32, sge_length) + __field(u32, length) + __field(u32, ilength) + __field(int, user) + __field(u16, m) + __field(u16, n) + ), + TP_fast_assign( + RDI_DEV_ASSIGN(ib_to_rvt(sge->mr->pd->device)); + __entry->mr = sge->mr; + __entry->sge = sge; + __entry->isge = isge; + __entry->vaddr = sge->vaddr; + __entry->ivaddr = isge->addr; + __entry->lkey = sge->mr->lkey; + __entry->sge_length = sge->sge_length; + __entry->length = sge->length; + __entry->ilength = isge->length; + __entry->m = sge->m; + __entry->n = 
sge->n; + __entry->user = ibpd_to_rvtpd(sge->mr->pd)->user; + ), + TP_printk( + "[%s] mr %p sge %p isge %p vaddr %p ivaddr %llx lkey %x sge_length %u length %u ilength %u m %u n %u user %u", + __get_str(dev), + __entry->mr, + __entry->sge, + __entry->isge, + __entry->vaddr, + __entry->ivaddr, + __entry->lkey, + __entry->sge_length, + __entry->length, + __entry->ilength, + __entry->m, + __entry->n, + __entry->user + ) +); + +DEFINE_EVENT( + rvt_sge_template, rvt_sge_adjacent, + TP_PROTO(struct rvt_sge *sge, struct ib_sge *isge), + TP_ARGS(sge, isge)); + +DEFINE_EVENT( + rvt_sge_template, rvt_sge_new, + TP_PROTO(struct rvt_sge *sge, struct ib_sge *isge), + TP_ARGS(sge, isge)); + #endif /* __RVT_TRACE_MR_H */ #undef TRACE_INCLUDE_PATH diff --git a/drivers/infiniband/sw/rdmavt/trace_tx.h b/drivers/infiniband/sw/rdmavt/trace_tx.h index a613a2223751..0ef25fc49f25 100644 --- a/drivers/infiniband/sw/rdmavt/trace_tx.h +++ b/drivers/infiniband/sw/rdmavt/trace_tx.h @@ -84,12 +84,12 @@ __print_symbolic(opcode, \ wr_opcode_name(RESERVED10)) #define POS_PRN \ -"[%s] wqe %p wr_id %llx send_flags %x qpn %x qpt %u psn %x lpsn %x ssn %x length %u opcode 0x%.2x,%s size %u avail %u head %u last %u pid %u num_sge %u" +"[%s] wqe %p wr_id %llx send_flags %x qpn %x qpt %u psn %x lpsn %x ssn %x length %u opcode 0x%.2x,%s size %u avail %u head %u last %u pid %u num_sge %u wr_num_sge %u" TRACE_EVENT( rvt_post_one_wr, - TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe), - TP_ARGS(qp, wqe), + TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, int wr_num_sge), + TP_ARGS(qp, wqe, wr_num_sge), TP_STRUCT__entry( RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device)) __field(u64, wr_id) @@ -108,6 +108,7 @@ TRACE_EVENT( __field(int, send_flags) __field(pid_t, pid) __field(int, num_sge) + __field(int, wr_num_sge) ), TP_fast_assign( RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device)) @@ -127,6 +128,7 @@ TRACE_EVENT( __entry->ssn = wqe->ssn; __entry->send_flags = wqe->wr.send_flags; __entry->num_sge = wqe->wr.num_sge; + __entry->wr_num_sge = wr_num_sge; ), TP_printk( POS_PRN, @@ -146,7 +148,8 @@ TRACE_EVENT( __entry->head, __entry->last, __entry->pid, - __entry->num_sge + __entry->num_sge, + __entry->wr_num_sge ) ); diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h index 4878aaf7bdff..d0b9f91e5f4d 100644 --- a/include/rdma/rdma_vt.h +++ b/include/rdma/rdma_vt.h @@ -515,7 +515,8 @@ int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey); int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge, u32 len, u64 vaddr, u32 rkey, int acc); int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd, - struct rvt_sge *isge, struct ib_sge *sge, int acc); + struct rvt_sge *isge, struct rvt_sge *last_sge, + struct ib_sge *sge, int acc); struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid, u16 lid); From 7be85676f1d13c77a7e0c72e04903bfd39580d4f Mon Sep 17 00:00:00 2001 From: Sebastian Sanchez Date: Fri, 26 May 2017 05:35:12 -0700 Subject: [PATCH 0096/1795] IB/hfi1: Don't remove RB entry when not needed. An RB tree is used for the SDMA pinning cache. Cache entries are extracted from and reinserted into the tree when their address range changes. However, if the address range for the entry doesn't change, deleting the entry from the RB tree is not necessary. This affects performance since the tree needs to be rebalanced for each insertion, and this happens in the hot path. Optimize the RB search by not removing entries when removal is not needed.
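Illustration only, not part of the patch: the heart of the change is a lookup that leaves an exact-match node in the tree and only extracts a node whose range has to be updated. A simplified sketch, mirroring the new hfi1_mmu_rb_remove_unless_exact() in the mmu_rb.c hunk below (handler locking and LRU-list maintenance omitted, helper name is illustrative):

	/*
	 * Sketch: interval-tree lookup that removes the node only when its
	 * range differs from the requested one.  Returns true if the node
	 * was extracted (caller re-inserts it with the new range), false if
	 * there was no node or the match was exact, in which case the tree
	 * is left untouched and no rebalance happens on the hot path.
	 */
	static bool sketch_remove_unless_exact(struct mmu_rb_handler *handler,
					       unsigned long addr,
					       unsigned long len,
					       struct mmu_rb_node **rb_node)
	{
		struct mmu_rb_node *node = __mmu_rb_search(handler, addr, len);

		*rb_node = node;
		if (!node || (node->addr == addr && node->len == len))
			return false;	/* exact hit or miss: keep tree as-is */

		__mmu_int_rb_remove(node, &handler->root);
		return true;		/* range changed: caller re-inserts */
	}
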
Reviewed-by: Mike Marciniszyn Reviewed-by: Mitko Haralanov Signed-off-by: Sebastian Sanchez Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/mmu_rb.c | 14 ++++++++++---- drivers/infiniband/hw/hfi1/mmu_rb.h | 5 +++-- drivers/infiniband/hw/hfi1/user_sdma.c | 21 +++++++++++++++------ 3 files changed, 28 insertions(+), 12 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c index ccbf52c8ff6f..d41fd87a39f2 100644 --- a/drivers/infiniband/hw/hfi1/mmu_rb.c +++ b/drivers/infiniband/hw/hfi1/mmu_rb.c @@ -217,21 +217,27 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler, return node; } -struct mmu_rb_node *hfi1_mmu_rb_extract(struct mmu_rb_handler *handler, - unsigned long addr, unsigned long len) +bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler, + unsigned long addr, unsigned long len, + struct mmu_rb_node **rb_node) { struct mmu_rb_node *node; unsigned long flags; + bool ret = false; spin_lock_irqsave(&handler->lock, flags); node = __mmu_rb_search(handler, addr, len); if (node) { + if (node->addr == addr && node->len == len) + goto unlock; __mmu_int_rb_remove(node, &handler->root); list_del(&node->list); /* remove from LRU list */ + ret = true; } +unlock: spin_unlock_irqrestore(&handler->lock, flags); - - return node; + *rb_node = node; + return ret; } void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg) diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.h b/drivers/infiniband/hw/hfi1/mmu_rb.h index 754f6ebf13fb..f04cec1e99d1 100644 --- a/drivers/infiniband/hw/hfi1/mmu_rb.h +++ b/drivers/infiniband/hw/hfi1/mmu_rb.h @@ -81,7 +81,8 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler, void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg); void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler, struct mmu_rb_node *mnode); -struct mmu_rb_node *hfi1_mmu_rb_extract(struct mmu_rb_handler *handler, - unsigned long addr, unsigned long len); +bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler, + unsigned long addr, unsigned long len, + struct mmu_rb_node **rb_node); #endif /* _HFI1_MMU_RB_H */ diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index 16fd519216dc..79450cf2a3d5 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -1165,14 +1165,23 @@ static int pin_vector_pages(struct user_sdma_request *req, struct hfi1_user_sdma_pkt_q *pq = req->pq; struct sdma_mmu_node *node = NULL; struct mmu_rb_node *rb_node; + bool extracted; - rb_node = hfi1_mmu_rb_extract(pq->handler, - (unsigned long)iovec->iov.iov_base, - iovec->iov.iov_len); - if (rb_node) + extracted = + hfi1_mmu_rb_remove_unless_exact(pq->handler, + (unsigned long) + iovec->iov.iov_base, + iovec->iov.iov_len, &rb_node); + if (rb_node) { node = container_of(rb_node, struct sdma_mmu_node, rb); - else - rb_node = NULL; + if (!extracted) { + atomic_inc(&node->refcount); + iovec->pages = node->pages; + iovec->npages = node->npages; + iovec->node = node; + return 0; + } + } if (!node) { node = kzalloc(sizeof(*node), GFP_KERNEL); From e3304b7cc4f14d365f46d6847a35563ae8b017f7 Mon Sep 17 00:00:00 2001 From: Sebastian Sanchez Date: Fri, 26 May 2017 05:35:18 -0700 Subject: [PATCH 0097/1795] IB/hfi1: Optimize cachelines for user SDMA request structure The current user SDMA request structure layout has holes. The cachelines can be reduced to improve cacheline trading. 
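As an aside (illustration only, not part of the change): the layout below uses the common pattern of grouping fields by access pattern and starting each group on its own cacheline with ____cacheline_aligned_in_smp. A minimal sketch of the pattern, with purely hypothetical field and struct names:

	/* Hypothetical layout sketch; the real field split is in the
	 * user_sdma.c hunk below.  Requires <linux/cache.h> and
	 * <linux/types.h>.
	 */
	struct example_request {
		/* read-mostly fields, packed together */
		void *pkt_queue ____cacheline_aligned_in_smp;
		u32 data_len;

		/* fields written by both submitter and interrupt handler */
		u64 seqcomp ____cacheline_aligned_in_smp;
		unsigned long flags;

		/* send-side fields, touched only by the submitting context */
		u64 seqnum ____cacheline_aligned_in_smp;
		u32 sent;
	} ____cacheline_aligned_in_smp;

Each ____cacheline_aligned_in_smp starts a new cacheline, so writers of one group do not invalidate the lines the other groups are read from.
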
Separate fields in the following categories: mostly read, writable and shared with interrupt. Reviewed-by: Mike Marciniszyn Signed-off-by: Sebastian Sanchez Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/user_sdma.c | 108 ++++++++++++++----------- 1 file changed, 59 insertions(+), 49 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index 79450cf2a3d5..92517cebb4c7 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -117,6 +117,7 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12 #define AHG_KDETH_INTR_SHIFT 12 #define AHG_KDETH_SH_SHIFT 13 +#define AHG_KDETH_ARRAY_SIZE 9 #define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4) #define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff) @@ -204,25 +205,42 @@ struct evict_data { }; struct user_sdma_request { - struct sdma_req_info info; - struct hfi1_user_sdma_pkt_q *pq; - struct hfi1_user_sdma_comp_q *cq; /* This is the original header from user space */ struct hfi1_pkt_header hdr; + + /* Read mostly fields */ + struct hfi1_user_sdma_pkt_q *pq ____cacheline_aligned_in_smp; + struct hfi1_user_sdma_comp_q *cq; /* * Pointer to the SDMA engine for this request. * Since different request could be on different VLs, * each request will need it's own engine pointer. */ struct sdma_engine *sde; - s8 ahg_idx; - u32 ahg[9]; + struct sdma_req_info info; + /* TID array values copied from the tid_iov vector */ + u32 *tids; + /* total length of the data in the request */ + u32 data_len; + /* number of elements copied to the tids array */ + u16 n_tids; /* - * KDETH.Offset (Eager) field - * We need to remember the initial value so the headers - * can be updated properly. + * We copy the iovs for this request (based on + * info.iovcnt). These are only the data vectors */ - u32 koffset; + u8 data_iovs; + s8 ahg_idx; + + /* Writeable fields shared with interrupt */ + u64 seqcomp ____cacheline_aligned_in_smp; + u64 seqsubmitted; + unsigned long flags; + /* status of the last txreq completed */ + int status; + + /* Send side fields */ + struct list_head txps ____cacheline_aligned_in_smp; + u64 seqnum; /* * KDETH.OFFSET (TID) field * The offset can cover multiple packets, depending on the @@ -230,29 +248,19 @@ struct user_sdma_request { */ u32 tidoffset; /* - * We copy the iovs for this request (based on - * info.iovcnt). These are only the data vectors + * KDETH.Offset (Eager) field + * We need to remember the initial value so the headers + * can be updated properly. 
*/ - unsigned data_iovs; - /* total length of the data in the request */ - u32 data_len; - /* progress index moving along the iovs array */ - unsigned iov_idx; - struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ]; - /* number of elements copied to the tids array */ - u16 n_tids; - /* TID array values copied from the tid_iov vector */ - u32 *tids; - u16 tididx; + u32 koffset; u32 sent; - u64 seqnum; - u64 seqcomp; - u64 seqsubmitted; - struct list_head txps; - unsigned long flags; - /* status of the last txreq completed */ - int status; -}; + /* TID index copied from the tid_iov vector */ + u16 tididx; + /* progress index moving along the iovs array */ + u8 iov_idx; + + struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ]; +} ____cacheline_aligned_in_smp; /* * A single txreq could span up to 3 physical pages when the MTU @@ -1034,11 +1042,6 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) datalen); if (changes < 0) goto free_tx; - sdma_txinit_ahg(&tx->txreq, - SDMA_TXREQ_F_USE_AHG, - datalen, req->ahg_idx, changes, - req->ahg, sizeof(req->hdr), - user_sdma_txreq_cb); } } else { ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) + @@ -1442,21 +1445,22 @@ done: } static int set_txreq_header_ahg(struct user_sdma_request *req, - struct user_sdma_txreq *tx, u32 len) + struct user_sdma_txreq *tx, u32 datalen) { + u32 ahg[AHG_KDETH_ARRAY_SIZE]; int diff = 0; u8 omfactor; /* KDETH.OM */ struct hfi1_user_sdma_pkt_q *pq = req->pq; struct hfi1_pkt_header *hdr = &req->hdr; u16 pbclen = le16_to_cpu(hdr->pbc[0]); - u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(len)); + u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen)); if (PBC2LRH(pbclen) != lrhlen) { /* PBC.PbcLengthDWs */ - AHG_HEADER_SET(req->ahg, diff, 0, 0, 12, + AHG_HEADER_SET(ahg, diff, 0, 0, 12, cpu_to_le16(LRH2PBC(lrhlen))); /* LRH.PktLen (we need the full 16 bits due to byte swap) */ - AHG_HEADER_SET(req->ahg, diff, 3, 0, 16, + AHG_HEADER_SET(ahg, diff, 3, 0, 16, cpu_to_be16(lrhlen >> 2)); } @@ -1468,13 +1472,12 @@ static int set_txreq_header_ahg(struct user_sdma_request *req, (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff); if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK)) val32 |= 1UL << 31; - AHG_HEADER_SET(req->ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16)); - AHG_HEADER_SET(req->ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff)); + AHG_HEADER_SET(ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16)); + AHG_HEADER_SET(ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff)); /* KDETH.Offset */ - AHG_HEADER_SET(req->ahg, diff, 15, 0, 16, + AHG_HEADER_SET(ahg, diff, 15, 0, 16, cpu_to_le16(req->koffset & 0xffff)); - AHG_HEADER_SET(req->ahg, diff, 15, 16, 16, - cpu_to_le16(req->koffset >> 16)); + AHG_HEADER_SET(ahg, diff, 15, 16, 16, cpu_to_le16(req->koffset >> 16)); if (req_opcode(req->info.ctrl) == EXPECTED) { __le16 val; @@ -1492,9 +1495,8 @@ static int set_txreq_header_ahg(struct user_sdma_request *req, * we have to check again. */ if (++req->tididx > req->n_tids - 1 || - !req->tids[req->tididx]) { + !req->tids[req->tididx]) return -EINVAL; - } tidval = req->tids[req->tididx]; } omfactor = ((EXP_TID_GET(tidval, LEN) * @@ -1502,7 +1504,7 @@ static int set_txreq_header_ahg(struct user_sdma_request *req, KDETH_OM_MAX_SIZE) ? 
KDETH_OM_LARGE_SHIFT : KDETH_OM_SMALL_SHIFT; /* KDETH.OM and KDETH.OFFSET (TID) */ - AHG_HEADER_SET(req->ahg, diff, 7, 0, 16, + AHG_HEADER_SET(ahg, diff, 7, 0, 16, ((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 | ((req->tidoffset >> omfactor) & 0x7fff))); @@ -1522,12 +1524,20 @@ static int set_txreq_header_ahg(struct user_sdma_request *req, AHG_KDETH_INTR_SHIFT)); } - AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val); + AHG_HEADER_SET(ahg, diff, 7, 16, 14, val); } + if (diff < 0) + return diff; trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt, req->info.comp_idx, req->sde->this_idx, - req->ahg_idx, req->ahg, diff, tidval); + req->ahg_idx, ahg, diff, tidval); + sdma_txinit_ahg(&tx->txreq, + SDMA_TXREQ_F_USE_AHG, + datalen, req->ahg_idx, diff, + ahg, sizeof(req->hdr), + user_sdma_txreq_cb); + return diff; } From 721c462123b4b53745c1ccd99619b16e8f4e091b Mon Sep 17 00:00:00 2001 From: "Michael J. Ruhl" Date: Fri, 26 May 2017 05:35:25 -0700 Subject: [PATCH 0098/1795] IB/hfi1: Name function prototype parameters for affinity module To improve the readability of function prototypes, give the parameters names in the affinity module. Reviewed-by: Sebastian Sanchez Signed-off-by: Michael J. Ruhl Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/affinity.h | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h index e78c7aa094e0..2a1e374169c0 100644 --- a/drivers/infiniband/hw/hfi1/affinity.h +++ b/drivers/infiniband/hw/hfi1/affinity.h @@ -1,5 +1,5 @@ /* - * Copyright(c) 2015, 2016 Intel Corporation. + * Copyright(c) 2015 - 2017 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. @@ -75,24 +75,26 @@ struct hfi1_msix_entry; /* Initialize non-HT cpu cores mask */ void init_real_cpu_mask(void); /* Initialize driver affinity data */ -int hfi1_dev_affinity_init(struct hfi1_devdata *); +int hfi1_dev_affinity_init(struct hfi1_devdata *dd); /* * Set IRQ affinity to a CPU. The function will determine the * CPU and set the affinity to it. */ -int hfi1_get_irq_affinity(struct hfi1_devdata *, struct hfi1_msix_entry *); +int hfi1_get_irq_affinity(struct hfi1_devdata *dd, + struct hfi1_msix_entry *msix); /* * Remove the IRQ's CPU affinity. This function also updates * any internal CPU tracking data */ -void hfi1_put_irq_affinity(struct hfi1_devdata *, struct hfi1_msix_entry *); +void hfi1_put_irq_affinity(struct hfi1_devdata *dd, + struct hfi1_msix_entry *msix); /* * Determine a CPU affinity for a user process, if the process does not * have an affinity set yet. */ -int hfi1_get_proc_affinity(int); +int hfi1_get_proc_affinity(int node); /* Release a CPU used by a user process. */ -void hfi1_put_proc_affinity(int); +void hfi1_put_proc_affinity(int cpu); struct hfi1_affinity_node { int node; From bb7dde8784913c06ccd1456bed6dcc5ebd0b3c24 Mon Sep 17 00:00:00 2001 From: "Michael J. Ruhl" Date: Fri, 26 May 2017 05:35:31 -0700 Subject: [PATCH 0099/1795] IB/hfi1: Replace deprecated pci functions with new API pci_enable_msix_range() and pci_disable_msix() have been deprecated. Updating to the new pci_alloc_irq_vectors() interface. Reviewed-by: Sebastian Sanchez Signed-off-by: Michael J. 
Ruhl Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/affinity.c | 18 ++--- drivers/infiniband/hw/hfi1/chip.c | 105 +++++++++++++------------- drivers/infiniband/hw/hfi1/hfi.h | 6 +- drivers/infiniband/hw/hfi1/pcie.c | 80 ++++---------------- 4 files changed, 78 insertions(+), 131 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index e2cd2cd3b28a..a97055dd4fbd 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2015, 2016 Intel Corporation. + * Copyright(c) 2015 - 2017 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. @@ -335,10 +335,10 @@ static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu) sde->cpu = cpu; cpumask_clear(&msix->mask); cpumask_set_cpu(cpu, &msix->mask); - dd_dev_dbg(dd, "IRQ vector: %u, type %s engine %u -> cpu: %d\n", - msix->msix.vector, irq_type_names[msix->type], + dd_dev_dbg(dd, "IRQ: %u, type %s engine %u -> cpu: %d\n", + msix->irq, irq_type_names[msix->type], sde->this_idx, cpu); - irq_set_affinity_hint(msix->msix.vector, &msix->mask); + irq_set_affinity_hint(msix->irq, &msix->mask); /* * Set the new cpu in the hfi1_affinity_node and clean @@ -387,7 +387,7 @@ static void hfi1_setup_sdma_notifier(struct hfi1_msix_entry *msix) { struct irq_affinity_notify *notify = &msix->notify; - notify->irq = msix->msix.vector; + notify->irq = msix->irq; notify->notify = hfi1_irq_notifier_notify; notify->release = hfi1_irq_notifier_release; @@ -472,10 +472,10 @@ static int get_irq_affinity(struct hfi1_devdata *dd, } cpumask_set_cpu(cpu, &msix->mask); - dd_dev_info(dd, "IRQ vector: %u, type %s %s -> cpu: %d\n", - msix->msix.vector, irq_type_names[msix->type], + dd_dev_info(dd, "IRQ: %u, type %s %s -> cpu: %d\n", + msix->irq, irq_type_names[msix->type], extra, cpu); - irq_set_affinity_hint(msix->msix.vector, &msix->mask); + irq_set_affinity_hint(msix->irq, &msix->mask); if (msix->type == IRQ_SDMA) { sde->cpu = cpu; @@ -533,7 +533,7 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *dd, } } - irq_set_affinity_hint(msix->msix.vector, NULL); + irq_set_affinity_hint(msix->irq, NULL); cpumask_clear(&msix->mask); mutex_unlock(&node_affinity.lock); } diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 5dbee3c1bd45..9118618d28e5 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -12800,30 +12800,24 @@ static void clean_up_interrupts(struct hfi1_devdata *dd) for (i = 0; i < dd->num_msix_entries; i++, me++) { if (!me->arg) /* => no irq, no affinity */ continue; - hfi1_put_irq_affinity(dd, &dd->msix_entries[i]); - free_irq(me->msix.vector, me->arg); + hfi1_put_irq_affinity(dd, me); + free_irq(me->irq, me->arg); } + + /* clean structures */ + kfree(dd->msix_entries); + dd->msix_entries = NULL; + dd->num_msix_entries = 0; } else { /* INTx */ if (dd->requested_intx_irq) { free_irq(dd->pcidev->irq, dd); dd->requested_intx_irq = 0; } - } - - /* turn off interrupts */ - if (dd->num_msix_entries) { - /* MSI-X */ - pci_disable_msix(dd->pcidev); - } else { - /* INTx */ disable_intx(dd->pcidev); } - /* clean structures */ - kfree(dd->msix_entries); - dd->msix_entries = NULL; - dd->num_msix_entries = 0; + pci_free_irq_vectors(dd->pcidev); } /* @@ -12972,13 +12966,21 @@ static int request_msix_irqs(struct hfi1_devdata *dd) 
continue; /* make sure the name is terminated */ me->name[sizeof(me->name) - 1] = 0; + me->irq = pci_irq_vector(dd->pcidev, i); + /* + * On err return me->irq. Don't need to clear this + * because 'arg' has not been set, and cleanup will + * do the right thing. + */ + if (me->irq < 0) + return me->irq; - ret = request_threaded_irq(me->msix.vector, handler, thread, 0, + ret = request_threaded_irq(me->irq, handler, thread, 0, me->name, arg); if (ret) { dd_dev_err(dd, - "unable to allocate %s interrupt, vector %d, index %d, err %d\n", - err_info, me->msix.vector, idx, ret); + "unable to allocate %s interrupt, irq %d, index %d, err %d\n", + err_info, me->irq, idx, ret); return ret; } /* @@ -12989,8 +12991,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd) ret = hfi1_get_irq_affinity(dd, me); if (ret) - dd_dev_err(dd, - "unable to pin IRQ %d\n", ret); + dd_dev_err(dd, "unable to pin IRQ %d\n", ret); } return ret; @@ -13009,7 +13010,7 @@ void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd) struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i]; struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr]; - synchronize_irq(me->msix.vector); + synchronize_irq(me->irq); } } @@ -13022,7 +13023,7 @@ void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd) return; hfi1_put_irq_affinity(dd, me); - free_irq(me->msix.vector, me->arg); + free_irq(me->irq, me->arg); me->arg = NULL; } @@ -13050,14 +13051,19 @@ void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd) DRIVER_NAME "_%d kctxt%d", dd->unit, idx); me->name[sizeof(me->name) - 1] = 0; me->type = IRQ_RCVCTXT; - + me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr); + if (me->irq < 0) { + dd_dev_err(dd, "vnic irq vector request (idx %d) fail %d\n", + idx, me->irq); + return; + } remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr); - ret = request_threaded_irq(me->msix.vector, receive_context_interrupt, + ret = request_threaded_irq(me->irq, receive_context_interrupt, receive_context_thread, 0, me->name, arg); if (ret) { - dd_dev_err(dd, "vnic irq request (vector %d, idx %d) fail %d\n", - me->msix.vector, idx, ret); + dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n", + me->irq, idx, ret); return; } /* @@ -13070,7 +13076,7 @@ void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd) if (ret) { dd_dev_err(dd, "unable to pin IRQ %d\n", ret); - free_irq(me->msix.vector, me->arg); + free_irq(me->irq, me->arg); } } @@ -13093,9 +13099,8 @@ static void reset_interrupts(struct hfi1_devdata *dd) static int set_up_interrupts(struct hfi1_devdata *dd) { - struct hfi1_msix_entry *entries; - u32 total, request; - int i, ret; + u32 total; + int ret, request; int single_interrupt = 0; /* we expect to have all the interrupts */ /* @@ -13107,39 +13112,31 @@ static int set_up_interrupts(struct hfi1_devdata *dd) */ total = 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT; - entries = kcalloc(total, sizeof(*entries), GFP_KERNEL); - if (!entries) { - ret = -ENOMEM; - goto fail; - } - /* 1-1 MSI-X entry assignment */ - for (i = 0; i < total; i++) - entries[i].msix.entry = i; - /* ask for MSI-X interrupts */ - request = total; - request_msix(dd, &request, entries); - - if (request == 0) { + request = request_msix(dd, total); + if (request < 0) { + ret = request; + goto fail; + } else if (request == 0) { /* using INTx */ /* dd->num_msix_entries already zero */ - kfree(entries); single_interrupt = 1; dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n"); + } else if (request < total) { + /* using MSI-X, with reduced interrupts */ + dd_dev_err(dd, 
"reduced interrupt found, wanted %u, got %u\n", + total, request); + ret = -EINVAL; + goto fail; } else { - /* using MSI-X */ - dd->num_msix_entries = request; - dd->msix_entries = entries; - - if (request != total) { - /* using MSI-X, with reduced interrupts */ - dd_dev_err( - dd, - "cannot handle reduced interrupt case, want %u, got %u\n", - total, request); - ret = -EINVAL; + dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries), + GFP_KERNEL); + if (!dd->msix_entries) { + ret = -ENOMEM; goto fail; } + /* using MSI-X */ + dd->num_msix_entries = total; dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total); } diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index 9c6c73448461..8f74cf6d6c9a 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -521,7 +521,7 @@ static inline void incr_cntr32(u32 *cntr) #define MAX_NAME_SIZE 64 struct hfi1_msix_entry { enum irq_type type; - struct msix_entry msix; + int irq; void *arg; char name[MAX_NAME_SIZE]; cpumask_t mask; @@ -1838,9 +1838,7 @@ void hfi1_pcie_cleanup(struct pci_dev *pdev); int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev); void hfi1_pcie_ddcleanup(struct hfi1_devdata *); int pcie_speeds(struct hfi1_devdata *dd); -void request_msix(struct hfi1_devdata *dd, u32 *nent, - struct hfi1_msix_entry *entry); -void hfi1_enable_intx(struct pci_dev *pdev); +int request_msix(struct hfi1_devdata *dd, u32 msireq); void restore_pci_variables(struct hfi1_devdata *dd); int do_pcie_gen3_transition(struct hfi1_devdata *dd); int parse_platform_config(struct hfi1_devdata *dd); diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 6a9f6f9819e1..f01841b51946 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2015, 2016 Intel Corporation. + * Copyright(c) 2015 - 2017 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. @@ -240,50 +240,6 @@ void hfi1_pcie_ddcleanup(struct hfi1_devdata *dd) iounmap(dd->piobase); } -static void msix_setup(struct hfi1_devdata *dd, int pos, u32 *msixcnt, - struct hfi1_msix_entry *hfi1_msix_entry) -{ - int ret; - int nvec = *msixcnt; - struct msix_entry *msix_entry; - int i; - - /* - * We can't pass hfi1_msix_entry array to msix_setup - * so use a dummy msix_entry array and copy the allocated - * irq back to the hfi1_msix_entry array. - */ - msix_entry = kmalloc_array(nvec, sizeof(*msix_entry), GFP_KERNEL); - if (!msix_entry) { - ret = -ENOMEM; - goto do_intx; - } - - for (i = 0; i < nvec; i++) - msix_entry[i] = hfi1_msix_entry[i].msix; - - ret = pci_enable_msix_range(dd->pcidev, msix_entry, 1, nvec); - if (ret < 0) - goto free_msix_entry; - nvec = ret; - - for (i = 0; i < nvec; i++) - hfi1_msix_entry[i].msix = msix_entry[i]; - - kfree(msix_entry); - *msixcnt = nvec; - return; - -free_msix_entry: - kfree(msix_entry); - -do_intx: - dd_dev_err(dd, "pci_enable_msix_range %d vectors failed: %d, falling back to INTx\n", - nvec, ret); - *msixcnt = 0; - hfi1_enable_intx(dd->pcidev); -} - /* return the PCIe link speed from the given link status */ static u32 extract_speed(u16 linkstat) { @@ -364,33 +320,29 @@ int pcie_speeds(struct hfi1_devdata *dd) } /* - * Returns in *nent: - * - actual number of interrupts allocated + * Returns: + * - actual number of interrupts allocated or * - 0 if fell back to INTx. 
+ * - error */ -void request_msix(struct hfi1_devdata *dd, u32 *nent, - struct hfi1_msix_entry *entry) +int request_msix(struct hfi1_devdata *dd, u32 msireq) { - int pos; + int nvec; - pos = dd->pcidev->msix_cap; - if (*nent && pos) { - msix_setup(dd, pos, nent, entry); - /* did it, either MSI-X or INTx */ - } else { - *nent = 0; - hfi1_enable_intx(dd->pcidev); + nvec = pci_alloc_irq_vectors(dd->pcidev, 1, msireq, + PCI_IRQ_MSIX | PCI_IRQ_LEGACY); + if (nvec < 0) { + dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", nvec); + return nvec; } tune_pcie_caps(dd); -} -void hfi1_enable_intx(struct pci_dev *pdev) -{ - /* first, turn on INTx */ - pci_intx(pdev, 1); - /* then turn off MSI-X */ - pci_disable_msix(pdev); + /* check for legacy IRQ */ + if (nvec == 1 && !dd->pcidev->msix_enabled) + return 0; + + return nvec; } /* restore command and BARs after a reset has wiped them out */ From cb49366f3616fdf197893c24a5b2677b8c26ce29 Mon Sep 17 00:00:00 2001 From: "Vishwanathapura, Niranjana" Date: Thu, 1 Jun 2017 17:04:02 -0700 Subject: [PATCH 0100/1795] IB/core,rdmavt,hfi1,opa-vnic: Send OPA cap_mask3 in trap Provide the ability for IB clients to modify the OPA specific capability mask and include this mask in the subsequent trap data. Reviewed-by: Niranjana Vishwanathapura Signed-off-by: Michael N. Henry Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/mad.c | 7 ++--- drivers/infiniband/hw/hfi1/mad.h | 2 +- drivers/infiniband/hw/hfi1/verbs.c | 6 ++++- drivers/infiniband/sw/rdmavt/vt.c | 9 +++++-- .../infiniband/ulp/opa_vnic/opa_vnic_vema.c | 27 ++++++++++++++++++- include/rdma/ib_verbs.h | 3 ++- include/rdma/rdma_vt.h | 1 + 7 files changed, 44 insertions(+), 11 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index 5977673a52d4..70831ad621b0 100644 --- a/drivers/infiniband/hw/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c @@ -260,6 +260,7 @@ void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num) data.issuer_lid = cpu_to_be32(lid); data.ntc_144.lid = data.issuer_lid; data.ntc_144.new_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags); + data.ntc_144.cap_mask3 = cpu_to_be16(ibp->rvp.port_cap3_flags); send_trap(ibp, &data, sizeof(data)); } @@ -704,11 +705,7 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, buffer_units |= (dd->vl15_init << 11) & OPA_PI_MASK_BUF_UNIT_VL15_INIT; pi->buffer_units = cpu_to_be32(buffer_units); - pi->opa_cap_mask = cpu_to_be16(OPA_CAP_MASK3_IsSharedSpaceSupported | - OPA_CAP_MASK3_IsEthOnFabricSupported); - /* Driver does not support mcast/collective configuration */ - pi->opa_cap_mask &= - cpu_to_be16(~OPA_CAP_MASK3_IsAddrRangeConfigSupported); + pi->opa_cap_mask = cpu_to_be16(ibp->rvp.port_cap3_flags); pi->collectivemask_multicastmask = ((HFI1_COLLECTIVE_NR & 0x7) << 3 | (HFI1_MCAST_NR & 0x7)); diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h index 5aa3fd1be653..a4e2506bd5ca 100644 --- a/drivers/infiniband/hw/hfi1/mad.h +++ b/drivers/infiniband/hw/hfi1/mad.h @@ -115,7 +115,7 @@ struct opa_mad_notice_attr { __be32 lid; /* LID where change occurred */ __be32 new_cap_mask; /* new capability mask */ __be16 reserved2; - __be16 cap_mask; + __be16 cap_mask3; __be16 change_flags; /* low 4 bits only */ } __packed ntc_144; diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index af54d3f4696a..2d7759f0c6b4 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -1537,9 +1537,13 @@ static 
void init_ibport(struct hfi1_pportdata *ppd) /* Set the prefix to the default value (see ch. 4.1.1) */ ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX; ibp->rvp.sm_lid = 0; - /* Below should only set bits defined in OPA PortInfo.CapabilityMask */ + /* + * Below should only set bits defined in OPA PortInfo.CapabilityMask + * and PortInfo.CapabilityMask3 + */ ibp->rvp.port_cap_flags = IB_PORT_AUTO_MIGR_SUP | IB_PORT_CAP_MASK_NOTICE_SUP; + ibp->rvp.port_cap3_flags = OPA_CAP_MASK3_IsSharedSpaceSupported; ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA; ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA; ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS; diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c index 0d7c6bb551d9..64bdd442078a 100644 --- a/drivers/infiniband/sw/rdmavt/vt.c +++ b/drivers/infiniband/sw/rdmavt/vt.c @@ -202,8 +202,13 @@ static int rvt_modify_port(struct ib_device *ibdev, u8 port_num, return -EINVAL; rvp = rdi->ports[port_index]; - rvp->port_cap_flags |= props->set_port_cap_mask; - rvp->port_cap_flags &= ~props->clr_port_cap_mask; + if (port_modify_mask & IB_PORT_OPA_MASK_CHG) { + rvp->port_cap3_flags |= props->set_port_cap_mask; + rvp->port_cap3_flags &= ~props->clr_port_cap_mask; + } else { + rvp->port_cap_flags |= props->set_port_cap_mask; + rvp->port_cap_flags &= ~props->clr_port_cap_mask; + } if (props->set_port_cap_mask || props->clr_port_cap_mask) rdi->driver_f.cap_mask_chg(rdi, port_num); diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c index 875694f9a7f9..32cdd7a35415 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c @@ -52,7 +52,9 @@ #include #include -#include +#include +#include +#include #include "opa_vnic_internal.h" @@ -979,6 +981,27 @@ static int vema_register(struct opa_vnic_ctrl_port *cport) return 0; } +/** + * opa_vnic_ctrl_config_dev -- This function sends a trap to the EM + * by way of ib_modify_port to indicate support for ethernet on the + * fabric. 
+ * @cport: pointer to control port + * @en: enable or disable ethernet on fabric support + */ +static void opa_vnic_ctrl_config_dev(struct opa_vnic_ctrl_port *cport, bool en) +{ + struct ib_port_modify pm = { 0 }; + int i; + + if (en) + pm.set_port_cap_mask = OPA_CAP_MASK3_IsEthOnFabricSupported; + else + pm.clr_port_cap_mask = OPA_CAP_MASK3_IsEthOnFabricSupported; + + for (i = 1; i <= cport->num_ports; i++) + ib_modify_port(cport->ibdev, i, IB_PORT_OPA_MASK_CHG, &pm); +} + /** * opa_vnic_vema_add_one -- Handle new ib device * @device: ib device pointer @@ -1007,6 +1030,7 @@ static void opa_vnic_vema_add_one(struct ib_device *device) c_info("VNIC client initialized\n"); ib_set_client_data(device, &opa_vnic_client, cport); + opa_vnic_ctrl_config_dev(cport, true); } /** @@ -1025,6 +1049,7 @@ static void opa_vnic_vema_rem_one(struct ib_device *device, return; c_info("removing VNIC client\n"); + opa_vnic_ctrl_config_dev(cport, false); vema_unregister(cport); kfree(cport); } diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 8f1ce4e27bbd..9d4d2a74c95e 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -577,7 +577,8 @@ struct ib_device_modify { enum ib_port_modify_flags { IB_PORT_SHUTDOWN = 1, IB_PORT_INIT_TYPE = (1<<2), - IB_PORT_RESET_QKEY_CNTR = (1<<3) + IB_PORT_RESET_QKEY_CNTR = (1<<3), + IB_PORT_OPA_MASK_CHG = (1<<4) }; struct ib_port_modify { diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h index d0b9f91e5f4d..0f18ffd98dd7 100644 --- a/include/rdma/rdma_vt.h +++ b/include/rdma/rdma_vt.h @@ -75,6 +75,7 @@ struct rvt_ibport { __be64 mkey; u64 tid; u32 port_cap_flags; + u16 port_cap3_flags; u32 pma_sample_start; u32 pma_sample_interval; __be16 pma_counter_select[5]; From b888429c202197916c8c549811a2dd62f090280d Mon Sep 17 00:00:00 2001 From: Sebastian Sanchez Date: Fri, 26 May 2017 05:35:44 -0700 Subject: [PATCH 0101/1795] IB/hfi1: Remove atomic SDMA_REQ_SEND_DONE bit operation The atomic SDMA_REQ_SEND_DONE bit is set by the process-level code, and then the same process-level code uses the bit to test that all packets have been submitted incurring a costly atomic read. Use a bool type with a READ_ONCE/WRITE_ONCE pairing for this bit, and use the same condition that is used to set the bit to test that all packets have been submitted. Reviewed-by: Mike Marciniszyn Signed-off-by: Sebastian Sanchez Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/user_sdma.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index 92517cebb4c7..6fb70f0064a6 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -154,10 +154,8 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. 
Default: 12 #define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */ /* SDMA request flag bits */ -#define SDMA_REQ_FOR_THREAD 1 -#define SDMA_REQ_SEND_DONE 2 -#define SDMA_REQ_HAS_ERROR 3 -#define SDMA_REQ_DONE_ERROR 4 +#define SDMA_REQ_HAS_ERROR 1 +#define SDMA_REQ_DONE_ERROR 2 #define SDMA_PKT_Q_INACTIVE BIT(0) #define SDMA_PKT_Q_ACTIVE BIT(1) @@ -258,6 +256,7 @@ struct user_sdma_request { u16 tididx; /* progress index moving along the iovs array */ u8 iov_idx; + u8 done; struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ]; } ____cacheline_aligned_in_smp; @@ -628,6 +627,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, req->seqsubmitted = 0; req->flags = 0; req->tids = NULL; + req->done = 0; INIT_LIST_HEAD(&req->txps); memcpy(&req->info, &info, sizeof(info)); @@ -809,7 +809,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, * request have been submitted to the SDMA engine. However, it * will not wait for send completions. */ - while (!test_bit(SDMA_REQ_SEND_DONE, &req->flags)) { + while (req->seqsubmitted != req->info.npkts) { ret = user_sdma_send_pkts(req, pcount); if (ret < 0) { if (ret != -EBUSY) { @@ -1118,7 +1118,7 @@ dosend: ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count); req->seqsubmitted += count; if (req->seqsubmitted == req->info.npkts) { - set_bit(SDMA_REQ_SEND_DONE, &req->flags); + WRITE_ONCE(req->done, 1); /* * The txreq has already been submitted to the HW queue * so we can free the AHG entry now. Corruption will not @@ -1585,7 +1585,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) if (status != SDMA_TXREQ_S_OK) req->status = status; if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) && - (test_bit(SDMA_REQ_SEND_DONE, &req->flags) || + (READ_ONCE(req->done) || test_bit(SDMA_REQ_DONE_ERROR, &req->flags))) { user_sdma_free_request(req, false); pq_update(pq); From e9c48ebd0cb4d31bbaf60ddec2a2fa40227b8cb5 Mon Sep 17 00:00:00 2001 From: Sebastian Sanchez Date: Fri, 26 May 2017 05:35:50 -0700 Subject: [PATCH 0102/1795] IB/hfi1: Remove atomic SDMA_REQ_HAS_ERROR bit operation Atomic bit tests are used to single errors and the completion of request submissions. These operations don't need to be atomic and show to be expensive on the profile. Replace each atomic bit operation with a bool type and a READ_ONCE/WRITE_ONCE pairing. Reviewed-by: Mike Marciniszyn Signed-off-by: Sebastian Sanchez Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/user_sdma.c | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index 6fb70f0064a6..fcadbb9978ca 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -153,10 +153,6 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. 
Default: 12 #define TXREQ_FLAGS_REQ_ACK BIT(0) /* Set the ACK bit in the header */ #define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */ -/* SDMA request flag bits */ -#define SDMA_REQ_HAS_ERROR 1 -#define SDMA_REQ_DONE_ERROR 2 - #define SDMA_PKT_Q_INACTIVE BIT(0) #define SDMA_PKT_Q_ACTIVE BIT(1) #define SDMA_PKT_Q_DEFERRED BIT(2) @@ -232,7 +228,6 @@ struct user_sdma_request { /* Writeable fields shared with interrupt */ u64 seqcomp ____cacheline_aligned_in_smp; u64 seqsubmitted; - unsigned long flags; /* status of the last txreq completed */ int status; @@ -257,6 +252,7 @@ struct user_sdma_request { /* progress index moving along the iovs array */ u8 iov_idx; u8 done; + u8 has_error; struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ]; } ____cacheline_aligned_in_smp; @@ -625,9 +621,9 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, req->seqnum = 0; req->seqcomp = 0; req->seqsubmitted = 0; - req->flags = 0; req->tids = NULL; req->done = 0; + req->has_error = 0; INIT_LIST_HEAD(&req->txps); memcpy(&req->info, &info, sizeof(info)); @@ -814,7 +810,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, if (ret < 0) { if (ret != -EBUSY) { req->status = ret; - set_bit(SDMA_REQ_DONE_ERROR, &req->flags); + WRITE_ONCE(req->has_error, 1); if (ACCESS_ONCE(req->seqcomp) == req->seqsubmitted - 1) goto free_req; @@ -916,10 +912,8 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) pq = req->pq; /* If tx completion has reported an error, we are done. */ - if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) { - set_bit(SDMA_REQ_DONE_ERROR, &req->flags); + if (READ_ONCE(req->has_error)) return -EFAULT; - } /* * Check if we might have sent the entire request already @@ -942,10 +936,8 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) * with errors. If so, we are not going to process any * more packets from this request. */ - if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) { - set_bit(SDMA_REQ_DONE_ERROR, &req->flags); + if (READ_ONCE(req->has_error)) return -EFAULT; - } tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL); if (!tx) @@ -1566,7 +1558,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) if (status != SDMA_TXREQ_S_OK) { SDMA_DBG(req, "SDMA completion with error %d", status); - set_bit(SDMA_REQ_HAS_ERROR, &req->flags); + WRITE_ONCE(req->has_error, 1); } req->seqcomp = tx->seqnum; @@ -1586,7 +1578,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) req->status = status; if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) && (READ_ONCE(req->done) || - test_bit(SDMA_REQ_DONE_ERROR, &req->flags))) { + READ_ONCE(req->has_error))) { user_sdma_free_request(req, false); pq_update(pq); set_comp_state(pq, cq, idx, ERROR, req->status); From b4e9e2f0fc87b876a4e2162733c1940e861785b1 Mon Sep 17 00:00:00 2001 From: Sebastian Sanchez Date: Fri, 26 May 2017 05:35:57 -0700 Subject: [PATCH 0103/1795] IB/hfi1: Reclassify type of messages printed for platform config logic Reclassify messages printed out to /var/log/messages into warnings and errors to facilitate debugging in the future for issues related to the platform config logic. 
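For reference, a minimal sketch of the severity split this change applies (the function name and message strings below are made up for illustration and assume the hfi1 driver headers; the real call sites are in the diff that follows):

static void example_log_levels(struct hfi1_pportdata *ppd)
{
	/* hard failures that leave the port disabled are promoted to error */
	dd_dev_err(ppd->dd, "%s: port disabled by policy\n", __func__);
	/* unexpected but tolerated hardware, e.g. an unknown cable, is a warning */
	dd_dev_warn(ppd->dd, "%s: unknown/unsupported cable\n", __func__);
	/* routine progress messages remain at info level */
	dd_dev_info(ppd->dd, "%s: tuning complete\n", __func__);
}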
Reviewed-by: Mike Marciniszyn Signed-off-by: Sebastian Sanchez Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/platform.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c index 838fe84e285a..cbda9b189216 100644 --- a/drivers/infiniband/hw/hfi1/platform.c +++ b/drivers/infiniband/hw/hfi1/platform.c @@ -242,7 +242,7 @@ static int qual_power(struct hfi1_pportdata *ppd) if (ppd->offline_disabled_reason == HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY)) { - dd_dev_info( + dd_dev_err( ppd->dd, "%s: Port disabled due to system power restrictions\n", __func__); @@ -268,7 +268,7 @@ static int qual_bitrate(struct hfi1_pportdata *ppd) if (ppd->offline_disabled_reason == HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY)) { - dd_dev_info( + dd_dev_err( ppd->dd, "%s: Cable failed bitrate check, disabling port\n", __func__); @@ -709,15 +709,15 @@ static void apply_tunings( ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS, GENERAL_CONFIG, config_data); if (ret != HCMD_SUCCESS) - dd_dev_info(ppd->dd, - "%s: Failed set ext device config params\n", - __func__); + dd_dev_err(ppd->dd, + "%s: Failed set ext device config params\n", + __func__); } if (tx_preset_index == OPA_INVALID_INDEX) { if (ppd->port_type == PORT_TYPE_QSFP && limiting_active) - dd_dev_info(ppd->dd, "%s: Invalid Tx preset index\n", - __func__); + dd_dev_err(ppd->dd, "%s: Invalid Tx preset index\n", + __func__); return; } @@ -900,7 +900,7 @@ static int tune_qsfp(struct hfi1_pportdata *ppd, case 0xD: /* fallthrough */ case 0xF: default: - dd_dev_info(ppd->dd, "%s: Unknown/unsupported cable\n", + dd_dev_warn(ppd->dd, "%s: Unknown/unsupported cable\n", __func__); break; } @@ -942,7 +942,7 @@ void tune_serdes(struct hfi1_pportdata *ppd) case PORT_TYPE_DISCONNECTED: ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_DISCONNECTED); - dd_dev_info(dd, "%s: Port disconnected, disabling port\n", + dd_dev_warn(dd, "%s: Port disconnected, disabling port\n", __func__); goto bail; case PORT_TYPE_FIXED: @@ -1027,7 +1027,7 @@ void tune_serdes(struct hfi1_pportdata *ppd) } break; default: - dd_dev_info(ppd->dd, "%s: Unknown port type\n", __func__); + dd_dev_warn(ppd->dd, "%s: Unknown port type\n", __func__); ppd->port_type = PORT_TYPE_UNKNOWN; tuning_method = OPA_UNKNOWN_TUNING; total_atten = 0; From 90dba23e1e30af72dbf4379842a5161e811b26b8 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Fri, 26 May 2017 05:36:04 -0700 Subject: [PATCH 0104/1795] IB/hfi1: Fix up sdma_init function comment sdma_init does not take a number of sdma engine parameters, rather it initializes all of the sdma engines. Signed-off-by: Ira Weiny Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/sdma.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index bfd0d5187e9b..d82ff57214c5 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c @@ -1340,10 +1340,8 @@ static void sdma_clean(struct hfi1_devdata *dd, size_t num_engines) * @dd: hfi1_devdata * @port: port number (currently only zero) * - * sdma_init initializes the specified number of engines. - * - * The code initializes each sde, its csrs. Interrupts - * are not required to be enabled. + * Initializes each sde and its csrs. + * Interrupts are not required to be enabled. 
* * Returns: * 0 - success, -errno on failure From b2f8a04e77bad520d52b7f321ca776b33c947ad0 Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Mon, 29 May 2017 17:17:28 -0700 Subject: [PATCH 0105/1795] IB/rdmavt: Remove duplicated functions The free_qpn() function from the hfi1/qib driver which was the basis for rdmavt_free_qpn() function was accidentally left in the code. Remove it. Reviewed-by: Michael J. Ruhl Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/sw/rdmavt/qp.c | 37 ++++++++++++------------------- 1 file changed, 14 insertions(+), 23 deletions(-) diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index a3dd1e536860..a372afbbfbef 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -422,15 +422,6 @@ bail: return ret; } -static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn) -{ - struct rvt_qpn_map *map; - - map = qpt->map + qpn / RVT_BITS_PER_PAGE; - if (map->page) - clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page); -} - /** * rvt_clear_mr_refs - Drop help mr refs * @qp: rvt qp data structure @@ -646,6 +637,19 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, lockdep_assert_held(&qp->s_lock); } +/** rvt_free_qpn - Free a qpn from the bit map + * @qpt: QP table + * @qpn: queue pair number to free + */ +static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn) +{ + struct rvt_qpn_map *map; + + map = qpt->map + qpn / RVT_BITS_PER_PAGE; + if (map->page) + clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page); +} + /** * rvt_create_qp - create a queue pair for a device * @ibpd: the protection domain who's device we create the queue pair for @@ -936,7 +940,7 @@ bail_ip: kref_put(&qp->ip->ref, rvt_release_mmap_info); bail_qpn: - free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num); + rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num); bail_rq_wq: if (!qp->ip) @@ -1325,19 +1329,6 @@ inval: return -EINVAL; } -/** rvt_free_qpn - Free a qpn from the bit map - * @qpt: QP table - * @qpn: queue pair number to free - */ -static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn) -{ - struct rvt_qpn_map *map; - - map = qpt->map + qpn / RVT_BITS_PER_PAGE; - if (map->page) - clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page); -} - /** * rvt_destroy_qp - destroy a queue pair * @ibqp: the queue pair to destroy From bc54f6714c3a5d1f7ac6e7e5a5f7c390b1a01285 Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Mon, 29 May 2017 17:18:14 -0700 Subject: [PATCH 0106/1795] IB/hfi1: Ensure dd->gi_mask can not be overflowed As the code stands today the array access in remap_intr() is OK. To future proof the code though we should explicitly check to ensure the index value is not outside of the valid range. This is not a straight forward calculation so err on the side of caution. Reviewed-by: Michael J. 
Ruhl Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/chip.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 9118618d28e5..d6af715c35bf 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -12832,7 +12832,12 @@ static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr) /* clear from the handled mask of the general interrupt */ m = isrc / 64; n = isrc % 64; - dd->gi_mask[m] &= ~((u64)1 << n); + if (likely(m < CCE_NUM_INT_CSRS)) { + dd->gi_mask[m] &= ~((u64)1 << n); + } else { + dd_dev_err(dd, "remap interrupt err\n"); + return; + } /* direct the chip source to the given MSI-X interrupt */ m = isrc / 8; From 67838e64fa63415fe4e0da7149bcb123ed9f5612 Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Mon, 29 May 2017 17:18:46 -0700 Subject: [PATCH 0107/1795] IB/hfi1: Fix spelling mistake in linkdown reason Spell receive correctly in OPA_LINKDOWN_REASON_RCV_ERROR Reviewed-by: Michael J. Ruhl Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/chip.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index d6af715c35bf..99c29ddcca35 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -6906,7 +6906,7 @@ static void reset_neighbor_info(struct hfi1_pportdata *ppd) static const char * const link_down_reason_strs[] = { [OPA_LINKDOWN_REASON_NONE] = "None", - [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Recive error 0", + [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0", [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length", [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long", [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short", From 6c31e5283cb06b81a13cc88da2f2ad3db594a935 Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Mon, 29 May 2017 17:19:21 -0700 Subject: [PATCH 0108/1795] IB/hfi1: Use QPN mask to avoid overflow Ensure we can't come up with an array size that is bigger than the array by applying the QPN mask before the divide in the free_qpn function. Reviewed-by: Michael J. Ruhl Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/sw/rdmavt/qp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index a372afbbfbef..2ce0928dddd6 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -645,7 +645,7 @@ static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn) { struct rvt_qpn_map *map; - map = qpt->map + qpn / RVT_BITS_PER_PAGE; + map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE; if (map->page) clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page); } From 52d86e72c515ebd4fb0d329470aa50698e28fb36 Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Mon, 29 May 2017 17:19:46 -0700 Subject: [PATCH 0109/1795] IB/hfi1: Remove subtraction of uninitialized value In process_receive_packet the packet header field is used to calculate the length of the packet. However this is not necessarily setup. In fact only if the ECN prescan is enabled will the packet header be valid at this point. The code works as is because we do not do anything with the packet length at this point in the packet processing. 
The length and header are setup correctly in hfi1_setup_ib_header which is called by the following sequence: process_receive_packet() -> rhf_receieve_function_map[]() --> process_receive_ib() ---> hfi1_setup_9B_packet() ----> hfi1_setup_ib_header() Reviewed-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/driver.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c index 2a1022e374a5..9e59430bc55c 100644 --- a/drivers/infiniband/hw/hfi1/driver.c +++ b/drivers/infiniband/hw/hfi1/driver.c @@ -703,7 +703,6 @@ static inline int process_rcv_packet(struct hfi1_packet *packet, int thread) packet->etype = rhf_rcv_type(packet->rhf); - packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr; /* total length */ packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */ /* retrieve eager buffer details */ From f1685179820bb7f9d9c4f5d0ac9bfb269529a919 Mon Sep 17 00:00:00 2001 From: Neel Desai Date: Mon, 29 May 2017 17:20:27 -0700 Subject: [PATCH 0110/1795] IB/hfi1: Add error checking for buffer overrun in OPA aggregate Improve safety of code by checking the size of the data buffer and prevent buffer overrun Reviewed-by: Dennis Dalessandro Reviewed-by: Brian Welty Signed-off-by: Neel Desai Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/mad.c | 296 +++++++++++++++++++------------ 1 file changed, 185 insertions(+), 111 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index 70831ad621b0..b180dff5f2e9 100644 --- a/drivers/infiniband/hw/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c @@ -59,6 +59,14 @@ #define OPA_LINK_WIDTH_RESET_OLD 0x0fff #define OPA_LINK_WIDTH_RESET 0xffff +static int smp_length_check(u32 data_size, u32 request_len) +{ + if (unlikely(request_len < data_size)) + return -EINVAL; + + return 0; +} + static int reply(struct ib_mad_hdr *smp) { /* @@ -308,11 +316,11 @@ void hfi1_node_desc_chg(struct hfi1_ibport *ibp) static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, - u8 port, u32 *resp_len) + u8 port, u32 *resp_len, u32 max_len) { struct opa_node_description *nd; - if (am) { + if (am || smp_length_check(sizeof(*nd), max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } @@ -329,7 +337,7 @@ static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am, static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { struct opa_node_info *ni; struct hfi1_devdata *dd = dd_from_ibdev(ibdev); @@ -339,6 +347,7 @@ static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data, /* GUID 0 is illegal */ if (am || pidx >= dd->num_pports || ibdev->node_guid == 0 || + smp_length_check(sizeof(*ni), max_len) || get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX) == 0) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); @@ -520,7 +529,7 @@ void read_ltp_rtt(struct hfi1_devdata *dd) static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { int i; struct hfi1_devdata *dd; @@ -536,7 +545,7 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, u32 buffer_units; u64 tmp = 0; - if (num_ports != 1) { + if (num_ports != 1 || smp_length_check(sizeof(*pi), 
max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } @@ -745,7 +754,7 @@ static int get_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys) static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { struct hfi1_devdata *dd = dd_from_ibdev(ibdev); u32 n_blocks_req = OPA_AM_NBLK(am); @@ -768,6 +777,11 @@ static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data, size = (n_blocks_req * OPA_PARTITION_TABLE_BLK_SIZE) * sizeof(u16); + if (smp_length_check(size, max_len)) { + smp->status |= IB_SMP_INVALID_FIELD; + return reply((struct ib_mad_hdr *)smp); + } + if (start_block + n_blocks_req > n_blocks_avail || n_blocks_req > OPA_NUM_PKEY_BLOCKS_PER_SMP) { pr_warn("OPA Get PKey AM Invalid : s 0x%x; req 0x%x; " @@ -1071,7 +1085,7 @@ static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp, */ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { struct opa_port_info *pi = (struct opa_port_info *)data; struct ib_event event; @@ -1092,7 +1106,8 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, int ret, i, invalid = 0, call_set_mtu = 0; int call_link_downgrade_policy = 0; - if (num_ports != 1) { + if (num_ports != 1 || + smp_length_check(sizeof(*pi), max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } @@ -1343,7 +1358,8 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, if (ret) return ret; - ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len); + ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len, + max_len); /* restore re-reg bit per o14-12.2.1 */ pi->clientrereg_subnettimeout |= clientrereg; @@ -1360,7 +1376,8 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, return ret; get_only: - return __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len); + return __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len, + max_len); } /** @@ -1421,7 +1438,7 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys) static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { struct hfi1_devdata *dd = dd_from_ibdev(ibdev); u32 n_blocks_sent = OPA_AM_NBLK(am); @@ -1431,6 +1448,7 @@ static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data, int i; u16 n_blocks_avail; unsigned npkeys = hfi1_get_npkeys(dd); + u32 size = 0; if (n_blocks_sent == 0) { pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n", @@ -1441,6 +1459,13 @@ static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data, n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1; + size = sizeof(u16) * (n_blocks_sent * OPA_PARTITION_TABLE_BLK_SIZE); + + if (smp_length_check(size, max_len)) { + smp->status |= IB_SMP_INVALID_FIELD; + return reply((struct ib_mad_hdr *)smp); + } + if (start_block + n_blocks_sent > n_blocks_avail || n_blocks_sent > OPA_NUM_PKEY_BLOCKS_PER_SMP) { pr_warn("OPA Set PKey AM Invalid : s 0x%x; req 0x%x; avail 0x%x; blk/smp 0x%lx\n", @@ -1458,7 +1483,8 @@ static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data, return reply((struct ib_mad_hdr *)smp); } - return __subn_get_opa_pkeytable(smp, am, data, ibdev, port, resp_len); + return 
__subn_get_opa_pkeytable(smp, am, data, ibdev, port, resp_len, + max_len); } #define ILLEGAL_VL 12 @@ -1519,14 +1545,14 @@ static int get_sc2vlt_tables(struct hfi1_devdata *dd, void *data) static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { struct hfi1_ibport *ibp = to_iport(ibdev, port); u8 *p = data; size_t size = ARRAY_SIZE(ibp->sl_to_sc); /* == 32 */ unsigned i; - if (am) { + if (am || smp_length_check(size, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } @@ -1542,14 +1568,15 @@ static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data, static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { struct hfi1_ibport *ibp = to_iport(ibdev, port); u8 *p = data; + size_t size = ARRAY_SIZE(ibp->sl_to_sc); int i; u8 sc; - if (am) { + if (am || smp_length_check(size, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } @@ -1564,19 +1591,20 @@ static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data, } } - return __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port, resp_len); + return __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port, resp_len, + max_len); } static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { struct hfi1_ibport *ibp = to_iport(ibdev, port); u8 *p = data; size_t size = ARRAY_SIZE(ibp->sc_to_sl); /* == 32 */ unsigned i; - if (am) { + if (am || smp_length_check(size, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } @@ -1592,13 +1620,14 @@ static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data, static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { struct hfi1_ibport *ibp = to_iport(ibdev, port); + size_t size = ARRAY_SIZE(ibp->sc_to_sl); u8 *p = data; int i; - if (am) { + if (am || smp_length_check(size, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } @@ -1606,19 +1635,20 @@ static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data, for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++) ibp->sc_to_sl[i] = *p++; - return __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port, resp_len); + return __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port, resp_len, + max_len); } static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { u32 n_blocks = OPA_AM_NBLK(am); struct hfi1_devdata *dd = dd_from_ibdev(ibdev); void *vp = (void *)data; size_t size = 4 * sizeof(u64); - if (n_blocks != 1) { + if (n_blocks != 1 || smp_length_check(size, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } @@ -1633,7 +1663,7 @@ static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data, static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { u32 n_blocks = OPA_AM_NBLK(am); int async_update = OPA_AM_ASYNC(am); @@ -1641,8 +1671,15 @@ static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data, void *vp = (void *)data; struct 
hfi1_pportdata *ppd; int lstate; + /* + * set_sc2vlt_tables writes the information contained in *data + * to four 64-bit registers SendSC2VLt[0-3]. We need to make + * sure *max_len is not greater than the total size of the four + * SendSC2VLt[0-3] registers. + */ + size_t size = 4 * sizeof(u64); - if (n_blocks != 1 || async_update) { + if (n_blocks != 1 || async_update || smp_length_check(size, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } @@ -1662,27 +1699,28 @@ static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data, set_sc2vlt_tables(dd, vp); - return __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port, resp_len); + return __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port, resp_len, + max_len); } static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { u32 n_blocks = OPA_AM_NPORT(am); struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_pportdata *ppd; void *vp = (void *)data; - int size; + int size = sizeof(struct sc2vlnt); - if (n_blocks != 1) { + if (n_blocks != 1 || smp_length_check(size, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } ppd = dd->pport + (port - 1); - size = fm_get_table(ppd, FM_TBL_SC2VLNT, vp); + fm_get_table(ppd, FM_TBL_SC2VLNT, vp); if (resp_len) *resp_len += size; @@ -1692,15 +1730,16 @@ static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data, static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { u32 n_blocks = OPA_AM_NPORT(am); struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_pportdata *ppd; void *vp = (void *)data; int lstate; + int size = sizeof(struct sc2vlnt); - if (n_blocks != 1) { + if (n_blocks != 1 || smp_length_check(size, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } @@ -1718,12 +1757,12 @@ static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data, fm_set_table(ppd, FM_TBL_SC2VLNT, vp); return __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); } static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { u32 nports = OPA_AM_NPORT(am); u32 start_of_sm_config = OPA_AM_START_SM_CFG(am); @@ -1732,7 +1771,7 @@ static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data, struct hfi1_pportdata *ppd; struct opa_port_state_info *psi = (struct opa_port_state_info *)data; - if (nports != 1) { + if (nports != 1 || smp_length_check(sizeof(*psi), max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } @@ -1765,7 +1804,7 @@ static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data, static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { u32 nports = OPA_AM_NPORT(am); u32 start_of_sm_config = OPA_AM_START_SM_CFG(am); @@ -1776,7 +1815,7 @@ static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data, struct opa_port_state_info *psi = (struct opa_port_state_info *)data; int ret, invalid = 0; - if (nports != 1) { + if (nports != 1 || smp_length_check(sizeof(*psi), max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } @@ -1806,19 +1845,21 @@ 
static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data, if (invalid) smp->status |= IB_SMP_INVALID_FIELD; - return __subn_get_opa_psi(smp, am, data, ibdev, port, resp_len); + return __subn_get_opa_psi(smp, am, data, ibdev, port, resp_len, + max_len); } static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { struct hfi1_devdata *dd = dd_from_ibdev(ibdev); u32 addr = OPA_AM_CI_ADDR(am); u32 len = OPA_AM_CI_LEN(am) + 1; int ret; - if (dd->pport->port_type != PORT_TYPE_QSFP) { + if (dd->pport->port_type != PORT_TYPE_QSFP || + smp_length_check(len, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } @@ -1861,21 +1902,22 @@ static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data, } static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data, - struct ib_device *ibdev, u8 port, u32 *resp_len) + struct ib_device *ibdev, u8 port, u32 *resp_len, + u32 max_len) { u32 num_ports = OPA_AM_NPORT(am); struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_pportdata *ppd; struct buffer_control *p = (struct buffer_control *)data; - int size; + int size = sizeof(struct buffer_control); - if (num_ports != 1) { + if (num_ports != 1 || smp_length_check(size, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } ppd = dd->pport + (port - 1); - size = fm_get_table(ppd, FM_TBL_BUFFER_CONTROL, p); + fm_get_table(ppd, FM_TBL_BUFFER_CONTROL, p); trace_bct_get(dd, p); if (resp_len) *resp_len += size; @@ -1884,14 +1926,15 @@ static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data, } static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data, - struct ib_device *ibdev, u8 port, u32 *resp_len) + struct ib_device *ibdev, u8 port, u32 *resp_len, + u32 max_len) { u32 num_ports = OPA_AM_NPORT(am); struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_pportdata *ppd; struct buffer_control *p = (struct buffer_control *)data; - if (num_ports != 1) { + if (num_ports != 1 || smp_length_check(sizeof(*p), max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } @@ -1902,41 +1945,43 @@ static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data, return reply((struct ib_mad_hdr *)smp); } - return __subn_get_opa_bct(smp, am, data, ibdev, port, resp_len); + return __subn_get_opa_bct(smp, am, data, ibdev, port, resp_len, + max_len); } static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port)); u32 num_ports = OPA_AM_NPORT(am); u8 section = (am & 0x00ff0000) >> 16; u8 *p = data; - int size = 0; + int size = 256; - if (num_ports != 1) { + if (num_ports != 1 || smp_length_check(size, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } switch (section) { case OPA_VLARB_LOW_ELEMENTS: - size = fm_get_table(ppd, FM_TBL_VL_LOW_ARB, p); + fm_get_table(ppd, FM_TBL_VL_LOW_ARB, p); break; case OPA_VLARB_HIGH_ELEMENTS: - size = fm_get_table(ppd, FM_TBL_VL_HIGH_ARB, p); + fm_get_table(ppd, FM_TBL_VL_HIGH_ARB, p); break; case OPA_VLARB_PREEMPT_ELEMENTS: - size = fm_get_table(ppd, FM_TBL_VL_PREEMPT_ELEMS, p); + fm_get_table(ppd, FM_TBL_VL_PREEMPT_ELEMS, p); break; case OPA_VLARB_PREEMPT_MATRIX: - size = fm_get_table(ppd, FM_TBL_VL_PREEMPT_MATRIX, p); + 
fm_get_table(ppd, FM_TBL_VL_PREEMPT_MATRIX, p); break; default: pr_warn("OPA SubnGet(VL Arb) AM Invalid : 0x%x\n", be32_to_cpu(smp->attr_mod)); smp->status |= IB_SMP_INVALID_FIELD; + size = 0; break; } @@ -1948,14 +1993,15 @@ static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data, static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port)); u32 num_ports = OPA_AM_NPORT(am); u8 section = (am & 0x00ff0000) >> 16; u8 *p = data; + int size = 256; - if (num_ports != 1) { + if (num_ports != 1 || smp_length_check(size, max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } @@ -1983,7 +2029,8 @@ static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data, break; } - return __subn_get_opa_vl_arb(smp, am, data, ibdev, port, resp_len); + return __subn_get_opa_vl_arb(smp, am, data, ibdev, port, resp_len, + max_len); } struct opa_pma_mad { @@ -3279,13 +3326,18 @@ struct opa_congestion_info_attr { static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { struct opa_congestion_info_attr *p = (struct opa_congestion_info_attr *)data; struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); + if (smp_length_check(sizeof(*p), max_len)) { + smp->status |= IB_SMP_INVALID_FIELD; + return reply((struct ib_mad_hdr *)smp); + } + p->congestion_info = 0; p->control_table_cap = ppd->cc_max_table_entries; p->congestion_log_length = OPA_CONG_LOG_ELEMS; @@ -3298,7 +3350,7 @@ static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data, static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, - u8 port, u32 *resp_len) + u8 port, u32 *resp_len, u32 max_len) { int i; struct opa_congestion_setting_attr *p = @@ -3308,6 +3360,11 @@ static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am, struct opa_congestion_setting_entry_shadow *entries; struct cc_state *cc_state; + if (smp_length_check(sizeof(*p), max_len)) { + smp->status |= IB_SMP_INVALID_FIELD; + return reply((struct ib_mad_hdr *)smp); + } + rcu_read_lock(); cc_state = get_cc_state(ppd); @@ -3382,7 +3439,7 @@ static void apply_cc_state(struct hfi1_pportdata *ppd) static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { struct opa_congestion_setting_attr *p = (struct opa_congestion_setting_attr *)data; @@ -3391,6 +3448,11 @@ static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data, struct opa_congestion_setting_entry_shadow *entries; int i; + if (smp_length_check(sizeof(*p), max_len)) { + smp->status |= IB_SMP_INVALID_FIELD; + return reply((struct ib_mad_hdr *)smp); + } + /* * Save details from packet into the ppd. Hold the cc_state_lock so * our information is consistent with anyone trying to apply the state. 
@@ -3412,12 +3474,12 @@ static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data, apply_cc_state(ppd); return __subn_get_opa_cong_setting(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); } static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, - u8 port, u32 *resp_len) + u8 port, u32 *resp_len, u32 max_len) { struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); @@ -3425,7 +3487,7 @@ static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am, s64 ts; int i; - if (am != 0) { + if (am || smp_length_check(sizeof(*cong_log), max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } @@ -3483,7 +3545,7 @@ static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am, static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { struct ib_cc_table_attr *cc_table_attr = (struct ib_cc_table_attr *)data; @@ -3495,9 +3557,10 @@ static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, int i, j; u32 sentry, eentry; struct cc_state *cc_state; + u32 size = sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1); /* sanity check n_blocks, start_block */ - if (n_blocks == 0 || + if (n_blocks == 0 || smp_length_check(size, max_len) || start_block + n_blocks > ppd->cc_max_table_entries) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); @@ -3527,14 +3590,14 @@ static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, rcu_read_unlock(); if (resp_len) - *resp_len += sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1); + *resp_len += size; return reply((struct ib_mad_hdr *)smp); } static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { struct ib_cc_table_attr *p = (struct ib_cc_table_attr *)data; struct hfi1_ibport *ibp = to_iport(ibdev, port); @@ -3545,9 +3608,10 @@ static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, int i, j; u32 sentry, eentry; u16 ccti_limit; + u32 size = sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1); /* sanity check n_blocks, start_block */ - if (n_blocks == 0 || + if (n_blocks == 0 || smp_length_check(size, max_len) || start_block + n_blocks > ppd->cc_max_table_entries) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); @@ -3578,7 +3642,8 @@ static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, /* now apply the information */ apply_cc_state(ppd); - return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len); + return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len, + max_len); } struct opa_led_info { @@ -3591,7 +3656,7 @@ struct opa_led_info { static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_pportdata *ppd = dd->pport; @@ -3599,7 +3664,7 @@ static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data, u32 nport = OPA_AM_NPORT(am); u32 is_beaconing_active; - if (nport != 1) { + if (nport != 1 || smp_length_check(sizeof(*p), max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } @@ -3621,14 +3686,14 @@ static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data, static 
int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct opa_led_info *p = (struct opa_led_info *)data; u32 nport = OPA_AM_NPORT(am); int on = !!(be32_to_cpu(p->rsvd_led_mask) & OPA_LED_MASK); - if (nport != 1) { + if (nport != 1 || smp_length_check(sizeof(*p), max_len)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } @@ -3638,12 +3703,13 @@ static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data, else shutdown_led_override(dd->pport); - return __subn_get_opa_led_info(smp, am, data, ibdev, port, resp_len); + return __subn_get_opa_led_info(smp, am, data, ibdev, port, resp_len, + max_len); } static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { int ret; struct hfi1_ibport *ibp = to_iport(ibdev, port); @@ -3651,71 +3717,71 @@ static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am, switch (attr_id) { case IB_SMP_ATTR_NODE_DESC: ret = __subn_get_opa_nodedesc(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case IB_SMP_ATTR_NODE_INFO: ret = __subn_get_opa_nodeinfo(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case IB_SMP_ATTR_PORT_INFO: ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case IB_SMP_ATTR_PKEY_TABLE: ret = __subn_get_opa_pkeytable(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case OPA_ATTRIB_ID_SL_TO_SC_MAP: ret = __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case OPA_ATTRIB_ID_SC_TO_SL_MAP: ret = __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case OPA_ATTRIB_ID_SC_TO_VLT_MAP: ret = __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case OPA_ATTRIB_ID_SC_TO_VLNT_MAP: ret = __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case OPA_ATTRIB_ID_PORT_STATE_INFO: ret = __subn_get_opa_psi(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE: ret = __subn_get_opa_bct(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case OPA_ATTRIB_ID_CABLE_INFO: ret = __subn_get_opa_cable_info(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case IB_SMP_ATTR_VL_ARB_TABLE: ret = __subn_get_opa_vl_arb(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case OPA_ATTRIB_ID_CONGESTION_INFO: ret = __subn_get_opa_cong_info(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING: ret = __subn_get_opa_cong_setting(smp, am, data, ibdev, - port, resp_len); + port, resp_len, max_len); break; case OPA_ATTRIB_ID_HFI_CONGESTION_LOG: ret = __subn_get_opa_hfi1_cong_log(smp, am, data, ibdev, - port, resp_len); + port, resp_len, max_len); break; case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE: ret = __subn_get_opa_cc_table(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case IB_SMP_ATTR_LED_INFO: ret = __subn_get_opa_led_info(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case IB_SMP_ATTR_SM_INFO: if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) @@ -3733,7 +3799,7 @@ static int 
subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am, static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, - u32 *resp_len) + u32 *resp_len, u32 max_len) { int ret; struct hfi1_ibport *ibp = to_iport(ibdev, port); @@ -3741,51 +3807,51 @@ static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am, switch (attr_id) { case IB_SMP_ATTR_PORT_INFO: ret = __subn_set_opa_portinfo(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case IB_SMP_ATTR_PKEY_TABLE: ret = __subn_set_opa_pkeytable(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case OPA_ATTRIB_ID_SL_TO_SC_MAP: ret = __subn_set_opa_sl_to_sc(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case OPA_ATTRIB_ID_SC_TO_SL_MAP: ret = __subn_set_opa_sc_to_sl(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case OPA_ATTRIB_ID_SC_TO_VLT_MAP: ret = __subn_set_opa_sc_to_vlt(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case OPA_ATTRIB_ID_SC_TO_VLNT_MAP: ret = __subn_set_opa_sc_to_vlnt(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case OPA_ATTRIB_ID_PORT_STATE_INFO: ret = __subn_set_opa_psi(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE: ret = __subn_set_opa_bct(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case IB_SMP_ATTR_VL_ARB_TABLE: ret = __subn_set_opa_vl_arb(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING: ret = __subn_set_opa_cong_setting(smp, am, data, ibdev, - port, resp_len); + port, resp_len, max_len); break; case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE: ret = __subn_set_opa_cc_table(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case IB_SMP_ATTR_LED_INFO: ret = __subn_set_opa_led_info(smp, am, data, ibdev, port, - resp_len); + resp_len, max_len); break; case IB_SMP_ATTR_SM_INFO: if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) @@ -3841,7 +3907,10 @@ static int subn_get_opa_aggregate(struct opa_smp *smp, memset(next_smp + sizeof(*agg), 0, agg_data_len); (void)subn_get_opa_sma(agg->attr_id, smp, am, agg->data, - ibdev, port, NULL); + ibdev, port, NULL, (u32)agg_data_len); + + if (smp->status & IB_SMP_INVALID_FIELD) + break; if (smp->status & ~IB_SMP_DIRECTION) { set_aggr_error(agg); return reply((struct ib_mad_hdr *)smp); @@ -3884,7 +3953,9 @@ static int subn_set_opa_aggregate(struct opa_smp *smp, } (void)subn_set_opa_sma(agg->attr_id, smp, am, agg->data, - ibdev, port, NULL); + ibdev, port, NULL, (u32)agg_data_len); + if (smp->status & IB_SMP_INVALID_FIELD) + break; if (smp->status & ~IB_SMP_DIRECTION) { set_aggr_error(agg); return reply((struct ib_mad_hdr *)smp); @@ -3994,12 +4065,13 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags, struct opa_smp *smp = (struct opa_smp *)out_mad; struct hfi1_ibport *ibp = to_iport(ibdev, port); u8 *data; - u32 am; + u32 am, data_size; __be16 attr_id; int ret; *out_mad = *in_mad; data = opa_get_smp_data(smp); + data_size = (u32)opa_get_smp_data_size(smp); am = be32_to_cpu(smp->attr_mod); attr_id = smp->attr_id; @@ -4043,7 +4115,8 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags, default: clear_opa_smp_data(smp); ret = subn_get_opa_sma(attr_id, smp, am, data, - ibdev, port, resp_len); + ibdev, port, resp_len, + data_size); break; case OPA_ATTRIB_ID_AGGREGATE: ret = 
subn_get_opa_aggregate(smp, ibdev, port, @@ -4055,7 +4128,8 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags, switch (attr_id) { default: ret = subn_set_opa_sma(attr_id, smp, am, data, - ibdev, port, resp_len); + ibdev, port, resp_len, + data_size); break; case OPA_ATTRIB_ID_AGGREGATE: ret = subn_set_opa_aggregate(smp, ibdev, port, From d54389836ac63cb517bf863b9f6c22626c6aec26 Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Mon, 29 May 2017 17:20:53 -0700 Subject: [PATCH 0111/1795] IB/core: Allow QP state transition from reset to error Playing with IP-O-IB interface can trigger a warning message: "ib0: Failed to modify QP to ERROR state" to be logged. This happens when the QP is in IB_QPS_RESET state and the stack is trying to transition it to IB_QPS_ERR state in ipoib_ib_dev_stop(). According to the IB spec, Table 91 - "QP State Transition Properties" it looks like the transition from reset to error is valid: Transition: Any State to Error Required Attributes: None Optional Attributes: None allowed Actions: Queue processing is stopped. Work Requests pending or in process are completed in error, when possible. This patch allows the transition and quiets the message. Reviewed-by: Dennis Dalessandro Signed-off-by: Tadeusz Struk Signed-off-by: Dennis Dalessandro Reviewed-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/core/verbs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 4792f5209ac2..b822bedeb4a5 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -872,6 +872,7 @@ static const struct { } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { [IB_QPS_RESET] = { [IB_QPS_RESET] = { .valid = 1 }, + [IB_QPS_ERR] = { .valid = 1 }, [IB_QPS_INIT] = { .valid = 1, .req_param = { From bec7c79cd8f764ba84c8ec6d8c402b8a7cd3a54f Mon Sep 17 00:00:00 2001 From: "Byczkowski, Jakub" Date: Mon, 29 May 2017 17:21:32 -0700 Subject: [PATCH 0112/1795] IB/hfi1: Modify handling of physical link state by Host Driver Ensure states returned to the Fabric Manager are consistent with the OPA specification by caching the physical state along with the logical state. 
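As an illustrative aside (not part of the patch itself), the caching pattern amounts to reading the chip state into a per-port field and answering SMA/verbs queries from that cache. The helper and field names below follow the diff; error handling and the informational logging are omitted:

	/* Condensed sketch of the cached physical-state approach */
	void cache_physical_state(struct hfi1_pportdata *ppd)
	{
		/* read the hardware once and remember the raw chip pstate */
		ppd->pstate = read_physical_state(ppd->dd);
	}

	u32 driver_pstate(struct hfi1_pportdata *ppd)
	{
		/* hide LinkUp from the SM until the host link state is up */
		if (ppd->pstate == PLS_LINKUP && !(ppd->host_link_state & HLS_UP))
			return IB_PORTPHYSSTATE_TRAINING;
		return chip_to_opa_pstate(ppd->dd, ppd->pstate);
	}

MAD and verbs queries then report driver_pstate(ppd) instead of re-reading the hardware, so the Fabric Manager always sees a physical state consistent with the cached logical state.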
Reviewed-by: Stuart Summers Reviewed-by: Ira Weiny Reviewed-by: Andrzej Kotlowski Signed-off-by: Jakub Byczkowski Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/chip.c | 102 +++++++++++++++++++--------- drivers/infiniband/hw/hfi1/chip.h | 2 +- drivers/infiniband/hw/hfi1/driver.c | 8 ++- drivers/infiniband/hw/hfi1/hfi.h | 18 ++++- drivers/infiniband/hw/hfi1/mad.c | 6 +- drivers/infiniband/hw/hfi1/verbs.c | 2 +- 6 files changed, 97 insertions(+), 41 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 99c29ddcca35..3fae98d079e4 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -1066,6 +1066,8 @@ static int thermal_init(struct hfi1_devdata *dd); static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state, int msecs); +static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state, + int msecs); static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc); static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr); static void handle_temp_err(struct hfi1_devdata *dd); @@ -10028,28 +10030,6 @@ static void set_lidlmc(struct hfi1_pportdata *ppd) sdma_update_lmc(dd, mask, ppd->lid); } -static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs) -{ - unsigned long timeout; - u32 curr_state; - - timeout = jiffies + msecs_to_jiffies(msecs); - while (1) { - curr_state = read_physical_state(dd); - if (curr_state == state) - break; - if (time_after(jiffies, timeout)) { - dd_dev_err(dd, - "timeout waiting for phy link state 0x%x, current state is 0x%x\n", - state, curr_state); - return -ETIMEDOUT; - } - usleep_range(1950, 2050); /* sleep 2ms-ish */ - } - - return 0; -} - static const char *state_completed_string(u32 completed) { static const char * const state_completed[] = { @@ -10283,7 +10263,7 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) if (do_wait) { /* it can take a while for the link to go down */ - ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000); + ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 10000); if (ret < 0) return ret; } @@ -10536,6 +10516,19 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) goto unexpected; } + /* + * Wait for Link_Up physical state. + * Physical and Logical states should already be + * be transitioned to LinkUp and LinkInit respectively. 
+ */ + ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000); + if (ret) { + dd_dev_err(dd, + "%s: physical state did not change to LINK-UP\n", + __func__); + break; + } + ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000); if (ret) { dd_dev_err(dd, @@ -10649,6 +10642,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) */ if (ret) goto_offline(ppd, 0); + else + cache_physical_state(ppd); break; case HLS_DN_DISABLE: /* link is disabled */ @@ -10673,6 +10668,13 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) ret = -EINVAL; break; } + ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000); + if (ret) { + dd_dev_err(dd, + "%s: physical state did not change to DISABLED\n", + __func__); + break; + } dc_shutdown(dd); } ppd->host_link_state = HLS_DN_DISABLE; @@ -10690,6 +10692,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) if (ppd->host_link_state != HLS_DN_POLL) goto unexpected; ppd->host_link_state = HLS_VERIFY_CAP; + cache_physical_state(ppd); break; case HLS_GOING_UP: if (ppd->host_link_state != HLS_VERIFY_CAP) @@ -12663,21 +12666,56 @@ static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state, return -ETIMEDOUT; } -u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd) +/* + * Read the physical hardware link state and set the driver's cached value + * of it. + */ +void cache_physical_state(struct hfi1_pportdata *ppd) { - u32 pstate; + u32 read_pstate; u32 ib_pstate; - pstate = read_physical_state(ppd->dd); - ib_pstate = chip_to_opa_pstate(ppd->dd, pstate); - if (ppd->last_pstate != ib_pstate) { + read_pstate = read_physical_state(ppd->dd); + ib_pstate = chip_to_opa_pstate(ppd->dd, read_pstate); + /* check if OPA pstate changed */ + if (chip_to_opa_pstate(ppd->dd, ppd->pstate) != ib_pstate) { dd_dev_info(ppd->dd, "%s: physical state changed to %s (0x%x), phy 0x%x\n", __func__, opa_pstate_name(ib_pstate), ib_pstate, - pstate); - ppd->last_pstate = ib_pstate; + read_pstate); } - return ib_pstate; + ppd->pstate = read_pstate; +} + +/* + * wait_physical_linkstate - wait for an physical link state change to occur + * @ppd: port device + * @state: the state to wait for + * @msecs: the number of milliseconds to wait + * + * Wait up to msecs milliseconds for physical link state change to occur. + * Returns 0 if state reached, otherwise -ETIMEDOUT. 
+ */ +static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state, + int msecs) +{ + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(msecs); + while (1) { + cache_physical_state(ppd); + if (ppd->pstate == state) + break; + if (time_after(jiffies, timeout)) { + dd_dev_err(ppd->dd, + "timeout waiting for phy link state 0x%x, current state is 0x%x\n", + state, ppd->pstate); + return -ETIMEDOUT; + } + usleep_range(1950, 2050); /* sleep 2ms-ish */ + } + + return 0; } #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \ @@ -14781,7 +14819,7 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev, /* start in offline */ ppd->host_link_state = HLS_DN_OFFLINE; init_vl_arb_caches(ppd); - ppd->last_pstate = 0xff; /* invalid value */ + ppd->pstate = PLS_OFFLINE; } dd->link_default = HLS_DN_POLL; diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h index 0b4f418ba0ac..3dab3156ba4a 100644 --- a/drivers/infiniband/hw/hfi1/chip.h +++ b/drivers/infiniband/hw/hfi1/chip.h @@ -744,6 +744,7 @@ int is_bx(struct hfi1_devdata *dd); u32 read_physical_state(struct hfi1_devdata *dd); u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate); u32 get_logical_state(struct hfi1_pportdata *ppd); +void cache_physical_state(struct hfi1_pportdata *ppd); const char *opa_lstate_name(u32 lstate); const char *opa_pstate_name(u32 pstate); u32 driver_physical_state(struct hfi1_pportdata *ppd); @@ -1354,7 +1355,6 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd); void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt); u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp); u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp); -u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd); int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which); int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val); int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey); diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c index 9e59430bc55c..e64e9e28c936 100644 --- a/drivers/infiniband/hw/hfi1/driver.c +++ b/drivers/infiniband/hw/hfi1/driver.c @@ -906,10 +906,12 @@ static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd, sc = hfi1_9B_get_sc5(hdr, packet->rhf); } if (sc != SC15_PACKET) { - int hwstate = read_logical_state(dd); + int hwstate = driver_lstate(rcd->ppd); - if (hwstate != LSTATE_ACTIVE) { - dd_dev_info(dd, "Unexpected link state %d\n", hwstate); + if (hwstate != IB_PORT_ACTIVE) { + dd_dev_info(dd, + "Unexpected link state %s\n", + opa_lstate_name(hwstate)); return 0; } diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index 8f74cf6d6c9a..bca781c3b5ac 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -663,7 +663,7 @@ struct hfi1_pportdata { u8 link_enabled; /* link enabled? */ u8 linkinit_reason; u8 local_tx_rate; /* rate given to 8051 firmware */ - u8 last_pstate; /* info only */ + u8 pstate; /* info only */ u8 qsfp_retry_count; /* placeholders for IB MAD packet settings */ @@ -1330,6 +1330,22 @@ static inline u32 driver_lstate(struct hfi1_pportdata *ppd) return ppd->lstate; } +/* return the driver's idea of the physical OPA port state */ +static inline u32 driver_pstate(struct hfi1_pportdata *ppd) +{ + /* + * The driver does some processing from the time the physical + * link state is at LINKUP to the time the SM can be notified + * as such. 
Return IB_PORTPHYSSTATE_TRAINING until the software + * state is ready. + */ + if (ppd->pstate == PLS_LINKUP && + !(ppd->host_link_state & HLS_UP)) + return IB_PORTPHYSSTATE_TRAINING; + else + return chip_to_opa_pstate(ppd->dd, ppd->pstate); +} + void receive_interrupt_work(struct work_struct *work); /* extract service channel from header and rhf */ diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index b180dff5f2e9..c8daf633212d 100644 --- a/drivers/infiniband/hw/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c @@ -113,7 +113,7 @@ static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len) return; /* o14-3.2.1 */ - if (ppd_from_ibp(ibp)->lstate != IB_PORT_ACTIVE) + if (driver_lstate(ppd_from_ibp(ibp)) != IB_PORT_ACTIVE) return; /* o14-2 */ @@ -615,7 +615,7 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, ppd->offline_disabled_reason; pi->port_states.portphysstate_portstate = - (hfi1_ibphys_portstate(ppd) << 4) | state; + (driver_pstate(ppd) << 4) | state; pi->mkeyprotect_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc; @@ -1791,7 +1791,7 @@ static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data, ppd->offline_disabled_reason; psi->port_states.portphysstate_portstate = - (hfi1_ibphys_portstate(ppd) << 4) | (lstate & 0xf); + (driver_pstate(ppd) << 4) | (lstate & 0xf); psi->link_width_downgrade_tx_active = cpu_to_be16(ppd->link_width_downgrade_tx_active); psi->link_width_downgrade_rx_active = diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 2d7759f0c6b4..5b53faf47042 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -1354,7 +1354,7 @@ static int query_port(struct rvt_dev_info *rdi, u8 port_num, props->lmc = ppd->lmc; /* OPA logical states match IB logical states */ props->state = driver_lstate(ppd); - props->phys_state = hfi1_ibphys_portstate(ppd); + props->phys_state = driver_pstate(ppd); props->gid_tbl_len = HFI1_GUIDS_PER_PORT; props->active_width = (u8)opa_width_to_ib(ppd->link_width_active); /* see rate_show() in ib core/sysfs.c */ From 13d84914db56c1afd1c9bf4f41e9bf91f061a7dd Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Mon, 29 May 2017 17:22:01 -0700 Subject: [PATCH 0113/1795] IB/hfi1,qib: Do not send QKey trap for UD qps According to IBTA spec a QKey violation should not result in a bad qkey trap being triggered for UD queue pairs. Also since it is a silent error we do not increment the q_key violation or the dropped packet counters. Reviewed-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/mad.c | 14 ++++----- drivers/infiniband/hw/hfi1/ruc.c | 8 ++--- drivers/infiniband/hw/hfi1/ud.c | 37 ++++++++--------------- drivers/infiniband/hw/hfi1/verbs.h | 4 +-- drivers/infiniband/hw/qib/qib_mad.c | 13 ++++---- drivers/infiniband/hw/qib/qib_ruc.c | 20 ++++++------- drivers/infiniband/hw/qib/qib_ud.c | 43 +++++++++------------------ drivers/infiniband/hw/qib/qib_verbs.h | 4 +-- 8 files changed, 54 insertions(+), 89 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index c8daf633212d..a081a98d728a 100644 --- a/drivers/infiniband/hw/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c @@ -180,10 +180,10 @@ static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len) } /* - * Send a bad [PQ]_Key trap (ch. 14.3.8). + * Send a bad P_Key trap (ch. 14.3.8). 
*/ -void hfi1_bad_pqkey(struct hfi1_ibport *ibp, __be16 trap_num, u32 key, u32 sl, - u32 qp1, u32 qp2, u16 lid1, u16 lid2) +void hfi1_bad_pkey(struct hfi1_ibport *ibp, u32 key, u32 sl, + u32 qp1, u32 qp2, u16 lid1, u16 lid2) { struct opa_mad_notice_attr data; u32 lid = ppd_from_ibp(ibp)->lid; @@ -191,17 +191,13 @@ void hfi1_bad_pqkey(struct hfi1_ibport *ibp, __be16 trap_num, u32 key, u32 sl, u32 _lid2 = lid2; memset(&data, 0, sizeof(data)); - - if (trap_num == OPA_TRAP_BAD_P_KEY) - ibp->rvp.pkey_violations++; - else - ibp->rvp.qkey_violations++; ibp->rvp.n_pkt_drops++; + ibp->rvp.pkey_violations++; /* Send violation trap */ data.generic_type = IB_NOTICE_TYPE_SECURITY; data.prod_type_lsb = IB_NOTICE_PROD_CA; - data.trap_num = trap_num; + data.trap_num = OPA_TRAP_BAD_P_KEY; data.issuer_lid = cpu_to_be32(lid); data.ntc_257_258.lid1 = cpu_to_be32(_lid1); data.ntc_257_258.lid2 = cpu_to_be32(_lid2); diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c index 476fe5da2992..9cf506a9a796 100644 --- a/drivers/infiniband/hw/hfi1/ruc.c +++ b/drivers/infiniband/hw/hfi1/ruc.c @@ -254,8 +254,8 @@ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet) } if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0, sc5, slid))) { - hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY, (u16)bth0, sl, - 0, qp->ibqp.qp_num, slid, dlid); + hfi1_bad_pkey(ibp, (u16)bth0, sl, + 0, qp->ibqp.qp_num, slid, dlid); return 1; } /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */ @@ -290,8 +290,8 @@ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet) } if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0, sc5, slid))) { - hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY, (u16)bth0, sl, - 0, qp->ibqp.qp_num, slid, dlid); + hfi1_bad_pkey(ibp, (u16)bth0, sl, + 0, qp->ibqp.qp_num, slid, dlid); return 1; } /* Validate the SLID. See Ch. 9.6.1.5 */ diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c index c995aa58c36a..6bf7a1b08491 100644 --- a/drivers/infiniband/hw/hfi1/ud.c +++ b/drivers/infiniband/hw/hfi1/ud.c @@ -110,10 +110,10 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) ((1 << ppd->lmc) - 1)); if (unlikely(ingress_pkey_check(ppd, pkey, sc5, qp->s_pkey_index, slid))) { - hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY, pkey, - rdma_ah_get_sl(ah_attr), - sqp->ibqp.qp_num, qp->ibqp.qp_num, - slid, rdma_ah_get_dlid(ah_attr)); + hfi1_bad_pkey(ibp, pkey, + rdma_ah_get_sl(ah_attr), + sqp->ibqp.qp_num, qp->ibqp.qp_num, + slid, rdma_ah_get_dlid(ah_attr)); goto drop; } } @@ -128,18 +128,8 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) qkey = (int)swqe->ud_wr.remote_qkey < 0 ? 
sqp->qkey : swqe->ud_wr.remote_qkey; - if (unlikely(qkey != qp->qkey)) { - u16 lid; - - lid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) & - ((1 << ppd->lmc) - 1)); - hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_Q_KEY, qkey, - rdma_ah_get_sl(ah_attr), - sqp->ibqp.qp_num, qp->ibqp.qp_num, - lid, - rdma_ah_get_dlid(ah_attr)); - goto drop; - } + if (unlikely(qkey != qp->qkey)) + goto drop; /* silently drop per IBTA spec */ } /* @@ -722,10 +712,10 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) * for invalid pkeys is optional according to * IB spec (release 1.3, section 10.9.4) */ - hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY, - pkey, sl, - src_qp, qp->ibqp.qp_num, - slid, dlid); + hfi1_bad_pkey(ibp, + pkey, sl, + src_qp, qp->ibqp.qp_num, + slid, dlid); return; } } else { @@ -734,12 +724,9 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) if (mgmt_pkey_idx < 0) goto drop; } - if (unlikely(qkey != qp->qkey)) { - hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_Q_KEY, qkey, sl, - src_qp, qp->ibqp.qp_num, - slid, dlid); + if (unlikely(qkey != qp->qkey)) /* Silent drop */ return; - } + /* Drop invalid MAD packets (see 13.5.3.1). */ if (unlikely(qp->ibqp.qp_num == 1 && (tlen > 2048 || (sc5 == 0xF)))) diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h index 17b38cd5f654..fdf1e1fb880c 100644 --- a/drivers/infiniband/hw/hfi1/verbs.h +++ b/drivers/infiniband/hw/hfi1/verbs.h @@ -236,8 +236,8 @@ static inline int hfi1_send_ok(struct rvt_qp *qp) /* * This must be called with s_lock held. */ -void hfi1_bad_pqkey(struct hfi1_ibport *ibp, __be16 trap_num, u32 key, u32 sl, - u32 qp1, u32 qp2, u16 lid1, u16 lid2); +void hfi1_bad_pkey(struct hfi1_ibport *ibp, u32 key, u32 sl, + u32 qp1, u32 qp2, u16 lid1, u16 lid2); void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num); void hfi1_sys_guid_chg(struct hfi1_ibport *ibp); void hfi1_node_desc_chg(struct hfi1_ibport *ibp); diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index da295e0392ed..a4a7f2a76f24 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c @@ -134,24 +134,21 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len) } /* - * Send a bad [PQ]_Key trap (ch. 14.3.8). + * Send a bad P_Key trap (ch. 14.3.8). 
*/ -void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl, - u32 qp1, u32 qp2, __be16 lid1, __be16 lid2) +void qib_bad_pkey(struct qib_ibport *ibp, u32 key, u32 sl, + u32 qp1, u32 qp2, __be16 lid1, __be16 lid2) { struct ib_mad_notice_attr data; - if (trap_num == IB_NOTICE_TRAP_BAD_PKEY) - ibp->rvp.pkey_violations++; - else - ibp->rvp.qkey_violations++; ibp->rvp.n_pkt_drops++; + ibp->rvp.pkey_violations++; /* Send violation trap */ data.generic_type = IB_NOTICE_TYPE_SECURITY; data.prod_type_msb = 0; data.prod_type_lsb = IB_NOTICE_PROD_CA; - data.trap_num = trap_num; + data.trap_num = IB_NOTICE_TRAP_BAD_PKEY; data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); data.toggle_count = 0; memset(&data.details, 0, sizeof(data.details)); diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c index 88d84cbf7e5a..28528459a052 100644 --- a/drivers/infiniband/hw/qib/qib_ruc.c +++ b/drivers/infiniband/hw/qib/qib_ruc.c @@ -256,11 +256,11 @@ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr, } if (!qib_pkey_ok((u16)bth0, qib_get_pkey(ibp, qp->s_alt_pkey_index))) { - qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, - (u16)bth0, - (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF, - 0, qp->ibqp.qp_num, - hdr->lrh[3], hdr->lrh[1]); + qib_bad_pkey(ibp, + (u16)bth0, + (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF, + 0, qp->ibqp.qp_num, + hdr->lrh[3], hdr->lrh[1]); goto err; } /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */ @@ -295,11 +295,11 @@ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr, } if (!qib_pkey_ok((u16)bth0, qib_get_pkey(ibp, qp->s_pkey_index))) { - qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, - (u16)bth0, - (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF, - 0, qp->ibqp.qp_num, - hdr->lrh[3], hdr->lrh[1]); + qib_bad_pkey(ibp, + (u16)bth0, + (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF, + 0, qp->ibqp.qp_num, + hdr->lrh[3], hdr->lrh[1]); goto err; } /* Validate the SLID. See Ch. 9.6.1.5 */ diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c index 341a123ee95c..be4907453ac4 100644 --- a/drivers/infiniband/hw/qib/qib_ud.c +++ b/drivers/infiniband/hw/qib/qib_ud.c @@ -66,8 +66,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) qp = rvt_lookup_qpn(rdi, &ibp->rvp, swqe->ud_wr.remote_qpn); if (!qp) { ibp->rvp.n_pkt_drops++; - rcu_read_unlock(); - return; + goto drop; } sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ? @@ -94,11 +93,11 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) if (unlikely(!qib_pkey_ok(pkey1, pkey2))) { lid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) & ((1 << ppd->lmc) - 1)); - qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey1, - rdma_ah_get_sl(ah_attr), - sqp->ibqp.qp_num, qp->ibqp.qp_num, - cpu_to_be16(lid), - cpu_to_be16(rdma_ah_get_dlid(ah_attr))); + qib_bad_pkey(ibp, pkey1, + rdma_ah_get_sl(ah_attr), + sqp->ibqp.qp_num, qp->ibqp.qp_num, + cpu_to_be16(lid), + cpu_to_be16(rdma_ah_get_dlid(ah_attr))); goto drop; } } @@ -113,18 +112,8 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) qkey = (int)swqe->ud_wr.remote_qkey < 0 ? 
sqp->qkey : swqe->ud_wr.remote_qkey; - if (unlikely(qkey != qp->qkey)) { - u16 lid; - - lid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) & - ((1 << ppd->lmc) - 1)); - qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey, - rdma_ah_get_sl(ah_attr), - sqp->ibqp.qp_num, qp->ibqp.qp_num, - cpu_to_be16(lid), - cpu_to_be16(rdma_ah_get_dlid(ah_attr))); + if (unlikely(qkey != qp->qkey)) goto drop; - } } /* @@ -487,22 +476,18 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr, pkey1 = be32_to_cpu(ohdr->bth[0]); pkey2 = qib_get_pkey(ibp, qp->s_pkey_index); if (unlikely(!qib_pkey_ok(pkey1, pkey2))) { - qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, - pkey1, - (be16_to_cpu(hdr->lrh[0]) >> 4) & + qib_bad_pkey(ibp, + pkey1, + (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF, - src_qp, qp->ibqp.qp_num, - hdr->lrh[3], hdr->lrh[1]); + src_qp, qp->ibqp.qp_num, + hdr->lrh[3], hdr->lrh[1]); return; } } - if (unlikely(qkey != qp->qkey)) { - qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey, - (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF, - src_qp, qp->ibqp.qp_num, - hdr->lrh[3], hdr->lrh[1]); + if (unlikely(qkey != qp->qkey)) return; - } + /* Drop invalid MAD packets (see 13.5.3.1). */ if (unlikely(qp->ibqp.qp_num == 1 && (tlen != 256 || diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index da0db5485ddc..33d5691a9b2d 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h @@ -241,8 +241,8 @@ static inline int qib_pkey_ok(u16 pkey1, u16 pkey2) return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0); } -void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl, - u32 qp1, u32 qp2, __be16 lid1, __be16 lid2); +void qib_bad_pkey(struct qib_ibport *ibp, u32 key, u32 sl, + u32 qp1, u32 qp2, __be16 lid1, __be16 lid2); void qib_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num); void qib_sys_guid_chg(struct qib_ibport *ibp); void qib_node_desc_chg(struct qib_ibport *ibp); From ddbf2efff4ad85135b9fd1cb53b10069a160bd58 Mon Sep 17 00:00:00 2001 From: Bartlomiej Dudek Date: Fri, 9 Jun 2017 15:59:26 -0700 Subject: [PATCH 0114/1795] IB/hfi1: Fix DC 8051 host info flag array Fix info array of host message flags by adding entry for link width downgrade and reverse values for BC SMA and BC PWR_MSG messages Reviewed-by: Jakub Byczkowski Signed-off-by: Bartlomiej Dudek Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/chip.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 3fae98d079e4..ebcb2c405c5f 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -1012,14 +1012,15 @@ static struct flag_table dc8051_info_err_flags[] = { */ static struct flag_table dc8051_info_host_msg_flags[] = { FLAG_ENTRY0("Host request done", 0x0001), - FLAG_ENTRY0("BC SMA message", 0x0002), - FLAG_ENTRY0("BC PWR_MGM message", 0x0004), + FLAG_ENTRY0("BC PWR_MGM message", 0x0002), + FLAG_ENTRY0("BC SMA message", 0x0004), FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008), FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010), FLAG_ENTRY0("External device config request", 0x0020), FLAG_ENTRY0("VerifyCap all frames received", 0x0040), FLAG_ENTRY0("LinkUp achieved", 0x0080), FLAG_ENTRY0("Link going down", 0x0100), + FLAG_ENTRY0("Link width downgraded", 0x0200), }; static u32 encoded_size(u32 size); From 702265fc0002497fd0ddaae4a5bec00a8a40da3e Mon Sep 17 00:00:00 2001 From: Jan Sokolowski Date: Fri, 
9 Jun 2017 15:59:33 -0700 Subject: [PATCH 0115/1795] IB/hfi1: Set proper logging levels on QSFP cable error events Change QSFP cable error events logging levels from info to error. Reviewed-by: Jakub Byczkowski Reviewed-by: Dennis Dalessandro Signed-off-by: Jan Sokolowski Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/chip.c | 64 +++++++++++++++---------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index ebcb2c405c5f..b8ee0e27dae6 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -9376,13 +9376,13 @@ static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd, if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) || (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING)) - dd_dev_info(dd, "%s: QSFP cable temperature too high\n", - __func__); + dd_dev_err(dd, "%s: QSFP cable temperature too high\n", + __func__); if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) || (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING)) - dd_dev_info(dd, "%s: QSFP cable temperature too low\n", - __func__); + dd_dev_err(dd, "%s: QSFP cable temperature too low\n", + __func__); /* * The remaining alarms/warnings don't matter if the link is down. @@ -9392,75 +9392,75 @@ static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd, if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) || (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING)) - dd_dev_info(dd, "%s: QSFP supply voltage too high\n", - __func__); + dd_dev_err(dd, "%s: QSFP supply voltage too high\n", + __func__); if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) || (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING)) - dd_dev_info(dd, "%s: QSFP supply voltage too low\n", - __func__); + dd_dev_err(dd, "%s: QSFP supply voltage too low\n", + __func__); /* Byte 2 is vendor specific */ if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) || (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING)) - dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n", - __func__); + dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n", + __func__); if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) || (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING)) - dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n", - __func__); + dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n", + __func__); if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) || (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING)) - dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n", - __func__); + dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n", + __func__); if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) || (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING)) - dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n", - __func__); + dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n", + __func__); if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) || (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING)) - dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n", - __func__); + dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n", + __func__); if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) || (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING)) - dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n", - __func__); + dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n", + __func__); if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) || 
(qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING)) - dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n", - __func__); + dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n", + __func__); if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) || (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING)) - dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n", - __func__); + dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n", + __func__); if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) || (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING)) - dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n", - __func__); + dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n", + __func__); if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) || (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING)) - dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n", - __func__); + dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n", + __func__); if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) || (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING)) - dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n", - __func__); + dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n", + __func__); if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) || (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING)) - dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n", - __func__); + dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n", + __func__); /* Bytes 9-10 and 11-12 are reserved */ /* Bytes 13-15 are vendor specific */ From 9c1a99c3882beb9e88ed41d914e75bab2d593926 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Fri, 9 Jun 2017 15:59:40 -0700 Subject: [PATCH 0116/1795] IB/hfi1: Create common expected receive verbs/PSM code Declarations and code in common between verbs and PSM are now moved to exp_rcv.[ch]. Reviewed-by: Michael J. Ruhl Reviewed-by: Mitko Haralanov Reviewed-by: Ashutosh Dixit Signed-off-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/Makefile | 2 +- drivers/infiniband/hw/hfi1/exp_rcv.c | 118 ++++++++++++++ drivers/infiniband/hw/hfi1/exp_rcv.h | 187 ++++++++++++++++++++++ drivers/infiniband/hw/hfi1/file_ops.c | 4 +- drivers/infiniband/hw/hfi1/user_exp_rcv.c | 121 -------------- drivers/infiniband/hw/hfi1/user_exp_rcv.h | 23 +-- drivers/infiniband/hw/hfi1/user_sdma.c | 38 ----- 7 files changed, 309 insertions(+), 184 deletions(-) create mode 100644 drivers/infiniband/hw/hfi1/exp_rcv.c create mode 100644 drivers/infiniband/hw/hfi1/exp_rcv.h diff --git a/drivers/infiniband/hw/hfi1/Makefile b/drivers/infiniband/hw/hfi1/Makefile index 88085f65432e..66d538c033b0 100644 --- a/drivers/infiniband/hw/hfi1/Makefile +++ b/drivers/infiniband/hw/hfi1/Makefile @@ -8,7 +8,7 @@ obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o hfi1-y := affinity.o chip.o device.o driver.o efivar.o \ - eprom.o file_ops.o firmware.o \ + eprom.o exp_rcv.o file_ops.o firmware.o \ init.o intr.o mad.o mmu_rb.o pcie.o pio.o pio_copy.o platform.o \ qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o \ uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs.o \ diff --git a/drivers/infiniband/hw/hfi1/exp_rcv.c b/drivers/infiniband/hw/hfi1/exp_rcv.c new file mode 100644 index 000000000000..08d13ed1b574 --- /dev/null +++ b/drivers/infiniband/hw/hfi1/exp_rcv.c @@ -0,0 +1,118 @@ +/* + * Copyright(c) 2017 Intel Corporation. + * + * This file is provided under a dual BSD/GPLv2 license. 
When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * - Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include "exp_rcv.h" +#include "trace.h" + +/** + * exp_tid_group_init - initialize exp_tid_set + * @set - the set + */ +void hfi1_exp_tid_group_init(struct exp_tid_set *set) +{ + INIT_LIST_HEAD(&set->list); + set->count = 0; +} + +/** + * alloc_ctxt_rcv_groups - initialize expected receive groups + * @rcd - the context to add the groupings to + */ +int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd) +{ + struct hfi1_devdata *dd = rcd->dd; + u32 tidbase; + struct tid_group *grp; + int i; + + hfi1_exp_tid_group_init(&rcd->tid_group_list); + hfi1_exp_tid_group_init(&rcd->tid_used_list); + hfi1_exp_tid_group_init(&rcd->tid_full_list); + + tidbase = rcd->expected_base; + for (i = 0; i < rcd->expected_count / + dd->rcv_entries.group_size; i++) { + grp = kzalloc(sizeof(*grp), GFP_KERNEL); + if (!grp) + goto bail; + grp->size = dd->rcv_entries.group_size; + grp->base = tidbase; + tid_group_add_tail(grp, &rcd->tid_group_list); + tidbase += dd->rcv_entries.group_size; + } + + return 0; +bail: + hfi1_free_ctxt_rcv_groups(rcd); + return -ENOMEM; +} + +/** + * free_ctxt_rcv_groups - free expected receive groups + * @rcd - the context to free + * + * The routine dismantles the expect receive linked + * list and clears any tids associated with the receive + * context. + * + * This should only be called for kernel contexts and the + * a base user context. 
+ */ +void hfi1_free_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd) +{ + struct tid_group *grp, *gptr; + + WARN_ON(!EXP_TID_SET_EMPTY(rcd->tid_full_list)); + WARN_ON(!EXP_TID_SET_EMPTY(rcd->tid_used_list)); + + list_for_each_entry_safe(grp, gptr, &rcd->tid_group_list.list, list) { + tid_group_remove(grp, &rcd->tid_group_list); + kfree(grp); + } + + hfi1_clear_tids(rcd); +} diff --git a/drivers/infiniband/hw/hfi1/exp_rcv.h b/drivers/infiniband/hw/hfi1/exp_rcv.h new file mode 100644 index 000000000000..c7d02bcddded --- /dev/null +++ b/drivers/infiniband/hw/hfi1/exp_rcv.h @@ -0,0 +1,187 @@ +#ifndef _HFI1_EXP_RCV_H +#define _HFI1_EXP_RCV_H +/* + * Copyright(c) 2017 Intel Corporation. + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * - Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include "hfi.h" + +#define EXP_TID_SET_EMPTY(set) (set.count == 0 && list_empty(&set.list)) + +#define EXP_TID_TIDLEN_MASK 0x7FFULL +#define EXP_TID_TIDLEN_SHIFT 0 +#define EXP_TID_TIDCTRL_MASK 0x3ULL +#define EXP_TID_TIDCTRL_SHIFT 20 +#define EXP_TID_TIDIDX_MASK 0x3FFULL +#define EXP_TID_TIDIDX_SHIFT 22 +#define EXP_TID_GET(tid, field) \ + (((tid) >> EXP_TID_TID##field##_SHIFT) & EXP_TID_TID##field##_MASK) + +#define EXP_TID_SET(field, value) \ + (((value) & EXP_TID_TID##field##_MASK) << \ + EXP_TID_TID##field##_SHIFT) +#define EXP_TID_CLEAR(tid, field) ({ \ + (tid) &= ~(EXP_TID_TID##field##_MASK << \ + EXP_TID_TID##field##_SHIFT); \ + }) +#define EXP_TID_RESET(tid, field, value) do { \ + EXP_TID_CLEAR(tid, field); \ + (tid) |= EXP_TID_SET(field, (value)); \ + } while (0) + +/* + * Define fields in the KDETH header so we can update the header + * template. + */ +#define KDETH_OFFSET_SHIFT 0 +#define KDETH_OFFSET_MASK 0x7fff +#define KDETH_OM_SHIFT 15 +#define KDETH_OM_MASK 0x1 +#define KDETH_TID_SHIFT 16 +#define KDETH_TID_MASK 0x3ff +#define KDETH_TIDCTRL_SHIFT 26 +#define KDETH_TIDCTRL_MASK 0x3 +#define KDETH_INTR_SHIFT 28 +#define KDETH_INTR_MASK 0x1 +#define KDETH_SH_SHIFT 29 +#define KDETH_SH_MASK 0x1 +#define KDETH_KVER_SHIFT 30 +#define KDETH_KVER_MASK 0x3 +#define KDETH_JKEY_SHIFT 0x0 +#define KDETH_JKEY_MASK 0xff +#define KDETH_HCRC_UPPER_SHIFT 16 +#define KDETH_HCRC_UPPER_MASK 0xff +#define KDETH_HCRC_LOWER_SHIFT 24 +#define KDETH_HCRC_LOWER_MASK 0xff + +#define KDETH_GET(val, field) \ + (((le32_to_cpu((val))) >> KDETH_##field##_SHIFT) & KDETH_##field##_MASK) +#define KDETH_SET(dw, field, val) do { \ + u32 dwval = le32_to_cpu(dw); \ + dwval &= ~(KDETH_##field##_MASK << KDETH_##field##_SHIFT); \ + dwval |= (((val) & KDETH_##field##_MASK) << \ + KDETH_##field##_SHIFT); \ + dw = cpu_to_le32(dwval); \ + } while (0) + +#define KDETH_RESET(dw, field, val) ({ dw = 0; KDETH_SET(dw, field, val); }) + +/* KDETH OM multipliers and switch over point */ +#define KDETH_OM_SMALL 4 +#define KDETH_OM_SMALL_SHIFT 2 +#define KDETH_OM_LARGE 64 +#define KDETH_OM_LARGE_SHIFT 6 +#define KDETH_OM_MAX_SIZE (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1)) + +struct tid_group { + struct list_head list; + u32 base; + u8 size; + u8 used; + u8 map; +}; + +/* + * Write an "empty" RcvArray entry. + * This function exists so the TID registaration code can use it + * to write to unused/unneeded entries and still take advantage + * of the WC performance improvements. The HFI will ignore this + * write to the RcvArray entry. + */ +static inline void rcv_array_wc_fill(struct hfi1_devdata *dd, u32 index) +{ + /* + * Doing the WC fill writes only makes sense if the device is + * present and the RcvArray has been mapped as WC memory. 
+ */ + if ((dd->flags & HFI1_PRESENT) && dd->rcvarray_wc) + writeq(0, dd->rcvarray_wc + (index * 8)); +} + +static inline void tid_group_add_tail(struct tid_group *grp, + struct exp_tid_set *set) +{ + list_add_tail(&grp->list, &set->list); + set->count++; +} + +static inline void tid_group_remove(struct tid_group *grp, + struct exp_tid_set *set) +{ + list_del_init(&grp->list); + set->count--; +} + +static inline void tid_group_move(struct tid_group *group, + struct exp_tid_set *s1, + struct exp_tid_set *s2) +{ + tid_group_remove(group, s1); + tid_group_add_tail(group, s2); +} + +static inline struct tid_group *tid_group_pop(struct exp_tid_set *set) +{ + struct tid_group *grp = + list_first_entry(&set->list, struct tid_group, list); + list_del_init(&grp->list); + set->count--; + return grp; +} + +static inline u32 rcventry2tidinfo(u32 rcventry) +{ + u32 pair = rcventry & ~0x1; + + return EXP_TID_SET(IDX, pair >> 1) | + EXP_TID_SET(CTRL, 1 << (rcventry - pair)); +} + +int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd); +void hfi1_free_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd); +void hfi1_exp_tid_group_init(struct exp_tid_set *set); + +#endif /* _HFI1_EXP_RCV_H */ diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index 3158128d57e8..2dd8758f0644 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -804,7 +804,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) dd->rcd[uctxt->ctxt] = NULL; - hfi1_user_exp_rcv_grp_free(uctxt); + hfi1_free_ctxt_rcv_groups(uctxt); hfi1_clear_ctxt_pkey(dd, uctxt); uctxt->rcvwait_to = 0; @@ -1260,7 +1260,7 @@ static int setup_base_ctxt(struct hfi1_filedata *fd) if (ret) goto setup_failed; - ret = hfi1_user_exp_rcv_grp_init(fd); + ret = hfi1_alloc_ctxt_rcv_groups(uctxt); if (ret) goto setup_failed; diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c index a8f0aa4722f6..6318e6ca1b18 100644 --- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c @@ -51,14 +51,6 @@ #include "trace.h" #include "mmu_rb.h" -struct tid_group { - struct list_head list; - u32 base; - u8 size; - u8 used; - u8 map; -}; - struct tid_rb_node { struct mmu_rb_node mmu; unsigned long phys; @@ -75,8 +67,6 @@ struct tid_pageset { u16 count; }; -#define EXP_TID_SET_EMPTY(set) (set.count == 0 && list_empty(&set.list)) - #define num_user_pages(vaddr, len) \ (1 + (((((unsigned long)(vaddr) + \ (unsigned long)(len) - 1) & PAGE_MASK) - \ @@ -109,88 +99,6 @@ static struct mmu_rb_ops tid_rb_ops = { .invalidate = tid_rb_invalidate }; -static inline u32 rcventry2tidinfo(u32 rcventry) -{ - u32 pair = rcventry & ~0x1; - - return EXP_TID_SET(IDX, pair >> 1) | - EXP_TID_SET(CTRL, 1 << (rcventry - pair)); -} - -static inline void exp_tid_group_init(struct exp_tid_set *set) -{ - INIT_LIST_HEAD(&set->list); - set->count = 0; -} - -static inline void tid_group_remove(struct tid_group *grp, - struct exp_tid_set *set) -{ - list_del_init(&grp->list); - set->count--; -} - -static inline void tid_group_add_tail(struct tid_group *grp, - struct exp_tid_set *set) -{ - list_add_tail(&grp->list, &set->list); - set->count++; -} - -static inline struct tid_group *tid_group_pop(struct exp_tid_set *set) -{ - struct tid_group *grp = - list_first_entry(&set->list, struct tid_group, list); - list_del_init(&grp->list); - set->count--; - return grp; -} - -static inline void tid_group_move(struct tid_group *group, - struct exp_tid_set *s1, - 
struct exp_tid_set *s2) -{ - tid_group_remove(group, s1); - tid_group_add_tail(group, s2); -} - -int hfi1_user_exp_rcv_grp_init(struct hfi1_filedata *fd) -{ - struct hfi1_ctxtdata *uctxt = fd->uctxt; - struct hfi1_devdata *dd = fd->dd; - u32 tidbase; - u32 i; - struct tid_group *grp, *gptr; - - exp_tid_group_init(&uctxt->tid_group_list); - exp_tid_group_init(&uctxt->tid_used_list); - exp_tid_group_init(&uctxt->tid_full_list); - - tidbase = uctxt->expected_base; - for (i = 0; i < uctxt->expected_count / - dd->rcv_entries.group_size; i++) { - grp = kzalloc(sizeof(*grp), GFP_KERNEL); - if (!grp) - goto grp_failed; - - grp->size = dd->rcv_entries.group_size; - grp->base = tidbase; - tid_group_add_tail(grp, &uctxt->tid_group_list); - tidbase += dd->rcv_entries.group_size; - } - - return 0; - -grp_failed: - list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list, - list) { - list_del_init(&grp->list); - kfree(grp); - } - - return -ENOMEM; -} - /* * Initialize context and file private data needed for Expected * receive caching. This needs to be done after the context has @@ -266,18 +174,6 @@ int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd) return ret; } -void hfi1_user_exp_rcv_grp_free(struct hfi1_ctxtdata *uctxt) -{ - struct tid_group *grp, *gptr; - - list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list, - list) { - list_del_init(&grp->list); - kfree(grp); - } - hfi1_clear_tids(uctxt); -} - void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd) { struct hfi1_ctxtdata *uctxt = fd->uctxt; @@ -302,23 +198,6 @@ void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd) fd->entry_to_rb = NULL; } -/* - * Write an "empty" RcvArray entry. - * This function exists so the TID registaration code can use it - * to write to unused/unneeded entries and still take advantage - * of the WC performance improvements. The HFI will ignore this - * write to the RcvArray entry. - */ -static inline void rcv_array_wc_fill(struct hfi1_devdata *dd, u32 index) -{ - /* - * Doing the WC fill writes only makes sense if the device is - * present and the RcvArray has been mapped as WC memory. 
- */ - if ((dd->flags & HFI1_PRESENT) && dd->rcvarray_wc) - writeq(0, dd->rcvarray_wc + (index * 8)); -} - /* * RcvArray entry allocation for Expected Receives is done by the * following algorithm: diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h index 5250c897298d..1bdc61be53cb 100644 --- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h @@ -49,29 +49,8 @@ #include "hfi.h" -#define EXP_TID_TIDLEN_MASK 0x7FFULL -#define EXP_TID_TIDLEN_SHIFT 0 -#define EXP_TID_TIDCTRL_MASK 0x3ULL -#define EXP_TID_TIDCTRL_SHIFT 20 -#define EXP_TID_TIDIDX_MASK 0x3FFULL -#define EXP_TID_TIDIDX_SHIFT 22 -#define EXP_TID_GET(tid, field) \ - (((tid) >> EXP_TID_TID##field##_SHIFT) & EXP_TID_TID##field##_MASK) +#include "exp_rcv.h" -#define EXP_TID_SET(field, value) \ - (((value) & EXP_TID_TID##field##_MASK) << \ - EXP_TID_TID##field##_SHIFT) -#define EXP_TID_CLEAR(tid, field) ({ \ - (tid) &= ~(EXP_TID_TID##field##_MASK << \ - EXP_TID_TID##field##_SHIFT); \ - }) -#define EXP_TID_RESET(tid, field, value) do { \ - EXP_TID_CLEAR(tid, field); \ - (tid) |= EXP_TID_SET(field, (value)); \ - } while (0) - -void hfi1_user_exp_rcv_grp_free(struct hfi1_ctxtdata *uctxt); -int hfi1_user_exp_rcv_grp_init(struct hfi1_filedata *fd); int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd); void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd); int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd, diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index fcadbb9978ca..8f7cfdd9e9ec 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -94,27 +94,6 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12 /* Number of BTH.PSN bits used for sequence number in expected rcvs */ #define BTH_SEQ_MASK 0x7ffull -/* - * Define fields in the KDETH header so we can update the header - * template. - */ -#define KDETH_OFFSET_SHIFT 0 -#define KDETH_OFFSET_MASK 0x7fff -#define KDETH_OM_SHIFT 15 -#define KDETH_OM_MASK 0x1 -#define KDETH_TID_SHIFT 16 -#define KDETH_TID_MASK 0x3ff -#define KDETH_TIDCTRL_SHIFT 26 -#define KDETH_TIDCTRL_MASK 0x3 -#define KDETH_INTR_SHIFT 28 -#define KDETH_INTR_MASK 0x1 -#define KDETH_SH_SHIFT 29 -#define KDETH_SH_MASK 0x1 -#define KDETH_HCRC_UPPER_SHIFT 16 -#define KDETH_HCRC_UPPER_MASK 0xff -#define KDETH_HCRC_LOWER_SHIFT 24 -#define KDETH_HCRC_LOWER_MASK 0xff - #define AHG_KDETH_INTR_SHIFT 12 #define AHG_KDETH_SH_SHIFT 13 #define AHG_KDETH_ARRAY_SIZE 9 @@ -122,16 +101,6 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12 #define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4) #define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff) -#define KDETH_GET(val, field) \ - (((le32_to_cpu((val))) >> KDETH_##field##_SHIFT) & KDETH_##field##_MASK) -#define KDETH_SET(dw, field, val) do { \ - u32 dwval = le32_to_cpu(dw); \ - dwval &= ~(KDETH_##field##_MASK << KDETH_##field##_SHIFT); \ - dwval |= (((val) & KDETH_##field##_MASK) << \ - KDETH_##field##_SHIFT); \ - dw = cpu_to_le32(dwval); \ - } while (0) - #define AHG_HEADER_SET(arr, idx, dw, bit, width, value) \ do { \ if ((idx) < ARRAY_SIZE((arr))) \ @@ -142,13 +111,6 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. 
Default: 12 return -ERANGE; \ } while (0) -/* KDETH OM multipliers and switch over point */ -#define KDETH_OM_SMALL 4 -#define KDETH_OM_SMALL_SHIFT 2 -#define KDETH_OM_LARGE 64 -#define KDETH_OM_LARGE_SHIFT 6 -#define KDETH_OM_MAX_SIZE (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1)) - /* Tx request flag bits */ #define TXREQ_FLAGS_REQ_ACK BIT(0) /* Set the ACK bit in the header */ #define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */ From f5114440c5491d4737b061c3a77195088bbaca52 Mon Sep 17 00:00:00 2001 From: Jan Sokolowski Date: Fri, 9 Jun 2017 15:59:46 -0700 Subject: [PATCH 0117/1795] IB/hfi1: Remove reading platform configuration from EFI variable Currently, platform configuration can be read from EFI variable for discrete cards. It will happen when reading from EPROM fails. EFI variables should not be queried for platform configuration in any scenario. Reviewed-by: Jakub Byczkowski Signed-off-by: Jan Sokolowski Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/platform.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c index cbda9b189216..41307e474525 100644 --- a/drivers/infiniband/hw/hfi1/platform.c +++ b/drivers/infiniband/hw/hfi1/platform.c @@ -136,7 +136,6 @@ static void save_platform_config_fields(struct hfi1_devdata *dd) void get_platform_config(struct hfi1_devdata *dd) { int ret = 0; - unsigned long size = 0; u8 *temp_platform_config = NULL; u32 esize; @@ -160,15 +159,6 @@ void get_platform_config(struct hfi1_devdata *dd) dd->platform_config.size = esize; return; } - /* fail, try EFI variable */ - - ret = read_hfi1_efi_var(dd, "configuration", &size, - (void **)&temp_platform_config); - if (!ret) { - dd->platform_config.data = temp_platform_config; - dd->platform_config.size = size; - return; - } } dd_dev_err(dd, "%s: Failed to get platform config, falling back to sub-optimal default file\n", From f523984fb85d16e098aac94642cc16803a4cc61f Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Fri, 9 Jun 2017 15:59:53 -0700 Subject: [PATCH 0118/1795] IB/hfi1: Use a template for tid reg/unreg This is the preferred way to add a duplicate trace call. 
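For readers less familiar with the kernel tracepoint template mechanism: DECLARE_EVENT_CLASS() carries the record layout, assignment and print format once, and each DEFINE_EVENT() stamps out a named event that reuses them. A minimal, generic sketch with hypothetical names (not the hfi1 definitions):

	DECLARE_EVENT_CLASS(example_class,
		TP_PROTO(unsigned int ctxt, u32 index),
		TP_ARGS(ctxt, index),
		TP_STRUCT__entry(
			__field(unsigned int, ctxt)
			__field(u32, index)
		),
		TP_fast_assign(
			__entry->ctxt = ctxt;
			__entry->index = index;
		),
		TP_printk("ctxt %u index %u", __entry->ctxt, __entry->index)
	);

	DEFINE_EVENT(example_class, example_event_reg,
		TP_PROTO(unsigned int ctxt, u32 index),
		TP_ARGS(ctxt, index));

	DEFINE_EVENT(example_class, example_event_unreg,
		TP_PROTO(unsigned int ctxt, u32 index),
		TP_ARGS(ctxt, index));

The hunk below applies exactly this shape to the existing hfi1_exp_tid_reg/hfi1_exp_tid_unreg events so the duplicated TRACE_EVENT() body is written only once.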
Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/trace_rx.h | 46 ++++++++------------------- 1 file changed, 13 insertions(+), 33 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/trace_rx.h b/drivers/infiniband/hw/hfi1/trace_rx.h index 05fc6d68ffe8..7af593827d37 100644 --- a/drivers/infiniband/hw/hfi1/trace_rx.h +++ b/drivers/infiniband/hw/hfi1/trace_rx.h @@ -138,7 +138,8 @@ TRACE_EVENT(hfi1_receive_interrupt, ) ); -TRACE_EVENT(hfi1_exp_tid_reg, +DECLARE_EVENT_CLASS( + hfi1_exp_tid_reg_unreg, TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages, unsigned long va, unsigned long pa, dma_addr_t dma), @@ -172,38 +173,17 @@ TRACE_EVENT(hfi1_exp_tid_reg, ) ); -TRACE_EVENT(hfi1_exp_tid_unreg, - TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages, - unsigned long va, unsigned long pa, dma_addr_t dma), - TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma), - TP_STRUCT__entry( - __field(unsigned int, ctxt) - __field(u16, subctxt) - __field(u32, rarr) - __field(u32, npages) - __field(unsigned long, va) - __field(unsigned long, pa) - __field(dma_addr_t, dma) - ), - TP_fast_assign( - __entry->ctxt = ctxt; - __entry->subctxt = subctxt; - __entry->rarr = rarr; - __entry->npages = npages; - __entry->va = va; - __entry->pa = pa; - __entry->dma = dma; - ), - TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx", - __entry->ctxt, - __entry->subctxt, - __entry->rarr, - __entry->npages, - __entry->pa, - __entry->va, - __entry->dma - ) - ); +DEFINE_EVENT( + hfi1_exp_tid_reg_unreg, hfi1_exp_tid_unreg, + TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages, + unsigned long va, unsigned long pa, dma_addr_t dma), + TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma)); + +DEFINE_EVENT( + hfi1_exp_tid_reg_unreg, hfi1_exp_tid_reg, + TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages, + unsigned long va, unsigned long pa, dma_addr_t dma), + TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma)); TRACE_EVENT(hfi1_exp_tid_inval, TP_PROTO(unsigned int ctxt, u16 subctxt, unsigned long va, u32 rarr, From 8cb1021b806bda3f5fda6f3699e1f98df14245df Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Fri, 9 Jun 2017 15:59:59 -0700 Subject: [PATCH 0119/1795] IB/hfi1: Add traces for TID operations This patch adds a trace for putting a TID and for writing the RcvArray CSR. The CSR access template can be easily extended for additional CSR readq/writeq calls. Reviewed-by: Ashutosh Dixit Signed-off-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/chip.c | 16 ++--------- drivers/infiniband/hw/hfi1/trace_misc.h | 20 ++++++++++++++ drivers/infiniband/hw/hfi1/trace_rx.h | 35 +++++++++++++++++++++++++ 3 files changed, 57 insertions(+), 14 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index b8ee0e27dae6..937350d9deab 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -9745,17 +9745,6 @@ static inline int init_cpu_counters(struct hfi1_devdata *dd) return 0; } -static const char * const pt_names[] = { - "expected", - "eager", - "invalid" -}; - -static const char *pt_name(u32 type) -{ - return type >= ARRAY_SIZE(pt_names) ? 
"unknown" : pt_names[type]; -} - /* * index is the index into the receive array */ @@ -9777,15 +9766,14 @@ void hfi1_put_tid(struct hfi1_devdata *dd, u32 index, type, index); goto done; } - - hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx", - pt_name(type), index, pa, (unsigned long)order); + trace_hfi1_put_tid(dd, index, type, pa, order); #define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */ reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK) << RCV_ARRAY_RT_ADDR_SHIFT; + trace_hfi1_write_rcvarray(base + (index * 8), reg); writeq(reg, base + (index * 8)); if (type == PT_EAGER) diff --git a/drivers/infiniband/hw/hfi1/trace_misc.h b/drivers/infiniband/hw/hfi1/trace_misc.h index deac77ddaeab..8db2253523ff 100644 --- a/drivers/infiniband/hw/hfi1/trace_misc.h +++ b/drivers/infiniband/hw/hfi1/trace_misc.h @@ -72,6 +72,26 @@ TRACE_EVENT(hfi1_interrupt, __entry->src) ); +DECLARE_EVENT_CLASS( + hfi1_csr_template, + TP_PROTO(void __iomem *addr, u64 value), + TP_ARGS(addr, value), + TP_STRUCT__entry( + __field(void __iomem *, addr) + __field(u64, value) + ), + TP_fast_assign( + __entry->addr = addr; + __entry->value = value; + ), + TP_printk("addr %p value %llx", __entry->addr, __entry->value) +); + +DEFINE_EVENT( + hfi1_csr_template, hfi1_write_rcvarray, + TP_PROTO(void __iomem *addr, u64 value), + TP_ARGS(addr, value)); + #ifdef CONFIG_FAULT_INJECTION TRACE_EVENT(hfi1_fault_opcode, TP_PROTO(struct rvt_qp *qp, u8 opcode), diff --git a/drivers/infiniband/hw/hfi1/trace_rx.h b/drivers/infiniband/hw/hfi1/trace_rx.h index 7af593827d37..84929578cfe6 100644 --- a/drivers/infiniband/hw/hfi1/trace_rx.h +++ b/drivers/infiniband/hw/hfi1/trace_rx.h @@ -52,6 +52,13 @@ #include "hfi.h" +#define tidtype_name(type) { PT_##type, #type } +#define show_tidtype(type) \ +__print_symbolic(type, \ + tidtype_name(EXPECTED), \ + tidtype_name(EAGER), \ + tidtype_name(INVALID)) \ + #undef TRACE_SYSTEM #define TRACE_SYSTEM hfi1_rx @@ -185,6 +192,34 @@ DEFINE_EVENT( unsigned long va, unsigned long pa, dma_addr_t dma), TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma)); +TRACE_EVENT( + hfi1_put_tid, + TP_PROTO(struct hfi1_devdata *dd, + u32 index, u32 type, unsigned long pa, u16 order), + TP_ARGS(dd, index, type, pa, order), + TP_STRUCT__entry( + DD_DEV_ENTRY(dd) + __field(unsigned long, pa); + __field(u32, index); + __field(u32, type); + __field(u16, order); + ), + TP_fast_assign( + DD_DEV_ASSIGN(dd); + __entry->pa = pa; + __entry->index = index; + __entry->type = type; + __entry->order = order; + ), + TP_printk("[%s] type %s pa %lx index %u order %u", + __get_str(dev), + show_tidtype(__entry->type), + __entry->pa, + __entry->index, + __entry->order + ) +); + TRACE_EVENT(hfi1_exp_tid_inval, TP_PROTO(unsigned int ctxt, u16 subctxt, unsigned long va, u32 rarr, u32 npages, dma_addr_t dma), From 581d01aaaca1fbb9df83cf3337c77e85215dcc5b Mon Sep 17 00:00:00 2001 From: "Michael J. Ruhl" Date: Fri, 9 Jun 2017 16:00:06 -0700 Subject: [PATCH 0120/1795] IB/qib: Replace deprecated pci functions with new API pci_enable_msix_range() and pci_disable_msix() have been deprecated. Updating to the new pci_alloc_irq_vectors() interface. Reviewed-by: Sebastian Sanchez Reviewed-by: Mike Marciniszyn Signed-off-by: Michael J. 
Ruhl Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qib/qib.h | 8 +- drivers/infiniband/hw/qib/qib_iba6120.c | 6 +- drivers/infiniband/hw/qib/qib_iba7220.c | 7 +- drivers/infiniband/hw/qib/qib_iba7322.c | 48 ++++---- drivers/infiniband/hw/qib/qib_pcie.c | 149 ++++++++++-------------- 5 files changed, 98 insertions(+), 120 deletions(-) diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index a3e21a25cea5..f9e1c69603a5 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h @@ -1,7 +1,7 @@ #ifndef _QIB_KERNEL_H #define _QIB_KERNEL_H /* - * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved. + * Copyright (c) 2012 - 2017 Intel Corporation. All rights reserved. * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * @@ -443,7 +443,7 @@ struct qib_irq_notify; #endif struct qib_msix_entry { - struct msix_entry msix; + int irq; void *arg; #ifdef CONFIG_INFINIBAND_QIB_DCA int dca; @@ -1433,9 +1433,9 @@ int qib_pcie_init(struct pci_dev *, const struct pci_device_id *); int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *, const struct pci_device_id *); void qib_pcie_ddcleanup(struct qib_devdata *); -int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct qib_msix_entry *); +int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent); int qib_reinit_intr(struct qib_devdata *); -void qib_enable_intx(struct pci_dev *); +void qib_enable_intx(struct qib_devdata *dd); void qib_nomsi(struct qib_devdata *); void qib_nomsix(struct qib_devdata *); void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *); diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index e423b71e6ea0..46045fc28fa0 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Intel Corporation. All rights reserved. + * Copyright (c) 2013 - 2017 Intel Corporation. All rights reserved. * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. * All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. @@ -1838,7 +1838,7 @@ static int qib_6120_setup_reset(struct qib_devdata *dd) bail: if (ret) { - if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL)) + if (qib_pcie_params(dd, dd->lbus_width, NULL)) qib_dev_err(dd, "Reset failed to setup PCIe or interrupts; continuing anyway\n"); /* clear the reset error, init error/hwerror mask */ @@ -3562,7 +3562,7 @@ struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev, if (qib_mini_init) goto bail; - if (qib_pcie_params(dd, 8, NULL, NULL)) + if (qib_pcie_params(dd, 8, NULL)) qib_dev_err(dd, "Failed to setup PCIe or interrupts; continuing anyway\n"); dd->cspec->irq = pdev->irq; /* save IRQ */ diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index c3679c48e61c..49cd6e3beb72 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c @@ -1,4 +1,5 @@ /* + * Copyright (c) 2011 - 2017 Intel Corporation. All rights reserved. * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. * All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 
@@ -2148,7 +2149,7 @@ static int qib_setup_7220_reset(struct qib_devdata *dd) bail: if (ret) { - if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL)) + if (qib_pcie_params(dd, dd->lbus_width, NULL)) qib_dev_err(dd, "Reset failed to setup PCIe or interrupts; continuing anyway\n"); @@ -3309,7 +3310,7 @@ static int qib_7220_intr_fallback(struct qib_devdata *dd) qib_devinfo(dd->pcidev, "MSI interrupt not detected, trying INTx interrupts\n"); qib_7220_free_irq(dd); - qib_enable_intx(dd->pcidev); + qib_enable_intx(dd); /* * Some newer kernels require free_irq before disable_msi, * and irq can be changed during disable and INTx enable @@ -4619,7 +4620,7 @@ struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev, minwidth = 8; /* x8 capable boards */ break; } - if (qib_pcie_params(dd, minwidth, NULL, NULL)) + if (qib_pcie_params(dd, minwidth, NULL)) qib_dev_err(dd, "Failed to setup PCIe or interrupts; continuing anyway\n"); diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index bb2439fff8fa..2653064ce9e9 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Intel Corporation. All rights reserved. + * Copyright (c) 2012 - 2017 Intel Corporation. All rights reserved. * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved. * * This software is available to you under a choice of one of two @@ -2841,10 +2841,10 @@ static void qib_7322_nomsix(struct qib_devdata *dd) reset_dca_notifier(dd, &dd->cspec->msix_entries[i]); #endif irq_set_affinity_hint( - dd->cspec->msix_entries[i].msix.vector, NULL); + dd->cspec->msix_entries[i].irq, NULL); free_cpumask_var(dd->cspec->msix_entries[i].mask); - free_irq(dd->cspec->msix_entries[i].msix.vector, - dd->cspec->msix_entries[i].arg); + free_irq(dd->cspec->msix_entries[i].irq, + dd->cspec->msix_entries[i].arg); } qib_nomsix(dd); } @@ -3336,9 +3336,9 @@ static void reset_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m) qib_devinfo(dd->pcidev, "Disabling notifier on HCA %d irq %d\n", dd->unit, - m->msix.vector); + m->irq); irq_set_affinity_notifier( - m->msix.vector, + m->irq, NULL); m->notifier = NULL; } @@ -3354,7 +3354,7 @@ static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m) int ret; m->notifier = n; - n->notify.irq = m->msix.vector; + n->notify.irq = m->irq; n->notify.notify = qib_irq_notifier_notify; n->notify.release = qib_irq_notifier_release; n->arg = m->arg; @@ -3500,10 +3500,21 @@ try_intx: - 1, QIB_DRV_NAME "%d (kctx)", dd->unit); } - ret = request_irq( - dd->cspec->msix_entries[msixnum].msix.vector, - handler, 0, dd->cspec->msix_entries[msixnum].name, - arg); + + dd->cspec->msix_entries[msixnum].irq = pci_irq_vector( + dd->pcidev, msixnum); + if (dd->cspec->msix_entries[msixnum].irq < 0) { + qib_dev_err(dd, + "Couldn't get MSIx irq (vec=%d): %d\n", + msixnum, + dd->cspec->msix_entries[msixnum].irq); + qib_7322_nomsix(dd); + goto try_intx; + } + ret = request_irq(dd->cspec->msix_entries[msixnum].irq, + handler, 0, + dd->cspec->msix_entries[msixnum].name, + arg); if (ret) { /* * Shouldn't happen since the enable said we could @@ -3512,7 +3523,7 @@ try_intx: qib_dev_err(dd, "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n", msixnum, - dd->cspec->msix_entries[msixnum].msix.vector, + dd->cspec->msix_entries[msixnum].irq, ret); qib_7322_nomsix(dd); goto try_intx; @@ -3548,7 +3559,7 @@ try_intx: dd->cspec->msix_entries[msixnum].mask); } irq_set_affinity_hint( - 
dd->cspec->msix_entries[msixnum].msix.vector, + dd->cspec->msix_entries[msixnum].irq, dd->cspec->msix_entries[msixnum].mask); } msixnum++; @@ -3744,7 +3755,6 @@ static int qib_do_7322_reset(struct qib_devdata *dd) if (msix_entries) { /* restore the MSIx vector address and data if saved above */ for (i = 0; i < msix_entries; i++) { - dd->cspec->msix_entries[i].msix.entry = i; if (!msix_vecsave || !msix_vecsave[2 * i]) continue; qib_write_kreg(dd, 2 * i + @@ -3762,8 +3772,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd) write_7322_initregs(dd); if (qib_pcie_params(dd, dd->lbus_width, - &dd->cspec->num_msix_entries, - dd->cspec->msix_entries)) + &dd->cspec->num_msix_entries)) qib_dev_err(dd, "Reset failed to setup PCIe or interrupts; continuing anyway\n"); @@ -5195,7 +5204,7 @@ static int qib_7322_intr_fallback(struct qib_devdata *dd) qib_devinfo(dd->pcidev, "MSIx interrupt not detected, trying INTx interrupts\n"); qib_7322_nomsix(dd); - qib_enable_intx(dd->pcidev); + qib_enable_intx(dd); qib_setup_7322_interrupt(dd, 0); return 1; } @@ -7327,10 +7336,7 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev, if (!dd->cspec->msix_entries) tabsize = 0; - for (i = 0; i < tabsize; i++) - dd->cspec->msix_entries[i].msix.entry = i; - - if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries)) + if (qib_pcie_params(dd, 8, &tabsize)) qib_dev_err(dd, "Failed to setup PCIe or interrupts; continuing anyway\n"); /* may be less than we wanted, if not enough available */ diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c index c379b8342a09..d90403e31a9d 100644 --- a/drivers/infiniband/hw/qib/qib_pcie.c +++ b/drivers/infiniband/hw/qib/qib_pcie.c @@ -1,4 +1,5 @@ /* + * Copyright (c) 2010 - 2017 Intel Corporation. All rights reserved. * Copyright (c) 2008, 2009 QLogic Corporation. All rights reserved. * * This software is available to you under a choice of one of two @@ -187,112 +188,84 @@ void qib_pcie_ddcleanup(struct qib_devdata *dd) pci_set_drvdata(dd->pcidev, NULL); } -static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt, - struct qib_msix_entry *qib_msix_entry) -{ - int ret; - int nvec = *msixcnt; - struct msix_entry *msix_entry; - int i; - - ret = pci_msix_vec_count(dd->pcidev); - if (ret < 0) - goto do_intx; - - nvec = min(nvec, ret); - - /* We can't pass qib_msix_entry array to qib_msix_setup - * so use a dummy msix_entry array and copy the allocated - * irq back to the qib_msix_entry array. */ - msix_entry = kcalloc(nvec, sizeof(*msix_entry), GFP_KERNEL); - if (!msix_entry) - goto do_intx; - - for (i = 0; i < nvec; i++) - msix_entry[i] = qib_msix_entry[i].msix; - - ret = pci_enable_msix_range(dd->pcidev, msix_entry, 1, nvec); - if (ret < 0) - goto free_msix_entry; - else - nvec = ret; - - for (i = 0; i < nvec; i++) - qib_msix_entry[i].msix = msix_entry[i]; - - kfree(msix_entry); - *msixcnt = nvec; - return; - -free_msix_entry: - kfree(msix_entry); - -do_intx: - qib_dev_err( - dd, - "pci_enable_msix_range %d vectors failed: %d, falling back to INTx\n", - nvec, ret); - *msixcnt = 0; - qib_enable_intx(dd->pcidev); -} - /** * We save the msi lo and hi values, so we can restore them after * chip reset (the kernel PCI infrastructure doesn't yet handle that * correctly. 
*/ -static int qib_msi_setup(struct qib_devdata *dd, int pos) +static void qib_msi_setup(struct qib_devdata *dd, int pos) { struct pci_dev *pdev = dd->pcidev; u16 control; - int ret; - ret = pci_enable_msi(pdev); - if (ret) - qib_dev_err(dd, - "pci_enable_msi failed: %d, interrupts may not work\n", - ret); - /* continue even if it fails, we may still be OK... */ - - pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO, - &dd->msi_lo); - pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI, - &dd->msi_hi); + pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO, &dd->msi_lo); + pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI, &dd->msi_hi); pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control); + /* now save the data (vector) info */ - pci_read_config_word(pdev, pos + ((control & PCI_MSI_FLAGS_64BIT) - ? 12 : 8), + pci_read_config_word(pdev, + pos + ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8), &dd->msi_data); - return ret; } -int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent, - struct qib_msix_entry *entry) +static int qib_allocate_irqs(struct qib_devdata *dd, u32 maxvec) +{ + unsigned int flags = PCI_IRQ_LEGACY; + + /* Check our capabilities */ + if (dd->pcidev->msix_cap) { + flags |= PCI_IRQ_MSIX; + } else { + if (dd->pcidev->msi_cap) { + flags |= PCI_IRQ_MSI; + /* Get msi_lo and msi_hi */ + qib_msi_setup(dd, dd->pcidev->msi_cap); + } + } + + if (!(flags & (PCI_IRQ_MSIX | PCI_IRQ_MSI))) + qib_dev_err(dd, "No PCI MSI or MSIx capability!\n"); + + return pci_alloc_irq_vectors(dd->pcidev, 1, maxvec, flags); +} + +int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent) { u16 linkstat, speed; - int pos = 0, ret = 1; + int nvec; + int maxvec; + int ret = 0; if (!pci_is_pcie(dd->pcidev)) { qib_dev_err(dd, "Can't find PCI Express capability!\n"); /* set up something... */ dd->lbus_width = 1; dd->lbus_speed = 2500; /* Gen1, 2.5GHz */ + ret = -1; goto bail; } - pos = dd->pcidev->msix_cap; - if (nent && *nent && pos) { - qib_msix_setup(dd, pos, nent, entry); - ret = 0; /* did it, either MSIx or INTx */ - } else { - pos = dd->pcidev->msi_cap; - if (pos) - ret = qib_msi_setup(dd, pos); - else - qib_dev_err(dd, "No PCI MSI or MSIx capability!\n"); + maxvec = (nent && *nent) ? *nent : 1; + nvec = qib_allocate_irqs(dd, maxvec); + if (nvec < 0) { + ret = nvec; + goto bail; + } + + /* + * If nent exists, make sure to record how many vectors were allocated + */ + if (nent) { + *nent = nvec; + + /* + * If we requested (nent) MSIX, but msix_enabled is not set, + * pci_alloc_irq_vectors() enabled INTx. + */ + if (!dd->pcidev->msix_enabled) + qib_dev_err(dd, + "no msix vectors allocated, using INTx\n"); } - if (!pos) - qib_enable_intx(dd->pcidev); pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat); /* @@ -379,7 +352,7 @@ int qib_reinit_intr(struct qib_devdata *dd) ret = 1; bail: if (!ret && (dd->flags & QIB_HAS_INTX)) { - qib_enable_intx(dd->pcidev); + qib_enable_intx(dd); ret = 1; } @@ -397,7 +370,7 @@ bail: void qib_nomsi(struct qib_devdata *dd) { dd->msi_lo = 0; - pci_disable_msi(dd->pcidev); + pci_free_irq_vectors(dd->pcidev); } /* @@ -405,23 +378,21 @@ void qib_nomsi(struct qib_devdata *dd) */ void qib_nomsix(struct qib_devdata *dd) { - pci_disable_msix(dd->pcidev); + pci_free_irq_vectors(dd->pcidev); } /* * Similar to pci_intx(pdev, 1), except that we make sure * msi(x) is off. 
*/ -void qib_enable_intx(struct pci_dev *pdev) +void qib_enable_intx(struct qib_devdata *dd) { u16 cw, new; int pos; + struct pci_dev *pdev = dd->pcidev; - /* first, turn on INTx */ - pci_read_config_word(pdev, PCI_COMMAND, &cw); - new = cw & ~PCI_COMMAND_INTX_DISABLE; - if (new != cw) - pci_write_config_word(pdev, PCI_COMMAND, new); + if (pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY) < 0) + qib_dev_err(dd, "Failed to enable INTx\n"); pos = pdev->msi_cap; if (pos) { From fe4e74eeb24286c730672e776ac4c2c3caa19137 Mon Sep 17 00:00:00 2001 From: "Michael J. Ruhl" Date: Fri, 9 Jun 2017 16:00:12 -0700 Subject: [PATCH 0121/1795] IB/hfi1: Initialize TID lists to avoid crash on cleanup The expected receive lists (tid_xxx_list) are not initialized until late in the receive context initialization. If an error happens before the initialization, a NULL pointer access will occur during cleanup. Initialized the lists sooner rather than later to avoid this Oops: IP: unlock_exp_tids.isra.11+0x26/0xd0 [hfi1] RIP: 0010:unlock_exp_tids.isra.11+0x26/0xd0 [hfi1] Call Trace: hfi1_user_exp_rcv_free+0x79/0xb0 [hfi1] hfi1_file_close+0x87/0x360 [hfi1] __fput+0xe7/0x210 ____fput+0xe/0x10 Reviewed-by: Mike Marciniszyn Reviewed-by: Sebastian Sanchez Signed-off-by: Michael J. Ruhl Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/exp_rcv.c | 4 ---- drivers/infiniband/hw/hfi1/init.c | 4 ++++ 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/exp_rcv.c b/drivers/infiniband/hw/hfi1/exp_rcv.c index 08d13ed1b574..0af91675acc6 100644 --- a/drivers/infiniband/hw/hfi1/exp_rcv.c +++ b/drivers/infiniband/hw/hfi1/exp_rcv.c @@ -69,10 +69,6 @@ int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd) struct tid_group *grp; int i; - hfi1_exp_tid_group_init(&rcd->tid_group_list); - hfi1_exp_tid_group_init(&rcd->tid_used_list); - hfi1_exp_tid_group_init(&rcd->tid_full_list); - tidbase = rcd->expected_base; for (i = 0; i < rcd->expected_count / dd->rcv_entries.group_size; i++) { diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index 4a11d4da4c92..a00308ccf016 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c @@ -67,6 +67,7 @@ #include "aspm.h" #include "affinity.h" #include "vnic.h" +#include "exp_rcv.h" #undef pr_fmt #define pr_fmt(fmt) DRIVER_NAME ": " fmt @@ -221,6 +222,9 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt, hfi1_cdbg(PROC, "setting up context %u\n", ctxt); INIT_LIST_HEAD(&rcd->qp_wait_list); + hfi1_exp_tid_group_init(&rcd->tid_group_list); + hfi1_exp_tid_group_init(&rcd->tid_used_list); + hfi1_exp_tid_group_init(&rcd->tid_full_list); rcd->ppd = ppd; rcd->dd = dd; __set_bit(0, rcd->in_use_ctxts); From f683c80ca68e087b55c6f9ab6ca6beb88ebc6d69 Mon Sep 17 00:00:00 2001 From: "Michael J. Ruhl" Date: Fri, 9 Jun 2017 16:00:19 -0700 Subject: [PATCH 0122/1795] IB/hfi1: Resolve kernel panics by reference counting receive contexts Base receive contexts can be used by sub contexts. Because of this, resources for the context cannot be completely freed until all sub contexts are done using the base context. Introduce a reference count so that the base receive context can be freed only when all sub contexts are done with it. Use the provided function call for setting default send context integrity rather than the manual method. The cleanup path does not set all variables back to NULL after freeing resources. 
Since the clean up code can get called more than once, (e.g. during context close and on the error path), it is necessary to make sure that all the variables are NULLed. Possible crash are: BUG: unable to handle kernel paging request at 0000000001908900 IP: read_csr+0x24/0x30 [hfi1] RIP: 0010:read_csr+0x24/0x30 [hfi1] Call Trace: sc_disable+0x40/0x110 [hfi1] hfi1_file_close+0x16f/0x360 [hfi1] __fput+0xe7/0x210 ____fput+0xe/0x10 or kernel BUG at mm/slub.c:3877! RIP: 0010:kfree+0x14f/0x170 Call Trace: hfi1_free_ctxtdata+0x19a/0x2b0 [hfi1] ? hfi1_user_exp_rcv_grp_free+0x73/0x80 [hfi1] hfi1_file_close+0x20f/0x360 [hfi1] __fput+0xe7/0x210 ____fput+0xe/0x10 Fixes: Commit 62239fc6e554 ("IB/hfi1: Clean up on context initialization failure") Reviewed-by: Mike Marciniszyn Reviewed-by: Sebastian Sanchez Signed-off-by: Michael J. Ruhl Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/file_ops.c | 39 ++++++++++------- drivers/infiniband/hw/hfi1/hfi.h | 11 +++-- drivers/infiniband/hw/hfi1/init.c | 58 +++++++++++++++++++++----- drivers/infiniband/hw/hfi1/vnic_main.c | 11 +++-- 4 files changed, 85 insertions(+), 34 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index 2dd8758f0644..bbf80b1dd9d9 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -774,6 +774,8 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) *ev = 0; __clear_bit(fdata->subctxt, uctxt->in_use_ctxts); + fdata->uctxt = NULL; + hfi1_rcd_put(uctxt); /* fdata reference */ if (!bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) { mutex_unlock(&hfi1_mutex); goto done; @@ -794,16 +796,15 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) /* Clear the context's J_KEY */ hfi1_clear_ctxt_jkey(dd, uctxt->ctxt); /* - * Reset context integrity checks to default. - * (writes to CSRs probably belong in chip.c) + * If a send context is allocated, reset context integrity + * checks to default and disable the send context. 
*/ - write_kctxt_csr(dd, uctxt->sc->hw_context, SEND_CTXT_CHECK_ENABLE, - hfi1_pkt_default_send_ctxt_mask(dd, uctxt->sc->type)); - sc_disable(uctxt->sc); + if (uctxt->sc) { + set_pio_integrity(uctxt->sc); + sc_disable(uctxt->sc); + } spin_unlock_irqrestore(&dd->uctxt_lock, flags); - dd->rcd[uctxt->ctxt] = NULL; - hfi1_free_ctxt_rcv_groups(uctxt); hfi1_clear_ctxt_pkey(dd, uctxt); @@ -816,8 +817,11 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) hfi1_stats.sps_ctxts--; if (++dd->freectxts == dd->num_user_contexts) aspm_enable_all(dd); + + /* _rcd_put() should be done after releasing mutex */ + dd->rcd[uctxt->ctxt] = NULL; mutex_unlock(&hfi1_mutex); - hfi1_free_ctxtdata(dd, uctxt); + hfi1_rcd_put(uctxt); /* dd reference */ done: mmdrop(fdata->mm); kobject_put(&dd->kobj); @@ -887,16 +891,17 @@ static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo) ret = wait_event_interruptible(fd->uctxt->wait, !test_bit( HFI1_CTXT_BASE_UNINIT, &fd->uctxt->event_flags)); - if (test_bit(HFI1_CTXT_BASE_FAILED, &fd->uctxt->event_flags)) { - clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts); - return -ENOMEM; - } + if (test_bit(HFI1_CTXT_BASE_FAILED, &fd->uctxt->event_flags)) + ret = -ENOMEM; + /* The only thing a sub context needs is the user_xxx stuff */ if (!ret) ret = init_user_ctxt(fd); - if (ret) + if (ret) { clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts); + hfi1_rcd_put(fd->uctxt); + } } else if (!ret) { ret = setup_base_ctxt(fd); if (fd->uctxt->subctxt_cnt) { @@ -961,6 +966,8 @@ static int find_sub_ctxt(struct hfi1_filedata *fd, fd->uctxt = uctxt; fd->subctxt = subctxt; + + hfi1_rcd_get(uctxt); __set_bit(fd->subctxt, uctxt->in_use_ctxts); return 1; @@ -1069,11 +1076,14 @@ static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd, aspm_disable_all(dd); fd->uctxt = uctxt; + /* Count the reference for the fd */ + hfi1_rcd_get(uctxt); + return 0; ctxdata_free: dd->rcd[ctxt] = NULL; - hfi1_free_ctxtdata(dd, uctxt); + hfi1_rcd_put(uctxt); return ret; } @@ -1273,6 +1283,7 @@ static int setup_base_ctxt(struct hfi1_filedata *fd) return 0; setup_failed: + /* Call _free_ctxtdata, not _rcd_put(). We still need the context. */ hfi1_free_ctxtdata(dd, uctxt); return ret; } diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index bca781c3b5ac..1a33a5087734 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -213,11 +213,9 @@ struct hfi1_ctxtdata { /* dynamic receive available interrupt timeout */ u32 rcvavail_timeout; - /* - * number of opens (including slave sub-contexts) on this instance - * (ignoring forks, dup, etc. 
for now) - */ - int cnt; + /* Reference count the base context usage */ + struct kref kref; + /* Device context index */ unsigned ctxt; /* @@ -1291,7 +1289,8 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt, void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd, struct hfi1_devdata *dd, u8 hw_pidx, u8 port); void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd); - +int hfi1_rcd_put(struct hfi1_ctxtdata *rcd); +void hfi1_rcd_get(struct hfi1_ctxtdata *rcd); int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread); int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread); int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread); diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index a00308ccf016..dfdb4126ca05 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c @@ -191,15 +191,45 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd) nomem: ret = -ENOMEM; - if (dd->rcd) { - for (i = 0; i < dd->num_rcv_contexts; ++i) - hfi1_free_ctxtdata(dd, dd->rcd[i]); - } + for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) + hfi1_rcd_put(dd->rcd[i]); + + /* All the contexts should be freed, free the array */ kfree(dd->rcd); dd->rcd = NULL; return ret; } +/* + * Helper routines for the receive context reference count (rcd and uctxt) + */ +static void hfi1_rcd_init(struct hfi1_ctxtdata *rcd) +{ + kref_init(&rcd->kref); +} + +static void hfi1_rcd_free(struct kref *kref) +{ + struct hfi1_ctxtdata *rcd = + container_of(kref, struct hfi1_ctxtdata, kref); + + hfi1_free_ctxtdata(rcd->dd, rcd); + kfree(rcd); +} + +int hfi1_rcd_put(struct hfi1_ctxtdata *rcd) +{ + if (rcd) + return kref_put(&rcd->kref, hfi1_rcd_free); + + return 0; +} + +void hfi1_rcd_get(struct hfi1_ctxtdata *rcd) +{ + kref_get(&rcd->kref); +} + /* * Common code for user and kernel context setup. */ @@ -332,6 +362,8 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt, if (!rcd->opstats) goto bail; } + + hfi1_rcd_init(rcd); } return rcd; bail: @@ -931,14 +963,11 @@ static void shutdown_device(struct hfi1_devdata *dd) * @rcd: the ctxtdata structure * * free up any allocated data for a context - * This should not touch anything that would affect a simultaneous - * re-allocation of context data, because it is called after hfi1_mutex - * is released (and can be called from reinit as well). * It should never change any chip state, or global driver state. 
*/ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) { - unsigned e; + u32 e; if (!rcd) return; @@ -957,6 +986,7 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) /* all the RcvArray entries should have been cleared by now */ kfree(rcd->egrbufs.rcvtids); + rcd->egrbufs.rcvtids = NULL; for (e = 0; e < rcd->egrbufs.alloced; e++) { if (rcd->egrbufs.buffers[e].dma) @@ -966,13 +996,21 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) rcd->egrbufs.buffers[e].dma); } kfree(rcd->egrbufs.buffers); + rcd->egrbufs.alloced = 0; + rcd->egrbufs.buffers = NULL; sc_free(rcd->sc); + rcd->sc = NULL; + vfree(rcd->subctxt_uregbase); vfree(rcd->subctxt_rcvegrbuf); vfree(rcd->subctxt_rcvhdr_base); kfree(rcd->opstats); - kfree(rcd); + + rcd->subctxt_uregbase = NULL; + rcd->subctxt_rcvegrbuf = NULL; + rcd->subctxt_rcvhdr_base = NULL; + rcd->opstats = NULL; } /* @@ -1366,7 +1404,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd) tmp[ctxt] = NULL; /* debugging paranoia */ if (rcd) { hfi1_clear_tids(rcd); - hfi1_free_ctxtdata(dd, rcd); + hfi1_rcd_put(rcd); } } kfree(tmp); diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c index b601c2929f8f..950c1b4df442 100644 --- a/drivers/infiniband/hw/hfi1/vnic_main.c +++ b/drivers/infiniband/hw/hfi1/vnic_main.c @@ -156,11 +156,11 @@ static int allocate_vnic_ctxt(struct hfi1_devdata *dd, return ret; bail: /* - * hfi1_free_ctxtdata() also releases send_context - * structure if uctxt->sc is not null + * hfi1_rcd_put() will call hfi1_free_ctxtdata(), which will + * release send_context structure if uctxt->sc is not null */ dd->rcd[uctxt->ctxt] = NULL; - hfi1_free_ctxtdata(dd, uctxt); + hfi1_rcd_put(uctxt); dd_dev_dbg(dd, "vnic allocation failed. rc %d\n", ret); return ret; } @@ -208,7 +208,7 @@ static void deallocate_vnic_ctxt(struct hfi1_devdata *dd, hfi1_clear_ctxt_pkey(dd, uctxt); hfi1_stats.sps_ctxts--; - hfi1_free_ctxtdata(dd, uctxt); + hfi1_rcd_put(uctxt); } void hfi1_vnic_setup(struct hfi1_devdata *dd) @@ -751,6 +751,7 @@ static int hfi1_vnic_init(struct hfi1_vnic_vport_info *vinfo) rc = hfi1_vnic_allot_ctxt(dd, &dd->vnic.ctxt[i]); if (rc) break; + hfi1_rcd_get(dd->vnic.ctxt[i]); dd->vnic.ctxt[i]->vnic_q_idx = i; } @@ -762,6 +763,7 @@ static int hfi1_vnic_init(struct hfi1_vnic_vport_info *vinfo) */ while (i-- > dd->vnic.num_ctxt) { deallocate_vnic_ctxt(dd, dd->vnic.ctxt[i]); + hfi1_rcd_put(dd->vnic.ctxt[i]); dd->vnic.ctxt[i] = NULL; } goto alloc_fail; @@ -791,6 +793,7 @@ static void hfi1_vnic_deinit(struct hfi1_vnic_vport_info *vinfo) if (--dd->vnic.num_vports == 0) { for (i = 0; i < dd->vnic.num_ctxt; i++) { deallocate_vnic_ctxt(dd, dd->vnic.ctxt[i]); + hfi1_rcd_put(dd->vnic.ctxt[i]); dd->vnic.ctxt[i] = NULL; } hfi1_deinit_vnic_rsm(dd); From bc5214ee29220251e5507882696ded5ca183b169 Mon Sep 17 00:00:00 2001 From: Jan Sokolowski Date: Fri, 9 Jun 2017 16:00:26 -0700 Subject: [PATCH 0123/1795] IB/hfi1: Handle missing magic values in config file Driver does not check whether proper configuration file exist in EPROM, and treats empty partition as possible valid configuration, preventing fallback to default firmware. Change EPROM read function to treat missing magic number as read error. 
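The new policy is easy to mirror in a small userspace sketch (illustrative only: the magic string, sizes and function name are invented, and the kernel's strnstr() is replaced by an explicit scan). A partition without the trailing magic now produces an error instead of being returned whole, which is what lets the caller fall back to the default firmware:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#define IMAGE_TRAIL_MAGIC "IMAGE_MAGIC"

static int read_platform_config(const char *part, size_t part_size,
				void **data, size_t *size)
{
	const size_t mlen = strlen(IMAGE_TRAIL_MAGIC);
	const char *p = NULL;
	size_t i;

	/* scan for the magic that may trail the actual data */
	for (i = 0; i + mlen <= part_size; i++) {
		if (!memcmp(part + i, IMAGE_TRAIL_MAGIC, mlen)) {
			p = part + i;
			break;
		}
	}
	if (!p)			/* no magic: not a valid config partition */
		return -ENOENT;

	*size = (size_t)(p - part);
	*data = malloc(*size);
	if (!*data)
		return -ENOMEM;
	memcpy(*data, part, *size);
	return 0;
}

int main(void)
{
	const char part[] = "real-config-bytes" IMAGE_TRAIL_MAGIC "padding";
	void *data;
	size_t size;

	if (read_platform_config(part, sizeof(part), &data, &size) == 0) {
		printf("config accepted, %zu bytes\n", size);
		free(data);
	} else {
		printf("no valid config, falling back to default file\n");
	}
	return 0;
}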
Reviewed-by: Jakub Byczkowski Signed-off-by: Jan Sokolowski Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/eprom.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/eprom.c b/drivers/infiniband/hw/hfi1/eprom.c index 26da124c88e2..d46b17107901 100644 --- a/drivers/infiniband/hw/hfi1/eprom.c +++ b/drivers/infiniband/hw/hfi1/eprom.c @@ -250,7 +250,6 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data, { void *buffer; void *p; - u32 length; int ret; buffer = kmalloc(P1_SIZE, GFP_KERNEL); @@ -265,13 +264,13 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data, /* scan for image magic that may trail the actual data */ p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE); - if (p) - length = p - buffer; - else - length = P1_SIZE; + if (!p) { + kfree(buffer); + return -ENOENT; + } *data = buffer; - *size = length; + *size = p - buffer; return 0; } From 330c422a8ee88d77407f65494f3bbf141a8d9453 Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Tue, 20 Jun 2017 13:57:06 -0400 Subject: [PATCH 0124/1795] drm/core: Fail atomic IOCTL with no CRTC state but with signaling. Problem: While running IGT kms_atomic_transition test suite I encountered a hang in drmHandleEvent immediately following an atomic_commit. After dumping the atomic state I realized that in this case there was not even one CRTC attached to the state and only disabled planes. This is probably due to a commit which hadn't changed any property which would require attaching crtc state. This means drmHandleEvent will never wake up from read since without CRTC in atomic state the event fd will not be signaled. Fix: Protect against this issue by failing atomic_commit early in drm_mode_atomic_commit where such a problem can be identified. v2: Fix typos and extra newlines. Change-Id: I3ee28ffae35fd1e8bfe553146c44da53da02e6f8 Signed-off-by: Andrey Grodzovsky Reviewed-by: Harry Wentland Acked-by: Daniel Vetter Reviewed-by: Maarten Lankhorst Signed-off-by: Maarten Lankhorst Link: http://patchwork.freedesktop.org/patch/msgid/1497981426-27203-1-git-send-email-Andrey.Grodzovsky@amd.com --- drivers/gpu/drm/drm_atomic.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index c0f336d23f9c..095e87278a88 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -2039,7 +2039,7 @@ static int prepare_crtc_signaling(struct drm_device *dev, { struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; - int i, ret; + int i, c = 0, ret; if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) return 0; @@ -2100,8 +2100,17 @@ static int prepare_crtc_signaling(struct drm_device *dev, crtc_state->event->base.fence = fence; } + + c++; } + /* + * Having this flag means user mode pends on event which will never + * reach due to lack of at least one CRTC for signaling + */ + if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) + return -EINVAL; + return 0; } From 9cd90018ebd5b4f484300f2a5af804317d3428a1 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 27 Jun 2017 16:25:10 +0100 Subject: [PATCH 0125/1795] drm/i915: Cancel pending execlists irq handler upon idling Due to the slight asynchronicity in handling the execlists interrupts (i.e.
we defer the work to a handler that may consume more than one interrupt event), when the engine is idle we may still have an irq tasklet queued (especially when it has been deferred to a ksoftirqd). At the beginning of the tasklet, we assert that we do hold a device wakeref for the access we are about to perform. This assumes that when we idle and release the GT wakeref, all execlists work has been completed (since the elsp tracking says the hw is idle). However, there may still be a tasklet queued, so as we mark the engine idle, also cancel any pending tasklet. Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20170627152510.28589-1-chris@chris-wilson.co.uk Reviewed-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/intel_engine_cs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 3b46c1f7b88b..49e875c46c96 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -1328,6 +1328,7 @@ void intel_engines_mark_idle(struct drm_i915_private *i915) for_each_engine(engine, i915, id) { intel_engine_disarm_breadcrumbs(engine); i915_gem_batch_pool_fini(&engine->batch_pool); + tasklet_kill(&engine->irq_tasklet); engine->no_priolist = false; } } From c544815a212f15dd787f0783720518178346f7d7 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 28 Jun 2017 11:11:41 +0200 Subject: [PATCH 0126/1795] drm/hdlcd: remove drm_vblank_cleanup, rise of the zoombies edition This was accidentally restored in commit de5cc8155cd250a31da67dea49aff7637ce98887 Author: Liviu Dudau Date: Tue Jun 6 15:05:21 2017 +0100 drm/arm: hdlcd: Set the CRTC's port before binding the encoder Fixes: de5cc8155cd2 ("drm/arm: hdlcd: Set the CRTC's port before binding the encoder.") Cc: Liviu Dudau Cc: Brian Starkey Cc: Mali DP Maintainers Acked-by: Liviu Dudau Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170628091141.14539-1-daniel.vetter@ffwll.ch --- drivers/gpu/drm/arm/hdlcd_drv.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index d3da87fbd85a..90bd97bf0013 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -343,7 +343,6 @@ err_register: } err_fbdev: drm_kms_helper_poll_fini(drm); - drm_vblank_cleanup(drm); err_vblank: pm_runtime_disable(drm->dev); err_pm_active: @@ -375,7 +374,6 @@ static void hdlcd_drm_unbind(struct device *dev) component_unbind_all(dev, drm); of_node_put(hdlcd->crtc.port); hdlcd->crtc.port = NULL; - drm_vblank_cleanup(drm); pm_runtime_get_sync(drm->dev); drm_irq_uninstall(drm); pm_runtime_put_sync(drm->dev); From b4164d66c4a2adf7beac7cd5e3f8cc5d06723d57 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 26 Jun 2017 18:19:49 +0200 Subject: [PATCH 0127/1795] drm/vblank: Unexport drm_vblank_cleanup There's no reason for drivers to call this, and all the ones I've removed looked very fishy: - Proper quiescenting of the vblank machinery should be done by calling drm_crtc_vblank_off(), which is best done by shutting down the entire display engine with drm_atomic_helper_shutdown. - Releasing of allocated memory is done by the core already, it calls drm_vblank_cleanup as a fallback. - drm_vblank_cleanup also has checks for drivers which forget to clean up vblank interrupts. 
This essentially reverts commit e77cef9c2d87db835ad9d70cde4a9b00b0ca2262 Author: Jerome Glisse Date: Thu Jan 7 15:39:13 2010 +0100 drm: Avoid calling vblank function is vblank wasn't initialized which was done to fix a bug in radeon code with msi interrupts: commit 003e69f9862bcda89a75c27750efdbc17ac02945 Author: Jerome Glisse Date: Thu Jan 7 15:39:14 2010 +0100 drm/radeon/kms: Don't try to enable IRQ if we have no handler installed Afaict from digging around in old code, this was needed to avoid blowing up in the ums fallback, and has stopped serving it's purpose long ago - if irq init fails, the driver fails to load, and there's really no way to blow up anymore. Long story short, this was most likely a small ums compat/fallback hack that became a thing of it's own and got cargo-cult duplicated all over the drm codebase for essentially no gain at all. v2: Mention that for drivers with a ->release callback cleanup is handled by drm_dev_fini() (Thierry). Cc: Thierry Reding Acked-by: Thierry Reding Cc: Jerome Glisse Reviewed-by: Sean Paul Acked-by: Alex Deucher Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170626161949.25629-2-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_internal.h | 1 + drivers/gpu/drm/drm_vblank.c | 19 ++----------------- include/drm/drm_vblank.h | 1 - 3 files changed, 3 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index f89371e920e6..068b685608cf 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -57,6 +57,7 @@ int drm_gem_name_info(struct seq_file *m, void *data); /* drm_vblank.c */ extern unsigned int drm_timestamp_monotonic; void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe); +void drm_vblank_cleanup(struct drm_device *dev); /* IOCTLS */ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index 7e3f59182571..05d043e9219f 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -394,19 +394,6 @@ static void vblank_disable_fn(unsigned long arg) spin_unlock_irqrestore(&dev->vbl_lock, irqflags); } -/** - * drm_vblank_cleanup - cleanup vblank support - * @dev: DRM device - * - * This function cleans up any resources allocated in drm_vblank_init(). It is - * called by the DRM core when @dev is finalized. - * - * Drivers can call drm_vblank_cleanup() if they need to quiescent the vblank - * interrupt in their unload code. But in general this should be handled by - * disabling all active &drm_crtc through e.g. drm_atomic_helper_shutdown, which - * should end up calling drm_crtc_vblank_off(). - * - */ void drm_vblank_cleanup(struct drm_device *dev) { unsigned int pipe; @@ -428,7 +415,6 @@ void drm_vblank_cleanup(struct drm_device *dev) dev->num_crtcs = 0; } -EXPORT_SYMBOL(drm_vblank_cleanup); /** * drm_vblank_init - initialize vblank support @@ -436,9 +422,8 @@ EXPORT_SYMBOL(drm_vblank_cleanup); * @num_crtcs: number of CRTCs supported by @dev * * This function initializes vblank support for @num_crtcs display pipelines. - * Drivers do not need to call drm_vblank_cleanup(), cleanup is already handled - * by the DRM core, or through calling drm_dev_fini() for drivers with a - * &drm_driver.release callback. + * Cleanup is handled by the DRM core, or through calling drm_dev_fini() for + * drivers with a &drm_driver.release callback. * * Returns: * Zero on success or a negative error code on failure. 
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h index 4ceef128582f..7fba9efe4951 100644 --- a/include/drm/drm_vblank.h +++ b/include/drm/drm_vblank.h @@ -168,7 +168,6 @@ void drm_crtc_wait_one_vblank(struct drm_crtc *crtc); void drm_crtc_vblank_off(struct drm_crtc *crtc); void drm_crtc_vblank_reset(struct drm_crtc *crtc); void drm_crtc_vblank_on(struct drm_crtc *crtc); -void drm_vblank_cleanup(struct drm_device *dev); u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc); bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, From f3f63e6b5683ffb83eebc40f9768356a7beab6dc Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 28 Jun 2017 00:16:14 +0300 Subject: [PATCH 0128/1795] drm: arcpgu: Remove CRTC .commit() helper operation The CRTC helper .commit() operation is legacy code, the atomic helpers prefer the .enable() operation. As the arcpgu driver implements the .enable() operation, .commit() is never used and can be removed. Signed-off-by: Laurent Pinchart Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170627211621.27767-1-laurent.pinchart+renesas@ideasonboard.com --- drivers/gpu/drm/arc/arcpgu_crtc.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c index 611af74a31c0..51745608e09d 100644 --- a/drivers/gpu/drm/arc/arcpgu_crtc.c +++ b/drivers/gpu/drm/arc/arcpgu_crtc.c @@ -164,7 +164,6 @@ static const struct drm_crtc_helper_funcs arc_pgu_crtc_helper_funcs = { .enable = arc_pgu_crtc_enable, .disable = arc_pgu_crtc_disable, .prepare = arc_pgu_crtc_disable, - .commit = arc_pgu_crtc_enable, .atomic_begin = arc_pgu_crtc_atomic_begin, }; From b35954722c5e753d3e7170d71b0058c9fd2620e3 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 28 Jun 2017 00:16:15 +0300 Subject: [PATCH 0129/1795] drm: arcpgu: Remove CRTC .prepare() helper operation The CRTC helper .prepare() operation is legacy code, the atomic helpers prefer the .disable() operation. As the arcpgu driver implements the .disable() and .prepare() operations identicallly, .prepare() can be removed. Signed-off-by: Laurent Pinchart Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170627211621.27767-2-laurent.pinchart+renesas@ideasonboard.com --- drivers/gpu/drm/arc/arcpgu_crtc.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c index 51745608e09d..1f306781c9d5 100644 --- a/drivers/gpu/drm/arc/arcpgu_crtc.c +++ b/drivers/gpu/drm/arc/arcpgu_crtc.c @@ -163,7 +163,6 @@ static const struct drm_crtc_helper_funcs arc_pgu_crtc_helper_funcs = { .mode_set_nofb = arc_pgu_crtc_mode_set_nofb, .enable = arc_pgu_crtc_enable, .disable = arc_pgu_crtc_disable, - .prepare = arc_pgu_crtc_disable, .atomic_begin = arc_pgu_crtc_atomic_begin, }; From 4889b35d7fa1d211ff8246fae514800e07fb0099 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 28 Jun 2017 00:16:16 +0300 Subject: [PATCH 0130/1795] drm: qxl: Remove unused CRTC .dpms() helper operation The CRTC .dpms() helper operation is called by the atomic helpers only when no .prepare(), .atomic_disable() or .disable() operation is provided. As the qxl driver provides a .disable() operation, the .dpms() operation is unused and can be removed. 
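The claim that .dpms() is dead code follows from the helper fallback order, which can be pictured with a plain function-pointer table in userspace C (names and the exact precedence below are illustrative; the point is that ->dpms() is reached only when none of the newer hooks is provided):

#include <stdio.h>
#include <stddef.h>

struct crtc_helper_funcs {
	void (*atomic_disable)(void);
	void (*disable)(void);
	void (*prepare)(void);
	void (*dpms)(int mode);
};

/* fall back through the hooks; ->dpms() is only a last resort */
static void core_disable_crtc(const struct crtc_helper_funcs *funcs)
{
	if (funcs->atomic_disable)
		funcs->atomic_disable();
	else if (funcs->disable)
		funcs->disable();
	else if (funcs->prepare)
		funcs->prepare();
	else if (funcs->dpms)
		funcs->dpms(3 /* a DPMS_OFF-like value */);
}

static void driver_disable(void) { printf("disable()\n"); }
static void driver_dpms(int mode) { printf("dpms(%d)\n", mode); }

int main(void)
{
	/* .dpms is present but unreachable once .disable is provided */
	struct crtc_helper_funcs funcs = {
		.disable = driver_disable,
		.dpms = driver_dpms,
	};

	core_disable_crtc(&funcs);	/* prints "disable()" only */
	return 0;
}

With .disable set, the .dpms entry is never invoked, which is why removing it is safe.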
Signed-off-by: Laurent Pinchart Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170627211621.27767-3-laurent.pinchart+renesas@ideasonboard.com --- drivers/gpu/drm/qxl/qxl_display.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 72dcaab84bba..19ba336ce096 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -378,10 +378,6 @@ qxl_framebuffer_init(struct drm_device *dev, return 0; } -static void qxl_crtc_dpms(struct drm_crtc *crtc, int mode) -{ -} - static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -467,7 +463,6 @@ static void qxl_crtc_disable(struct drm_crtc *crtc) } static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = { - .dpms = qxl_crtc_dpms, .disable = qxl_crtc_disable, .mode_fixup = qxl_crtc_mode_fixup, .mode_set_nofb = qxl_mode_set_nofb, From 641164f23614ec3bcd551323e61a6e2707898d34 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 28 Jun 2017 00:16:17 +0300 Subject: [PATCH 0131/1795] drm: qxl: Replace CRTC .commit() helper operation with .enable() The CRTC helper .commit() operation is legacy code, the atomic helpers prefer the .enable() operation. Replace the .commit() helper operation with .enable() in the driver. Signed-off-by: Laurent Pinchart Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170627211621.27767-4-laurent.pinchart+renesas@ideasonboard.com --- drivers/gpu/drm/qxl/qxl_display.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 19ba336ce096..7ede5f131a5c 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -447,7 +447,7 @@ static void qxl_mode_set_nofb(struct drm_crtc *crtc) } -static void qxl_crtc_commit(struct drm_crtc *crtc) +static void qxl_crtc_enable(struct drm_crtc *crtc) { DRM_DEBUG("\n"); } @@ -466,7 +466,7 @@ static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = { .disable = qxl_crtc_disable, .mode_fixup = qxl_crtc_mode_fixup, .mode_set_nofb = qxl_mode_set_nofb, - .commit = qxl_crtc_commit, + .enable = qxl_crtc_enable, .atomic_flush = qxl_crtc_atomic_flush, }; From 4e004f644bc40b3d51653e91cc58af7c6b102f56 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 28 Jun 2017 00:16:18 +0300 Subject: [PATCH 0132/1795] drm: vmwgfx: Remove unneeded CRTC .prepare() helper operation The CRTC .prepare() helper operation is part of the legacy helpers and is deprecated in favour of the .disable() helper operation. As the vmwgfx driver provides a .disable() helper operation, and as the .prepare() helper operation implementation is empty, we can remove it. 
Signed-off-by: Laurent Pinchart Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170627211621.27767-5-laurent.pinchart+renesas@ideasonboard.com --- drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 449ed4fba0f2..639e16703b80 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -202,18 +202,6 @@ static void vmw_ldu_crtc_mode_set_nofb(struct drm_crtc *crtc) { } -/** - * vmw_ldu_crtc_helper_prepare - Noop - * - * @crtc: CRTC associated with the new screen - * - * Prepares the CRTC for a mode set, but we don't need to do anything here. - * - */ -static void vmw_ldu_crtc_helper_prepare(struct drm_crtc *crtc) -{ -} - /** * vmw_ldu_crtc_helper_commit - Noop * @@ -388,7 +376,6 @@ drm_plane_helper_funcs vmw_ldu_primary_plane_helper_funcs = { }; static const struct drm_crtc_helper_funcs vmw_ldu_crtc_helper_funcs = { - .prepare = vmw_ldu_crtc_helper_prepare, .commit = vmw_ldu_crtc_helper_commit, .disable = vmw_ldu_crtc_helper_disable, .mode_set_nofb = vmw_ldu_crtc_mode_set_nofb, From 4177b51e1949468c74a54dde99c775f5746fa11e Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 28 Jun 2017 00:16:19 +0300 Subject: [PATCH 0133/1795] drm: vmwgfx: Replace CRTC .commit() helper operation with .enable() The CRTC helper .commit() operation is legacy code, the atomic helpers prefer the .enable() operation. Replace the .commit() helper operation with .enable() in the driver. Signed-off-by: Laurent Pinchart Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170627211621.27767-6-laurent.pinchart+renesas@ideasonboard.com --- drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 6 +++--- drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 6 +++--- drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 639e16703b80..f8acd3a15523 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -203,7 +203,7 @@ static void vmw_ldu_crtc_mode_set_nofb(struct drm_crtc *crtc) } /** - * vmw_ldu_crtc_helper_commit - Noop + * vmw_ldu_crtc_helper_enable - Noop * * @crtc: CRTC associated with the new screen * @@ -212,7 +212,7 @@ static void vmw_ldu_crtc_mode_set_nofb(struct drm_crtc *crtc) * but since for LDU the display plane is closely tied to the * CRTC, it makes more sense to do those at plane update time. 
*/ -static void vmw_ldu_crtc_helper_commit(struct drm_crtc *crtc) +static void vmw_ldu_crtc_helper_enable(struct drm_crtc *crtc) { } @@ -376,7 +376,7 @@ drm_plane_helper_funcs vmw_ldu_primary_plane_helper_funcs = { }; static const struct drm_crtc_helper_funcs vmw_ldu_crtc_helper_funcs = { - .commit = vmw_ldu_crtc_helper_commit, + .enable = vmw_ldu_crtc_helper_enable, .disable = vmw_ldu_crtc_helper_disable, .mode_set_nofb = vmw_ldu_crtc_mode_set_nofb, .atomic_check = vmw_du_crtc_atomic_check, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 3b917c9b0c21..1cb826c503bf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -270,13 +270,13 @@ static void vmw_sou_crtc_helper_prepare(struct drm_crtc *crtc) } /** - * vmw_sou_crtc_helper_commit - Noop + * vmw_sou_crtc_helper_enable - Noop * * @crtc: CRTC associated with the new screen * * This is called after a mode set has been completed. */ -static void vmw_sou_crtc_helper_commit(struct drm_crtc *crtc) +static void vmw_sou_crtc_helper_enable(struct drm_crtc *crtc) { } @@ -573,7 +573,7 @@ drm_plane_helper_funcs vmw_sou_primary_plane_helper_funcs = { static const struct drm_crtc_helper_funcs vmw_sou_crtc_helper_funcs = { .prepare = vmw_sou_crtc_helper_prepare, - .commit = vmw_sou_crtc_helper_commit, + .enable = vmw_sou_crtc_helper_enable, .disable = vmw_sou_crtc_helper_disable, .mode_set_nofb = vmw_sou_crtc_mode_set_nofb, .atomic_check = vmw_du_crtc_atomic_check, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 6aecba6cd5e2..4eb93b47d6db 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -412,7 +412,7 @@ static void vmw_stdu_crtc_helper_prepare(struct drm_crtc *crtc) } -static void vmw_stdu_crtc_helper_commit(struct drm_crtc *crtc) +static void vmw_stdu_crtc_helper_enable(struct drm_crtc *crtc) { struct vmw_private *dev_priv; struct vmw_screen_target_display_unit *stdu; @@ -1415,7 +1415,7 @@ drm_plane_helper_funcs vmw_stdu_primary_plane_helper_funcs = { static const struct drm_crtc_helper_funcs vmw_stdu_crtc_helper_funcs = { .prepare = vmw_stdu_crtc_helper_prepare, - .commit = vmw_stdu_crtc_helper_commit, + .enable = vmw_stdu_crtc_helper_enable, .disable = vmw_stdu_crtc_helper_disable, .mode_set_nofb = vmw_stdu_crtc_mode_set_nofb, .atomic_check = vmw_du_crtc_atomic_check, From 13f8458f9a4522bcb7d1856dd8b329ff5d90f887 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 27 Jun 2017 18:37:31 +0100 Subject: [PATCH 0134/1795] drm/i915: Drop flushing of the object free list/worker from i915_gem_suspend i915_gem_suspend() is called from all of our finalization paths (suspend, hibernate, unload). i915_gem_drain_freed_objects() adds an arbitrary delay as it uses an rcu_barrier() to ensure that there are no more freed objects in flight, and this delay causes a large amount of variability in suspend timings. For S3 suspend, we do not need to free pages as doing so does not impact at all upon the system in its suspended state, unlike S4 hibernation where we do want the hibernation image to be as small as possible. Therefore we can forgo waiting inside i915_gem_suspend(), so long as we ensure that we do cleanup before unload (see i915_gem_load_cleanup()) and prefer to reap our objects prior to hibernation (see i915_gem_freeze()). Removing the rcu_barrier() from i915_gem_suspend() improves S3 latency by about 30ms on Skylake (ymmv). 
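The resulting policy splits the cost by caller. A toy sketch with stand-in functions (not the i915 code) of where the expensive drain is kept and where it is skipped:

#include <stdio.h>

/* stands in for the rcu_barrier()-backed flush of freed objects */
static void drain_freed_objects(void)
{
	printf("draining freed objects (slow: waits for an RCU grace period)\n");
}

static void gem_suspend(void)
{
	/* S3: freed pages do not matter while suspended; skip the drain */
	printf("suspend: quiesce engines only\n");
}

static void gem_freeze(void)
{
	/* S4: keep the hibernation image as small as possible */
	drain_freed_objects();
	printf("freeze: ready for hibernation image\n");
}

static void gem_unload(void)
{
	/* driver unload must not leak anything */
	drain_freed_objects();
	printf("unload: all objects released\n");
}

int main(void)
{
	gem_suspend();
	gem_freeze();
	gem_unload();
	return 0;
}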
Reported-by: David Weinehall Signed-off-by: Chris Wilson Cc: David Weinehall Link: http://patchwork.freedesktop.org/patch/msgid/20170627173731.11566-1-chris@chris-wilson.co.uk Tested-by: David Weinehall Reviewed-by: David Weinehall --- drivers/gpu/drm/i915/i915_gem.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 36d838677982..f38c84e485ab 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4597,8 +4597,6 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv) while (flush_delayed_work(&dev_priv->gt.idle_work)) ; - i915_gem_drain_freed_objects(dev_priv); - /* Assert that we sucessfully flushed all the work and * reset the GPU back to its idle, low power state. */ From 7b92c1bd0540b64f54d98331d67e57266f9343c4 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 28 Jun 2017 13:35:48 +0100 Subject: [PATCH 0135/1795] drm/i915: Avoid keeping waitboost active for signaling threads MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Once a client has requested a waitboost, we keep that waitboost active until all clients are no longer waiting. This is because we don't distinguish which waiter deserves the boost. However, with the advent of fence signaling, the signaler threads appear as waiters to the RPS interrupt handler. So instead of using a single boolean to track when to keep the waitboost active, use a counter of all outstanding waitboosted requests. At this point, I have removed all vestiges of the rate limiting on clients. Whilst this means that compositors should remain more fluid, it also means that boosts are more prevalent. See commit b29c19b64528 ("drm/i915: Boost RPS frequency for CPU stalls") for a longer discussion on the pros and cons of both approaches. A drawback of this implementation is that it requires constant request submission to keep the waitboost trimmed (as it is now cancelled when the request is completed). This will be fine for a busy system, but near idle the boosts may be kept for longer than desired (effectively tens of vblanks worstcase) and there is a reliance on rc6 instead. v2: Remove defunct rps.client_lock Reported-by: Michał Winiarski Signed-off-by: Chris Wilson Cc: Michał Winiarski Reviewed-by: Michał Winiarski Link: http://patchwork.freedesktop.org/patch/msgid/20170628123548.9236-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_debugfs.c | 12 +++--- drivers/gpu/drm/i915/i915_drv.h | 10 ++--- drivers/gpu/drm/i915/i915_gem.c | 25 +---------- drivers/gpu/drm/i915/i915_gem_request.c | 7 ++- drivers/gpu/drm/i915/i915_gem_request.h | 2 + drivers/gpu/drm/i915/i915_irq.c | 18 ++------ drivers/gpu/drm/i915/intel_drv.h | 5 +-- drivers/gpu/drm/i915/intel_pm.c | 57 ++++++++++--------------- 8 files changed, 45 insertions(+), 91 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index f7aa6cbe3a2e..580bd4f4a49e 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2327,6 +2327,8 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) seq_printf(m, "GPU busy? %s [%d requests]\n", yesno(dev_priv->gt.awake), dev_priv->gt.active_requests); seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv)); + seq_printf(m, "Boosts outstanding? 
%d\n", + atomic_read(&dev_priv->rps.num_waiters)); seq_printf(m, "Frequency requested %d\n", intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq)); seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n", @@ -2340,22 +2342,20 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq)); mutex_lock(&dev->filelist_mutex); - spin_lock(&dev_priv->rps.client_lock); list_for_each_entry_reverse(file, &dev->filelist, lhead) { struct drm_i915_file_private *file_priv = file->driver_priv; struct task_struct *task; rcu_read_lock(); task = pid_task(file->pid, PIDTYPE_PID); - seq_printf(m, "%s [%d]: %d boosts%s\n", + seq_printf(m, "%s [%d]: %d boosts\n", task ? task->comm : "", task ? task->pid : -1, - file_priv->rps.boosts, - list_empty(&file_priv->rps.link) ? "" : ", active"); + atomic_read(&file_priv->rps.boosts)); rcu_read_unlock(); } - seq_printf(m, "Kernel (anonymous) boosts: %d\n", dev_priv->rps.boosts); - spin_unlock(&dev_priv->rps.client_lock); + seq_printf(m, "Kernel (anonymous) boosts: %d\n", + atomic_read(&dev_priv->rps.boosts)); mutex_unlock(&dev->filelist_mutex); if (INTEL_GEN(dev_priv) >= 6 && diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5524e6fc9476..ddb5ee8e0535 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -584,8 +584,7 @@ struct drm_i915_file_private { struct idr context_idr; struct intel_rps_client { - struct list_head link; - unsigned boosts; + atomic_t boosts; } rps; unsigned int bsd_engine; @@ -1302,13 +1301,10 @@ struct intel_gen6_power_mgmt { int last_adj; enum { LOW_POWER, BETWEEN, HIGH_POWER } power; - spinlock_t client_lock; - struct list_head clients; - bool client_boost; - bool enabled; struct delayed_work autoenable_work; - unsigned boosts; + atomic_t num_waiters; + atomic_t boosts; /* manual wa residency calculations */ struct intel_rps_ei ei; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index f38c84e485ab..1b2dfa8bdeef 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -388,7 +388,7 @@ i915_gem_object_wait_fence(struct dma_fence *fence, */ if (rps) { if (INTEL_GEN(rq->i915) >= 6) - gen6_rps_boost(rq->i915, rps, rq->emitted_jiffies); + gen6_rps_boost(rq, rps); else rps = NULL; } @@ -399,22 +399,6 @@ out: if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq)) i915_gem_request_retire_upto(rq); - if (rps && i915_gem_request_global_seqno(rq) == intel_engine_last_submit(rq->engine)) { - /* The GPU is now idle and this client has stalled. - * Since no other client has submitted a request in the - * meantime, assume that this client is the only one - * supplying work to the GPU but is unable to keep that - * work supplied because it is waiting. Since the GPU is - * then never kept fully busy, RPS autoclocking will - * keep the clocks relatively low, causing further delays. - * Compensate by giving the synchronous client credit for - * a waitboost next time. 
- */ - spin_lock(&rq->i915->rps.client_lock); - list_del_init(&rps->link); - spin_unlock(&rq->i915->rps.client_lock); - } - return timeout; } @@ -5053,12 +5037,6 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file) list_for_each_entry(request, &file_priv->mm.request_list, client_link) request->file_priv = NULL; spin_unlock(&file_priv->mm.lock); - - if (!list_empty(&file_priv->rps.link)) { - spin_lock(&to_i915(dev)->rps.client_lock); - list_del(&file_priv->rps.link); - spin_unlock(&to_i915(dev)->rps.client_lock); - } } int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file) @@ -5075,7 +5053,6 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file) file->driver_priv = file_priv; file_priv->dev_priv = i915; file_priv->file = file; - INIT_LIST_HEAD(&file_priv->rps.link); spin_lock_init(&file_priv->mm.lock); INIT_LIST_HEAD(&file_priv->mm.request_list); diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 8c59c79cbd8b..483af8921060 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -384,7 +384,11 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request) engine->context_unpin(engine, engine->last_retired_context); engine->last_retired_context = request->ctx; - dma_fence_signal(&request->fence); + spin_lock_irq(&request->lock); + if (request->waitboost) + atomic_dec(&request->i915->rps.num_waiters); + dma_fence_signal_locked(&request->fence); + spin_unlock_irq(&request->lock); i915_priotree_fini(request->i915, &request->priotree); i915_gem_request_put(request); @@ -639,6 +643,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, req->file_priv = NULL; req->batch = NULL; req->capture_list = NULL; + req->waitboost = false; /* * Reserve space in the ring buffer for all the commands required to diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h index 7b7c84369d78..604e131470a1 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.h +++ b/drivers/gpu/drm/i915/i915_gem_request.h @@ -184,6 +184,8 @@ struct drm_i915_gem_request { /** Time at which this request was emitted, in jiffies. 
*/ unsigned long emitted_jiffies; + bool waitboost; + /** engine->request_list entry for this request */ struct list_head link; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e4934d5adc9e..1d33cea01a1b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1091,18 +1091,6 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) return events; } -static bool any_waiters(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine(engine, dev_priv, id) - if (intel_engine_has_waiter(engine)) - return true; - - return false; -} - static void gen6_pm_rps_work(struct work_struct *work) { struct drm_i915_private *dev_priv = @@ -1114,7 +1102,7 @@ static void gen6_pm_rps_work(struct work_struct *work) spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->rps.interrupts_enabled) { pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir); - client_boost = fetch_and_zero(&dev_priv->rps.client_boost); + client_boost = atomic_read(&dev_priv->rps.num_waiters); } spin_unlock_irq(&dev_priv->irq_lock); @@ -1131,7 +1119,7 @@ static void gen6_pm_rps_work(struct work_struct *work) new_delay = dev_priv->rps.cur_freq; min = dev_priv->rps.min_freq_softlimit; max = dev_priv->rps.max_freq_softlimit; - if (client_boost || any_waiters(dev_priv)) + if (client_boost) max = dev_priv->rps.max_freq; if (client_boost && new_delay < dev_priv->rps.boost_freq) { new_delay = dev_priv->rps.boost_freq; @@ -1144,7 +1132,7 @@ static void gen6_pm_rps_work(struct work_struct *work) if (new_delay >= dev_priv->rps.max_freq_softlimit) adj = 0; - } else if (client_boost || any_waiters(dev_priv)) { + } else if (client_boost) { adj = 0; } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 4d3982bb596a..f73474a22b6b 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1869,9 +1869,8 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv); void gen6_rps_busy(struct drm_i915_private *dev_priv); void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); void gen6_rps_idle(struct drm_i915_private *dev_priv); -void gen6_rps_boost(struct drm_i915_private *dev_priv, - struct intel_rps_client *rps, - unsigned long submitted); +void gen6_rps_boost(struct drm_i915_gem_request *rq, + struct intel_rps_client *rps); void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req); void g4x_wm_get_hw_state(struct drm_device *dev); void vlv_wm_get_hw_state(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 48ea0fca1f72..c3fcadfa0ae7 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -6126,47 +6126,35 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv) gen6_sanitize_rps_pm_mask(dev_priv, ~0)); } mutex_unlock(&dev_priv->rps.hw_lock); - - spin_lock(&dev_priv->rps.client_lock); - while (!list_empty(&dev_priv->rps.clients)) - list_del_init(dev_priv->rps.clients.next); - spin_unlock(&dev_priv->rps.client_lock); } -void gen6_rps_boost(struct drm_i915_private *dev_priv, - struct intel_rps_client *rps, - unsigned long submitted) +void gen6_rps_boost(struct drm_i915_gem_request *rq, + struct intel_rps_client *rps) { + struct drm_i915_private *i915 = rq->i915; + bool boost; + /* This is intentionally racy! 
We peek at the state here, then * validate inside the RPS worker. */ - if (!(dev_priv->gt.awake && - dev_priv->rps.enabled && - dev_priv->rps.cur_freq < dev_priv->rps.boost_freq)) + if (!i915->rps.enabled) return; - /* Force a RPS boost (and don't count it against the client) if - * the GPU is severely congested. - */ - if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES)) - rps = NULL; - - spin_lock(&dev_priv->rps.client_lock); - if (rps == NULL || list_empty(&rps->link)) { - spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->rps.interrupts_enabled) { - dev_priv->rps.client_boost = true; - schedule_work(&dev_priv->rps.work); - } - spin_unlock_irq(&dev_priv->irq_lock); - - if (rps != NULL) { - list_add(&rps->link, &dev_priv->rps.clients); - rps->boosts++; - } else - dev_priv->rps.boosts++; + boost = false; + spin_lock_irq(&rq->lock); + if (!rq->waitboost && !i915_gem_request_completed(rq)) { + atomic_inc(&i915->rps.num_waiters); + rq->waitboost = true; + boost = true; } - spin_unlock(&dev_priv->rps.client_lock); + spin_unlock_irq(&rq->lock); + if (!boost) + return; + + if (READ_ONCE(i915->rps.cur_freq) < i915->rps.boost_freq) + schedule_work(&i915->rps.work); + + atomic_inc(rps ? &rps->boosts : &i915->rps.boosts); } int intel_set_rps(struct drm_i915_private *dev_priv, u8 val) @@ -9113,7 +9101,7 @@ static void __intel_rps_boost_work(struct work_struct *work) struct drm_i915_gem_request *req = boost->req; if (!i915_gem_request_completed(req)) - gen6_rps_boost(req->i915, NULL, req->emitted_jiffies); + gen6_rps_boost(req, NULL); i915_gem_request_put(req); kfree(boost); @@ -9142,11 +9130,10 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req) void intel_pm_setup(struct drm_i915_private *dev_priv) { mutex_init(&dev_priv->rps.hw_lock); - spin_lock_init(&dev_priv->rps.client_lock); INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work, __intel_autoenable_gt_powersave); - INIT_LIST_HEAD(&dev_priv->rps.clients); + atomic_set(&dev_priv->rps.num_waiters, 0); dev_priv->pm.suspended = false; atomic_set(&dev_priv->pm.wakeref_count, 0); From faa8b0b7534aabdc8095eb05310f04eff4024b36 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 28 Jun 2017 15:41:01 +0300 Subject: [PATCH 0136/1795] drm/gma500: remove an unneeded NULL check "connector" is the list iterator and it can't be NULL. It causes a static checker warning because we dereference the iterator to get the next item in the list. Let's remove this check. 
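The reasoning is the standard list_for_each_entry() contract: the cursor is always computed from a real list node via container_of(), so it can never be NULL inside the loop and a NULL check on it is dead code. A small self-contained illustration with a hypothetical type:

#include <linux/list.h>

struct item {
        struct list_head head;
        int value;
};

static int sum_items(struct list_head *items)
{
        struct item *it;
        int sum = 0;

        list_for_each_entry(it, items, head) {
                /* 'it' is derived from a valid node via container_of();
                 * it is never NULL here, so no NULL check is needed. */
                sum += it->value;
        }
        return sum;
}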
Signed-off-by: Dan Carpenter Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170628124100.3pw2gyitsfopaib5@mwanda --- drivers/gpu/drm/gma500/mdfld_intel_display.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c index 63c6e08600ae..531e4450c000 100644 --- a/drivers/gpu/drm/gma500/mdfld_intel_display.c +++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c @@ -737,11 +737,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc, sizeof(struct drm_display_mode)); list_for_each_entry(connector, &mode_config->connector_list, head) { - if (!connector) - continue; - encoder = connector->encoder; - if (!encoder) continue; From 886015a0ad43c7fc034b23ea4614ba39162f9ddd Mon Sep 17 00:00:00 2001 From: Gabriel Krisman Bertazi Date: Wed, 28 Jun 2017 18:06:05 -0300 Subject: [PATCH 0137/1795] drm/i915: reintroduce VLV/CHV PFI programming power domain workaround MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are still cases on these platforms where an attempt is made to configure the CDCLK while the power domain is off, like when coming back from a suspend. So the workaround below is still needed. This effectively reverts commit 63ff30442519 ("drm/i915: Nuke the VLV/CHV PFI programming power domain workaround"). Cc: stable@vger.kernel.org Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=101517 Suggested-by: Ville Syrjälä Signed-off-by: Gabriel Krisman Bertazi Link: http://patchwork.freedesktop.org/patch/msgid/20170628210605.4994-1-krisman@collabora.co.uk Reviewed-by: Ville Syrjälä Signed-off-by: Ville Syrjälä --- drivers/gpu/drm/i915/intel_cdclk.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index b8914db7d2e1..1241e5891b29 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -491,6 +491,14 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, int cdclk = cdclk_state->cdclk; u32 val, cmd; + /* There are cases where we can end up here with power domains + * off and a CDCLK frequency other than the minimum, like when + * issuing a modeset without actually changing any display after + * a system suspend. So grab the PIPE-A domain, which covers + * the HW blocks needed for the following programming. + */ + intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); + if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */ cmd = 2; else if (cdclk == 266667) @@ -549,6 +557,8 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, intel_update_cdclk(dev_priv); vlv_program_pfi_credits(dev_priv); + + intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A); } static void chv_set_cdclk(struct drm_i915_private *dev_priv, @@ -568,6 +578,14 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, return; } + /* There are cases where we can end up here with power domains + * off and a CDCLK frequency other than the minimum, like when + * issuing a modeset without actually changing any display after + * a system suspend. So grab the PIPE-A domain, which covers + * the HW blocks needed for the following programming. 
+ */ + intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); + /* * Specs are full of misinformation, but testing on actual * hardware has shown that we just need to write the desired @@ -590,6 +608,8 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, intel_update_cdclk(dev_priv); vlv_program_pfi_credits(dev_priv); + + intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A); } static int bdw_calc_cdclk(int max_pixclk) From 4d470f7359c4bf22518baa30700ad45649371a22 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 29 Jun 2017 16:04:25 +0100 Subject: [PATCH 0138/1795] drm/i915: Avoid undefined behaviour of "u32 >> 32" When computing a hash for looking up relocation target handles in an execbuf, we start with a large size for the hashtable and proceed to halve it until the allocation succeeds. The final attempt is with an order of 0 (i.e. a single element). This means that we then pass bits=0 to hash_32() which then computes "hash >> (32 - 0)" to lookup the single element. Right shifting a value by the width of the operand is undefined, so limit the smallest hash table we use to order 1. v2: Keep the retry allocation flag for the final pass Fixes: 4ff4b44cbb70 ("drm/i915: Store a direct lookup from object handle to vma") Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20170629150425.27508-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 38 ++++++++++++++-------- 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index ec33b358fba9..929f275e67aa 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -288,20 +288,26 @@ static int eb_create(struct i915_execbuffer *eb) * direct lookup. */ do { + unsigned int flags; + + /* While we can still reduce the allocation size, don't + * raise a warning and allow the allocation to fail. + * On the last pass though, we want to try as hard + * as possible to perform the allocation and warn + * if it fails. 
+ */ + flags = GFP_TEMPORARY; + if (size > 1) + flags |= __GFP_NORETRY | __GFP_NOWARN; + eb->buckets = kzalloc(sizeof(struct hlist_head) << size, - GFP_TEMPORARY | - __GFP_NORETRY | - __GFP_NOWARN); + flags); if (eb->buckets) break; } while (--size); - if (unlikely(!eb->buckets)) { - eb->buckets = kzalloc(sizeof(struct hlist_head), - GFP_TEMPORARY); - if (unlikely(!eb->buckets)) - return -ENOMEM; - } + if (unlikely(!size)) + return -ENOMEM; eb->lut_size = size; } else { @@ -452,7 +458,7 @@ eb_add_vma(struct i915_execbuffer *eb, return err; } - if (eb->lut_size >= 0) { + if (eb->lut_size > 0) { vma->exec_handle = entry->handle; hlist_add_head(&vma->exec_node, &eb->buckets[hash_32(entry->handle, @@ -895,7 +901,7 @@ static void eb_release_vmas(const struct i915_execbuffer *eb) static void eb_reset_vmas(const struct i915_execbuffer *eb) { eb_release_vmas(eb); - if (eb->lut_size >= 0) + if (eb->lut_size > 0) memset(eb->buckets, 0, sizeof(struct hlist_head) << eb->lut_size); } @@ -904,7 +910,7 @@ static void eb_destroy(const struct i915_execbuffer *eb) { GEM_BUG_ON(eb->reloc_cache.rq); - if (eb->lut_size >= 0) + if (eb->lut_size > 0) kfree(eb->buckets); } @@ -2180,8 +2186,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, } } - if (eb_create(&eb)) - return -ENOMEM; + err = eb_create(&eb); + if (err) + goto err_out_fence; + + GEM_BUG_ON(!eb.lut_size); err = eb_select_context(&eb); if (unlikely(err)) @@ -2341,6 +2350,7 @@ err_rpm: i915_gem_context_put(eb.ctx); err_destroy: eb_destroy(&eb); +err_out_fence: if (out_fence_fd != -1) put_unused_fd(out_fence_fd); err_in_fence: From 98eed3d1ade53596e1c8785e049f03da4480a820 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Mon, 19 Jun 2017 14:21:47 -0700 Subject: [PATCH 0139/1795] drm/i915/cfl: Fix Workarounds. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit During the review of Coffee Lake workarounds Mika pointed out that WaDisableKillLogic and GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC should be removed from CFL and with that I should carry the rv-b. However when doing the v2 I removed another Workaround that should remain because although not mentioned by spec the history of hangs around it advocates on its favor. On some follow-up patches I continued operating on the wrong workardound, but Ville noticed that, so here is the fix for the current CFL code that is upstream already. 
Fixes: 46c26662d2f ("drm/i915/cfl: Introduce Coffee Lake workarounds.") Cc: Ville Syrjälä Cc: Dhinakaran Pandiyan Cc: Mika Kuoppala Signed-off-by: Rodrigo Vivi Reviewed-by: Dhinakaran Pandiyan --- drivers/gpu/drm/i915/intel_engine_cs.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 49e875c46c96..a55cd72aeeff 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -809,9 +809,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) | GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); - /* WaDisableKillLogic:bxt,skl,kbl,cfl */ - I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | - ECOCHK_DIS_TLB); + /* WaDisableKillLogic:bxt,skl,kbl */ + if (!IS_COFFEELAKE(dev_priv)) + I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | + ECOCHK_DIS_TLB); /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */ /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */ @@ -882,10 +883,9 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) WA_SET_BIT_MASKED(HDC_CHICKEN0, HDC_FORCE_NON_COHERENT); - /* WaDisableHDCInvalidation:skl,bxt,kbl */ - if (!IS_COFFEELAKE(dev_priv)) - I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | - BDW_DISABLE_HDC_INVALIDATION); + /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */ + I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | + BDW_DISABLE_HDC_INVALIDATION); /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */ if (IS_SKYLAKE(dev_priv) || From 8111477663813caa1a4469cfe6afaae36cd04513 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 29 Jun 2017 13:59:24 +0100 Subject: [PATCH 0140/1795] dma-buf/dma-fence: Extract __dma_fence_is_later() Often we have the task of comparing two seqno known to be on the same context, so provide a common __dma_fence_is_later(). Signed-off-by: Chris Wilson Cc: Sumit Semwal Cc: Sean Paul Cc: Gustavo Padovan Reviewed-by: Sean Paul Signed-off-by: Gustavo Padovan Link: http://patchwork.freedesktop.org/patch/msgid/20170629125930.821-1-chris@chris-wilson.co.uk --- include/linux/dma-fence.h | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index a5195a7d6f77..ac5987989e9a 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -335,6 +335,19 @@ dma_fence_is_signaled(struct dma_fence *fence) return false; } +/** + * __dma_fence_is_later - return if f1 is chronologically later than f2 + * @f1: [in] the first fence's seqno + * @f2: [in] the second fence's seqno from the same context + * + * Returns true if f1 is chronologically later than f2. Both fences must be + * from the same context, since a seqno is not common across contexts. 
+ */ +static inline bool __dma_fence_is_later(u32 f1, u32 f2) +{ + return (int)(f1 - f2) > 0; +} + /** * dma_fence_is_later - return if f1 is chronologically later than f2 * @f1: [in] the first fence from the same context @@ -349,7 +362,7 @@ static inline bool dma_fence_is_later(struct dma_fence *f1, if (WARN_ON(f1->context != f2->context)) return false; - return (int)(f1->seqno - f2->seqno) > 0; + return __dma_fence_is_later(f1->seqno, f2->seqno); } /** From 61894b02716f122dd7662d5d89f5b2245ca551e2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 29 Jun 2017 13:59:25 +0100 Subject: [PATCH 0141/1795] dma-buf/sw-sync: Fix the is-signaled test to handle u32 wraparound Use the canonical __dma_fence_is_later() to compare the fence seqno against the timeline seqno to check if the fence is signaled. Signed-off-by: Chris Wilson Cc: Sumit Semwal Cc: Sean Paul Cc: Gustavo Padovan Reviewed-by: Sean Paul Signed-off-by: Gustavo Padovan Link: http://patchwork.freedesktop.org/patch/msgid/20170629125930.821-2-chris@chris-wilson.co.uk --- drivers/dma-buf/sw_sync.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 69c5ff36e2f9..4d5d8c5e2534 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -219,7 +219,7 @@ static bool timeline_fence_signaled(struct dma_fence *fence) { struct sync_timeline *parent = dma_fence_parent(fence); - return (fence->seqno > parent->value) ? false : true; + return !__dma_fence_is_later(fence->seqno, parent->value); } static bool timeline_fence_enable_signaling(struct dma_fence *fence) From 8f66d3aa1735bc95ae58d846a157357e8d41abb8 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 29 Jun 2017 13:59:26 +0100 Subject: [PATCH 0142/1795] dma-buf/sw-sync: Prevent user overflow on timeline advance The timeline is u32, which limits any single advance to INT_MAX so that we can detect all fences that need signaling. Signed-off-by: Chris Wilson Cc: Sumit Semwal Cc: Sean Paul Cc: Gustavo Padovan Reviewed-by: Sean Paul Signed-off-by: Gustavo Padovan Link: http://patchwork.freedesktop.org/patch/msgid/20170629125930.821-3-chris@chris-wilson.co.uk --- drivers/dma-buf/sw_sync.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 4d5d8c5e2534..0e676d08aa70 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -345,6 +345,11 @@ static long sw_sync_ioctl_inc(struct sync_timeline *obj, unsigned long arg) if (copy_from_user(&value, (void __user *)arg, sizeof(value))) return -EFAULT; + while (value > INT_MAX) { + sync_timeline_signal(obj, INT_MAX); + value -= INT_MAX; + } + sync_timeline_signal(obj, value); return 0; From a6aa8fca4d792c72947e341d7842d2f700534335 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 29 Jun 2017 13:59:27 +0100 Subject: [PATCH 0143/1795] dma-buf/sw-sync: Reduce irqsave/irqrestore from known context If we know the context under which we are called, then we can use the simpler form of spin_lock_irq (saving the save/restore). 
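As background for the conversion below: spin_lock_irqsave() snapshots and later restores the caller's interrupt state, while spin_lock_irq() may be used when the caller is known to run with interrupts enabled (plain process context), skipping the save/restore. A generic sketch of the two forms, not tied to this driver:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Callable from unknown context: preserve the interrupt state. */
static void touch_from_any_context(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        /* ... critical section ... */
        spin_unlock_irqrestore(&demo_lock, flags);
}

/* Known process context with interrupts enabled: the simpler form. */
static void touch_from_process_context(void)
{
        spin_lock_irq(&demo_lock);
        /* ... critical section ... */
        spin_unlock_irq(&demo_lock);
}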
Signed-off-by: Chris Wilson Cc: Sumit Semwal Cc: Sean Paul Cc: Gustavo Padovan Reviewed-by: Sean Paul Signed-off-by: Gustavo Padovan Link: http://patchwork.freedesktop.org/patch/msgid/20170629125930.821-4-chris@chris-wilson.co.uk --- drivers/dma-buf/sw_sync.c | 15 +++++++++------ drivers/dma-buf/sync_debug.c | 14 ++++++-------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 0e676d08aa70..fc733621987d 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -135,12 +135,11 @@ static void sync_timeline_put(struct sync_timeline *obj) */ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc) { - unsigned long flags; struct sync_pt *pt, *next; trace_sync_timeline(obj); - spin_lock_irqsave(&obj->child_list_lock, flags); + spin_lock_irq(&obj->child_list_lock); obj->value += inc; @@ -150,7 +149,7 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc) list_del_init(&pt->active_list); } - spin_unlock_irqrestore(&obj->child_list_lock, flags); + spin_unlock_irq(&obj->child_list_lock); } /** @@ -167,7 +166,6 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc) static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size, unsigned int value) { - unsigned long flags; struct sync_pt *pt; if (size < sizeof(*pt)) @@ -177,13 +175,16 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size, if (!pt) return NULL; - spin_lock_irqsave(&obj->child_list_lock, flags); + spin_lock_irq(&obj->child_list_lock); + sync_timeline_get(obj); dma_fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock, obj->context, value); list_add_tail(&pt->child_list, &obj->child_list_head); INIT_LIST_HEAD(&pt->active_list); - spin_unlock_irqrestore(&obj->child_list_lock, flags); + + spin_unlock_irq(&obj->child_list_lock); + return pt; } @@ -206,9 +207,11 @@ static void timeline_fence_release(struct dma_fence *fence) unsigned long flags; spin_lock_irqsave(fence->lock, flags); + list_del(&pt->child_list); if (!list_empty(&pt->active_list)) list_del(&pt->active_list); + spin_unlock_irqrestore(fence->lock, flags); sync_timeline_put(parent); diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c index 82a6e7f6d37f..0e91632248ba 100644 --- a/drivers/dma-buf/sync_debug.c +++ b/drivers/dma-buf/sync_debug.c @@ -116,17 +116,16 @@ static void sync_print_fence(struct seq_file *s, static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj) { struct list_head *pos; - unsigned long flags; seq_printf(s, "%s: %d\n", obj->name, obj->value); - spin_lock_irqsave(&obj->child_list_lock, flags); + spin_lock_irq(&obj->child_list_lock); list_for_each(pos, &obj->child_list_head) { struct sync_pt *pt = container_of(pos, struct sync_pt, child_list); sync_print_fence(s, &pt->base, false); } - spin_unlock_irqrestore(&obj->child_list_lock, flags); + spin_unlock_irq(&obj->child_list_lock); } static void sync_print_sync_file(struct seq_file *s, @@ -151,12 +150,11 @@ static void sync_print_sync_file(struct seq_file *s, static int sync_debugfs_show(struct seq_file *s, void *unused) { - unsigned long flags; struct list_head *pos; seq_puts(s, "objs:\n--------------\n"); - spin_lock_irqsave(&sync_timeline_list_lock, flags); + spin_lock_irq(&sync_timeline_list_lock); list_for_each(pos, &sync_timeline_list_head) { struct sync_timeline *obj = container_of(pos, struct sync_timeline, @@ -165,11 +163,11 @@ static int 
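The allocation change below follows the common sizeof(*ptr) idiom: when a structure is only ever allocated in one place and at its natural size, there is no need for a caller-supplied size, and tying the allocation to the pointee type keeps it correct by construction. Illustrative sketch with a hypothetical type:

#include <linux/slab.h>

struct point {
        int x, y;
};

static struct point *point_create(void)
{
        /* sizeof(*pt) keeps the allocation in sync with the type,
         * with no size parameter for callers to get wrong. */
        struct point *pt = kzalloc(sizeof(*pt), GFP_KERNEL);

        return pt;      /* NULL on allocation failure */
}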
sync_debugfs_show(struct seq_file *s, void *unused) sync_print_obj(s, obj); seq_putc(s, '\n'); } - spin_unlock_irqrestore(&sync_timeline_list_lock, flags); + spin_unlock_irq(&sync_timeline_list_lock); seq_puts(s, "fences:\n--------------\n"); - spin_lock_irqsave(&sync_file_list_lock, flags); + spin_lock_irq(&sync_file_list_lock); list_for_each(pos, &sync_file_list_head) { struct sync_file *sync_file = container_of(pos, struct sync_file, sync_file_list); @@ -177,7 +175,7 @@ static int sync_debugfs_show(struct seq_file *s, void *unused) sync_print_sync_file(s, sync_file); seq_putc(s, '\n'); } - spin_unlock_irqrestore(&sync_file_list_lock, flags); + spin_unlock_irq(&sync_file_list_lock); return 0; } From 3b52ce44e720c240afc4c4b03140d7b7811b23bd Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 29 Jun 2017 13:59:28 +0100 Subject: [PATCH 0144/1795] dma-buf/sw-sync: sync_pt is private and of fixed size Since sync_pt is only allocated from a single location and is no longer the base class for fences (that is struct dma_fence) it no longer needs a generic unsized allocator. Signed-off-by: Chris Wilson Cc: Sumit Semwal Cc: Sean Paul Cc: Gustavo Padovan Reviewed-by: Sean Paul Signed-off-by: Gustavo Padovan Link: http://patchwork.freedesktop.org/patch/msgid/20170629125930.821-5-chris@chris-wilson.co.uk --- drivers/dma-buf/sw_sync.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index fc733621987d..6effa1ce010e 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -155,7 +155,6 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc) /** * sync_pt_create() - creates a sync pt * @parent: fence's parent sync_timeline - * @size: size to allocate for this pt * @inc: value of the fence * * Creates a new sync_pt as a child of @parent. @size bytes will be @@ -163,15 +162,12 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc) * the generic sync_timeline struct. Returns the sync_pt object or * NULL in case of error. */ -static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size, - unsigned int value) +static struct sync_pt *sync_pt_create(struct sync_timeline *obj, + unsigned int value) { struct sync_pt *pt; - if (size < sizeof(*pt)) - return NULL; - - pt = kzalloc(size, GFP_KERNEL); + pt = kzalloc(sizeof(*pt), GFP_KERNEL); if (!pt) return NULL; @@ -312,7 +308,7 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj, goto err; } - pt = sync_pt_create(obj, sizeof(*pt), data.value); + pt = sync_pt_create(obj, data.value); if (!pt) { err = -ENOMEM; goto err; From d3862e44daa7a0c94d2f6193502a8c49379acfce Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 29 Jun 2017 22:05:32 +0100 Subject: [PATCH 0145/1795] dma-buf/sw-sync: Fix locking around sync_timeline lists The sync_pt were not adding themselves atomically to the timeline lists, corruption imminent. Only a single list is required to track the unsignaled sync_pt, so reduce it and rename the lock more appropriately along with using idiomatic names to distinguish a list from links along it. 
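The fix below amounts to publishing a new entry on the shared timeline list only while holding the timeline lock, so that concurrent signal and free paths never observe a half-linked node. A generic sketch of that pattern (struct and field names are illustrative, not the exact sw_sync code):

#include <linux/list.h>
#include <linux/spinlock.h>

struct timeline {
        spinlock_t lock;
        struct list_head pt_list;       /* unsignaled points, protected by @lock */
};

struct point {
        struct list_head link;
};

static void timeline_add_point(struct timeline *tl, struct point *pt)
{
        INIT_LIST_HEAD(&pt->link);

        spin_lock_irq(&tl->lock);
        list_add_tail(&pt->link, &tl->pt_list); /* publish under the lock */
        spin_unlock_irq(&tl->lock);
}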
v2: Prevent spinlock recursion on free during create (next patch) and fixup crossref in kerneldoc Signed-off-by: Chris Wilson Cc: Sumit Semwal Cc: Sean Paul Cc: Gustavo Padovan Reviewed-by: Sean Paul Signed-off-by: Gustavo Padovan Link: http://patchwork.freedesktop.org/patch/msgid/20170629210532.5617-1-chris@chris-wilson.co.uk --- drivers/dma-buf/sw_sync.c | 48 ++++++++++++++---------------------- drivers/dma-buf/sync_debug.c | 9 +++---- drivers/dma-buf/sync_debug.h | 21 ++++++---------- 3 files changed, 31 insertions(+), 47 deletions(-) diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 6effa1ce010e..f20d18c421a3 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -96,9 +96,8 @@ static struct sync_timeline *sync_timeline_create(const char *name) obj->context = dma_fence_context_alloc(1); strlcpy(obj->name, name, sizeof(obj->name)); - INIT_LIST_HEAD(&obj->child_list_head); - INIT_LIST_HEAD(&obj->active_list_head); - spin_lock_init(&obj->child_list_lock); + INIT_LIST_HEAD(&obj->pt_list); + spin_lock_init(&obj->lock); sync_timeline_debug_add(obj); @@ -139,17 +138,15 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc) trace_sync_timeline(obj); - spin_lock_irq(&obj->child_list_lock); + spin_lock_irq(&obj->lock); obj->value += inc; - list_for_each_entry_safe(pt, next, &obj->active_list_head, - active_list) { + list_for_each_entry_safe(pt, next, &obj->pt_list, link) if (dma_fence_is_signaled_locked(&pt->base)) - list_del_init(&pt->active_list); - } + list_del_init(&pt->link); - spin_unlock_irq(&obj->child_list_lock); + spin_unlock_irq(&obj->lock); } /** @@ -171,15 +168,15 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj, if (!pt) return NULL; - spin_lock_irq(&obj->child_list_lock); - sync_timeline_get(obj); - dma_fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock, + dma_fence_init(&pt->base, &timeline_fence_ops, &obj->lock, obj->context, value); - list_add_tail(&pt->child_list, &obj->child_list_head); - INIT_LIST_HEAD(&pt->active_list); + INIT_LIST_HEAD(&pt->link); - spin_unlock_irq(&obj->child_list_lock); + spin_lock_irq(&obj->lock); + if (!dma_fence_is_signaled_locked(&pt->base)) + list_add_tail(&pt->link, &obj->pt_list); + spin_unlock_irq(&obj->lock); return pt; } @@ -200,15 +197,15 @@ static void timeline_fence_release(struct dma_fence *fence) { struct sync_pt *pt = dma_fence_to_sync_pt(fence); struct sync_timeline *parent = dma_fence_parent(fence); - unsigned long flags; - spin_lock_irqsave(fence->lock, flags); + if (!list_empty(&pt->link)) { + unsigned long flags; - list_del(&pt->child_list); - if (!list_empty(&pt->active_list)) - list_del(&pt->active_list); - - spin_unlock_irqrestore(fence->lock, flags); + spin_lock_irqsave(fence->lock, flags); + if (!list_empty(&pt->link)) + list_del(&pt->link); + spin_unlock_irqrestore(fence->lock, flags); + } sync_timeline_put(parent); dma_fence_free(fence); @@ -223,13 +220,6 @@ static bool timeline_fence_signaled(struct dma_fence *fence) static bool timeline_fence_enable_signaling(struct dma_fence *fence) { - struct sync_pt *pt = dma_fence_to_sync_pt(fence); - struct sync_timeline *parent = dma_fence_parent(fence); - - if (timeline_fence_signaled(fence)) - return false; - - list_add_tail(&pt->active_list, &parent->active_list_head); return true; } diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c index 0e91632248ba..2264a075f6a9 100644 --- a/drivers/dma-buf/sync_debug.c +++ b/drivers/dma-buf/sync_debug.c @@ -119,13 +119,12 @@ 
static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj) seq_printf(s, "%s: %d\n", obj->name, obj->value); - spin_lock_irq(&obj->child_list_lock); - list_for_each(pos, &obj->child_list_head) { - struct sync_pt *pt = - container_of(pos, struct sync_pt, child_list); + spin_lock_irq(&obj->lock); + list_for_each(pos, &obj->pt_list) { + struct sync_pt *pt = container_of(pos, struct sync_pt, link); sync_print_fence(s, &pt->base, false); } - spin_unlock_irq(&obj->child_list_lock); + spin_unlock_irq(&obj->lock); } static void sync_print_sync_file(struct seq_file *s, diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h index 26fe8b9907b3..6a2a8e69a7d0 100644 --- a/drivers/dma-buf/sync_debug.h +++ b/drivers/dma-buf/sync_debug.h @@ -24,42 +24,37 @@ * struct sync_timeline - sync object * @kref: reference count on fence. * @name: name of the sync_timeline. Useful for debugging - * @child_list_head: list of children sync_pts for this sync_timeline - * @child_list_lock: lock protecting @child_list_head and fence.status - * @active_list_head: list of active (unsignaled/errored) sync_pts + * @lock: lock protecting @pt_list and @value + * @pt_list: list of active (unsignaled/errored) sync_pts * @sync_timeline_list: membership in global sync_timeline_list */ struct sync_timeline { struct kref kref; char name[32]; - /* protected by child_list_lock */ + /* protected by lock */ u64 context; int value; - struct list_head child_list_head; - spinlock_t child_list_lock; - - struct list_head active_list_head; + struct list_head pt_list; + spinlock_t lock; struct list_head sync_timeline_list; }; static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence) { - return container_of(fence->lock, struct sync_timeline, child_list_lock); + return container_of(fence->lock, struct sync_timeline, lock); } /** * struct sync_pt - sync_pt object * @base: base fence object - * @child_list: sync timeline child's list - * @active_list: sync timeline active child's list + * @link: link on the sync timeline's list */ struct sync_pt { struct dma_fence base; - struct list_head child_list; - struct list_head active_list; + struct list_head link; }; #ifdef CONFIG_SW_SYNC From f1e8c67123cf171e2b0357e885e426328b241d7d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 29 Jun 2017 22:12:53 +0100 Subject: [PATCH 0146/1795] dma-buf/sw-sync: Use an rbtree to sort fences in the timeline Reduce the list iteration when incrementing the timeline by storing the fences in increasing order. v2: Prevent spinlock recursion on free during create v3: Fixup rebase conflict inside comments that escaped the compiler. 
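The diff below uses the standard kernel rbtree insertion pattern: walk down from the root comparing keys to find the parent slot, then rb_link_node() and rb_insert_color() to link and rebalance. A generic version of that pattern (key and struct names are illustrative, and duplicate handling is omitted for brevity):

#include <linux/rbtree.h>

struct seq_node {
        struct rb_node node;
        unsigned int seqno;
};

static void seq_tree_insert(struct rb_root *root, struct seq_node *new)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;

        while (*p) {
                struct seq_node *other;

                parent = *p;
                other = rb_entry(parent, struct seq_node, node);
                if (new->seqno < other->seqno)
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }

        rb_link_node(&new->node, parent, p);    /* link into the found slot */
        rb_insert_color(&new->node, root);      /* rebalance the tree */
}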
Signed-off-by: Chris Wilson Cc: Sumit Semwal Cc: Sean Paul Cc: Gustavo Padovan Reviewed-by: Sean Paul Signed-off-by: Gustavo Padovan Link: http://patchwork.freedesktop.org/patch/msgid/20170629211253.22766-1-chris@chris-wilson.co.uk --- drivers/dma-buf/sw_sync.c | 49 +++++++++++++++++++++++++++++++----- drivers/dma-buf/sync_debug.h | 5 ++++ 2 files changed, 48 insertions(+), 6 deletions(-) diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index f20d18c421a3..af1bc84802e5 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -96,6 +96,7 @@ static struct sync_timeline *sync_timeline_create(const char *name) obj->context = dma_fence_context_alloc(1); strlcpy(obj->name, name, sizeof(obj->name)); + obj->pt_tree = RB_ROOT; INIT_LIST_HEAD(&obj->pt_list); spin_lock_init(&obj->lock); @@ -142,9 +143,13 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc) obj->value += inc; - list_for_each_entry_safe(pt, next, &obj->pt_list, link) - if (dma_fence_is_signaled_locked(&pt->base)) - list_del_init(&pt->link); + list_for_each_entry_safe(pt, next, &obj->pt_list, link) { + if (!dma_fence_is_signaled_locked(&pt->base)) + break; + + list_del_init(&pt->link); + rb_erase(&pt->node, &obj->pt_tree); + } spin_unlock_irq(&obj->lock); } @@ -174,8 +179,38 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj, INIT_LIST_HEAD(&pt->link); spin_lock_irq(&obj->lock); - if (!dma_fence_is_signaled_locked(&pt->base)) - list_add_tail(&pt->link, &obj->pt_list); + if (!dma_fence_is_signaled_locked(&pt->base)) { + struct rb_node **p = &obj->pt_tree.rb_node; + struct rb_node *parent = NULL; + + while (*p) { + struct sync_pt *other; + int cmp; + + parent = *p; + other = rb_entry(parent, typeof(*pt), node); + cmp = value - other->base.seqno; + if (cmp > 0) { + p = &parent->rb_right; + } else if (cmp < 0) { + p = &parent->rb_left; + } else { + if (dma_fence_get_rcu(&other->base)) { + dma_fence_put(&pt->base); + pt = other; + goto unlock; + } + p = &parent->rb_left; + } + } + rb_link_node(&pt->node, parent, p); + rb_insert_color(&pt->node, &obj->pt_tree); + + parent = rb_next(&pt->node); + list_add_tail(&pt->link, + parent ? &rb_entry(parent, typeof(*pt), node)->link : &obj->pt_list); + } +unlock: spin_unlock_irq(&obj->lock); return pt; @@ -202,8 +237,10 @@ static void timeline_fence_release(struct dma_fence *fence) unsigned long flags; spin_lock_irqsave(fence->lock, flags); - if (!list_empty(&pt->link)) + if (!list_empty(&pt->link)) { list_del(&pt->link); + rb_erase(&pt->node, &parent->pt_tree); + } spin_unlock_irqrestore(fence->lock, flags); } diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h index 6a2a8e69a7d0..d615a89f774c 100644 --- a/drivers/dma-buf/sync_debug.h +++ b/drivers/dma-buf/sync_debug.h @@ -14,6 +14,7 @@ #define _LINUX_SYNC_H #include +#include #include #include @@ -25,6 +26,7 @@ * @kref: reference count on fence. * @name: name of the sync_timeline. 
Useful for debugging * @lock: lock protecting @pt_list and @value + * @pt_tree: rbtree of active (unsignaled/errored) sync_pts * @pt_list: list of active (unsignaled/errored) sync_pts * @sync_timeline_list: membership in global sync_timeline_list */ @@ -36,6 +38,7 @@ struct sync_timeline { u64 context; int value; + struct rb_root pt_tree; struct list_head pt_list; spinlock_t lock; @@ -51,10 +54,12 @@ static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence) * struct sync_pt - sync_pt object * @base: base fence object * @link: link on the sync timeline's list + * @node: node in the sync timeline's tree */ struct sync_pt { struct dma_fence base; struct list_head link; + struct rb_node node; }; #ifdef CONFIG_SW_SYNC From 0b20a0f8c3cb6f74fe326101b62eeb5e2c56a53c Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 30 Jun 2017 12:36:44 +0300 Subject: [PATCH 0147/1795] drm: Add old state pointer to CRTC .enable() helper function The old state is useful for drivers that need to perform operations at enable time that depend on the transition between the old and new states. While at it, rename the operation to .atomic_enable() to be consistent with .atomic_disable(), as the .enable() operation is used by atomic helpers only. Signed-off-by: Laurent Pinchart Acked-by: Maxime Ripard # for sun4i Acked-by: Philipp Zabel # for imx-drm and mediatek Acked-by: Alexey Brodkin # for arcpgu Acked-by: Boris Brezillon # for atmel-hlcdc Acked-by: Liviu Dudau # for hdlcd and mali-dp Acked-by: Stefan Agner # for fsl-dcu Tested-by: Philippe Cornu # for stm Acked-by: Philippe Cornu # for stm Acked-by: Vincent Abriou # for sti Reviewed-by: Thomas Hellstrom # for vmwgfx Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170630093646.7928-2-laurent.pinchart+renesas@ideasonboard.com --- drivers/gpu/drm/arc/arcpgu_crtc.c | 5 +- drivers/gpu/drm/arm/hdlcd_crtc.c | 5 +- drivers/gpu/drm/arm/malidp_crtc.c | 5 +- .../gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 5 +- drivers/gpu/drm/drm_atomic_helper.c | 7 ++- drivers/gpu/drm/drm_simple_kms_helper.c | 5 +- drivers/gpu/drm/exynos/exynos_drm_crtc.c | 5 +- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c | 5 +- .../gpu/drm/hisilicon/hibmc/hibmc_drm_de.c | 5 +- .../gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 5 +- drivers/gpu/drm/imx/ipuv3-crtc.c | 5 +- drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 5 +- drivers/gpu/drm/meson/meson_crtc.c | 5 +- drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 5 +- drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 5 +- drivers/gpu/drm/omapdrm/omap_crtc.c | 5 +- drivers/gpu/drm/qxl/qxl_display.c | 5 +- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 5 +- drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 5 +- drivers/gpu/drm/sti/sti_crtc.c | 5 +- drivers/gpu/drm/stm/ltdc.c | 5 +- drivers/gpu/drm/sun4i/sun4i_crtc.c | 5 +- drivers/gpu/drm/tegra/dc.c | 5 +- drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 8 ++- drivers/gpu/drm/vc4/vc4_crtc.c | 5 +- drivers/gpu/drm/virtio/virtgpu_display.c | 5 +- drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 7 ++- drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 7 ++- drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 5 +- drivers/gpu/drm/zte/zx_vou.c | 5 +- include/drm/drm_modeset_helper_vtables.h | 56 ++++++++++--------- 31 files changed, 128 insertions(+), 87 deletions(-) diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c index 1f306781c9d5..c9bc6a90ac83 100644 --- a/drivers/gpu/drm/arc/arcpgu_crtc.c +++ b/drivers/gpu/drm/arc/arcpgu_crtc.c @@ -119,7 +119,8 @@ static void arc_pgu_crtc_mode_set_nofb(struct 
drm_crtc *crtc) clk_set_rate(arcpgu->clk, m->crtc_clock * 1000); } -static void arc_pgu_crtc_enable(struct drm_crtc *crtc) +static void arc_pgu_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc); @@ -161,9 +162,9 @@ static const struct drm_crtc_helper_funcs arc_pgu_crtc_helper_funcs = { .mode_set = drm_helper_crtc_mode_set, .mode_set_base = drm_helper_crtc_mode_set_base, .mode_set_nofb = arc_pgu_crtc_mode_set_nofb, - .enable = arc_pgu_crtc_enable, .disable = arc_pgu_crtc_disable, .atomic_begin = arc_pgu_crtc_atomic_begin, + .atomic_enable = arc_pgu_crtc_atomic_enable, }; static void arc_pgu_plane_atomic_update(struct drm_plane *plane, diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index d67b6f15e8b8..2b7f4f05d91f 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c @@ -165,7 +165,8 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc) clk_set_rate(hdlcd->clk, m->crtc_clock * 1000); } -static void hdlcd_crtc_enable(struct drm_crtc *crtc) +static void hdlcd_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); @@ -218,10 +219,10 @@ static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = { - .enable = hdlcd_crtc_enable, .disable = hdlcd_crtc_disable, .atomic_check = hdlcd_crtc_atomic_check, .atomic_begin = hdlcd_crtc_atomic_begin, + .atomic_enable = hdlcd_crtc_atomic_enable, }; static int hdlcd_plane_atomic_check(struct drm_plane *plane, diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c index 4bb38a21efec..8e5b1c0181ab 100644 --- a/drivers/gpu/drm/arm/malidp_crtc.c +++ b/drivers/gpu/drm/arm/malidp_crtc.c @@ -46,7 +46,8 @@ static enum drm_mode_status malidp_crtc_mode_valid(struct drm_crtc *crtc, return MODE_OK; } -static void malidp_crtc_enable(struct drm_crtc *crtc) +static void malidp_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct malidp_drm *malidp = crtc_to_malidp_device(crtc); struct malidp_hw_device *hwdev = malidp->dev; @@ -408,9 +409,9 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc, static const struct drm_crtc_helper_funcs malidp_crtc_helper_funcs = { .mode_valid = malidp_crtc_mode_valid, - .enable = malidp_crtc_enable, .disable = malidp_crtc_disable, .atomic_check = malidp_crtc_atomic_check, + .atomic_enable = malidp_crtc_atomic_enable, }; static struct drm_crtc_state *malidp_crtc_duplicate_state(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index f6b8c5908a20..e54e503180f4 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -183,7 +183,8 @@ static void atmel_hlcdc_crtc_disable(struct drm_crtc *c) pm_runtime_put_sync(dev->dev); } -static void atmel_hlcdc_crtc_enable(struct drm_crtc *c) +static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c, + struct drm_crtc_state *old_state) { struct drm_device *dev = c->dev; struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); @@ -320,10 +321,10 @@ static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = { .mode_set_nofb = atmel_hlcdc_crtc_mode_set_nofb, .mode_set_base = drm_helper_crtc_mode_set_base, .disable = atmel_hlcdc_crtc_disable, - .enable = 
atmel_hlcdc_crtc_enable, .atomic_check = atmel_hlcdc_crtc_atomic_check, .atomic_begin = atmel_hlcdc_crtc_atomic_begin, .atomic_flush = atmel_hlcdc_crtc_atomic_flush, + .atomic_enable = atmel_hlcdc_crtc_atomic_enable, }; static void atmel_hlcdc_crtc_destroy(struct drm_crtc *c) diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 2f269e4267da..23e4661a62fe 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1069,12 +1069,13 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, struct drm_atomic_state *old_state) { struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state; struct drm_crtc_state *new_crtc_state; struct drm_connector *connector; struct drm_connector_state *new_conn_state; int i; - for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) { + for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) { const struct drm_crtc_helper_funcs *funcs; /* Need to filter out CRTCs where only planes change. */ @@ -1090,8 +1091,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n", crtc->base.id, crtc->name); - if (funcs->enable) - funcs->enable(crtc); + if (funcs->atomic_enable) + funcs->atomic_enable(crtc, old_crtc_state); else funcs->commit(crtc); } diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c index e084f9f8ca66..58c27ab1756f 100644 --- a/drivers/gpu/drm/drm_simple_kms_helper.c +++ b/drivers/gpu/drm/drm_simple_kms_helper.c @@ -40,7 +40,8 @@ static int drm_simple_kms_crtc_check(struct drm_crtc *crtc, return drm_atomic_add_affected_planes(state->state, crtc); } -static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc) +static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct drm_simple_display_pipe *pipe; @@ -64,8 +65,8 @@ static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc) static const struct drm_crtc_helper_funcs drm_simple_kms_crtc_helper_funcs = { .atomic_check = drm_simple_kms_crtc_check, + .atomic_enable = drm_simple_kms_crtc_enable, .disable = drm_simple_kms_crtc_disable, - .enable = drm_simple_kms_crtc_enable, }; static const struct drm_crtc_funcs drm_simple_kms_crtc_funcs = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index d72777f6411a..b7e2fadb6442 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -21,7 +21,8 @@ #include "exynos_drm_drv.h" #include "exynos_drm_plane.h" -static void exynos_drm_crtc_enable(struct drm_crtc *crtc) +static void exynos_drm_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); @@ -82,11 +83,11 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { - .enable = exynos_drm_crtc_enable, .disable = exynos_drm_crtc_disable, .atomic_check = exynos_crtc_atomic_check, .atomic_begin = exynos_crtc_atomic_begin, .atomic_flush = exynos_crtc_atomic_flush, + .atomic_enable = exynos_drm_crtc_atomic_enable, }; void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc) diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c index cc4e944a1d3c..0e3752437e44 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c +++ 
b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c @@ -63,7 +63,8 @@ static void fsl_dcu_drm_crtc_atomic_disable(struct drm_crtc *crtc, clk_disable_unprepare(fsl_dev->pix_clk); } -static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc) +static void fsl_dcu_drm_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct drm_device *dev = crtc->dev; struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; @@ -133,7 +134,7 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = { .atomic_disable = fsl_dcu_drm_crtc_atomic_disable, .atomic_flush = fsl_dcu_drm_crtc_atomic_flush, - .enable = fsl_dcu_drm_crtc_enable, + .atomic_enable = fsl_dcu_drm_crtc_atomic_enable, .mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb, }; diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c index 59542bddc980..49ef47c0c81a 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c @@ -192,7 +192,8 @@ static struct drm_plane *hibmc_plane_init(struct hibmc_drm_private *priv) return plane; } -static void hibmc_crtc_enable(struct drm_crtc *crtc) +static void hibmc_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { unsigned int reg; struct hibmc_drm_private *priv = crtc->dev->dev_private; @@ -453,11 +454,11 @@ static const struct drm_crtc_funcs hibmc_crtc_funcs = { }; static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = { - .enable = hibmc_crtc_enable, .disable = hibmc_crtc_disable, .mode_set_nofb = hibmc_crtc_mode_set_nofb, .atomic_begin = hibmc_crtc_atomic_begin, .atomic_flush = hibmc_crtc_atomic_flush, + .atomic_enable = hibmc_crtc_atomic_enable, }; int hibmc_de_init(struct hibmc_drm_private *priv) diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index c96c228a9898..8e00818b24fc 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -467,7 +467,8 @@ static void ade_dump_regs(void __iomem *base) static void ade_dump_regs(void __iomem *base) { } #endif -static void ade_crtc_enable(struct drm_crtc *crtc) +static void ade_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct ade_crtc *acrtc = to_ade_crtc(crtc); struct ade_hw_ctx *ctx = acrtc->ctx; @@ -553,11 +554,11 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs ade_crtc_helper_funcs = { - .enable = ade_crtc_enable, .disable = ade_crtc_disable, .mode_set_nofb = ade_crtc_mode_set_nofb, .atomic_begin = ade_crtc_atomic_begin, .atomic_flush = ade_crtc_atomic_flush, + .atomic_enable = ade_crtc_atomic_enable, }; static const struct drm_crtc_funcs ade_crtc_funcs = { diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 5456c15d962c..53e0b24beda6 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -50,7 +50,8 @@ static inline struct ipu_crtc *to_ipu_crtc(struct drm_crtc *crtc) return container_of(crtc, struct ipu_crtc, base); } -static void ipu_crtc_enable(struct drm_crtc *crtc) +static void ipu_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); @@ -293,7 +294,7 @@ static const struct drm_crtc_helper_funcs 
ipu_helper_funcs = { .atomic_check = ipu_crtc_atomic_check, .atomic_begin = ipu_crtc_atomic_begin, .atomic_disable = ipu_crtc_atomic_disable, - .enable = ipu_crtc_enable, + .atomic_enable = ipu_crtc_atomic_enable, }; static void ipu_put_resources(struct ipu_crtc *ipu_crtc) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 6582e1f56d37..5971b0827d1b 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -366,7 +366,8 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) } } -static void mtk_drm_crtc_enable(struct drm_crtc *crtc) +static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; @@ -487,10 +488,10 @@ static const struct drm_crtc_funcs mtk_crtc_funcs = { static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = { .mode_fixup = mtk_drm_crtc_mode_fixup, .mode_set_nofb = mtk_drm_crtc_mode_set_nofb, - .enable = mtk_drm_crtc_enable, .disable = mtk_drm_crtc_disable, .atomic_begin = mtk_drm_crtc_atomic_begin, .atomic_flush = mtk_drm_crtc_atomic_flush, + .atomic_enable = mtk_drm_crtc_atomic_enable, }; static int mtk_drm_crtc_init(struct drm_device *drm, diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c index c986eb03b9d9..6f148307b0e0 100644 --- a/drivers/gpu/drm/meson/meson_crtc.c +++ b/drivers/gpu/drm/meson/meson_crtc.c @@ -79,7 +79,8 @@ static const struct drm_crtc_funcs meson_crtc_funcs = { }; -static void meson_crtc_enable(struct drm_crtc *crtc) +static void meson_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct meson_crtc *meson_crtc = to_meson_crtc(crtc); struct drm_crtc_state *crtc_state = crtc->state; @@ -149,10 +150,10 @@ static void meson_crtc_atomic_flush(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs meson_crtc_helper_funcs = { - .enable = meson_crtc_enable, .disable = meson_crtc_disable, .atomic_begin = meson_crtc_atomic_begin, .atomic_flush = meson_crtc_atomic_flush, + .atomic_enable = meson_crtc_atomic_enable, }; void meson_crtc_irq(struct meson_drm *priv) diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 615e1def64d9..9c20133f3f8d 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c @@ -295,7 +295,8 @@ static void mdp4_crtc_disable(struct drm_crtc *crtc) mdp4_crtc->enabled = false; } -static void mdp4_crtc_enable(struct drm_crtc *crtc) +static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); struct mdp4_kms *mdp4_kms = get_kms(crtc); @@ -493,10 +494,10 @@ static const struct drm_crtc_funcs mdp4_crtc_funcs = { static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = { .mode_set_nofb = mdp4_crtc_mode_set_nofb, .disable = mdp4_crtc_disable, - .enable = mdp4_crtc_enable, .atomic_check = mdp4_crtc_atomic_check, .atomic_begin = mdp4_crtc_atomic_begin, .atomic_flush = mdp4_crtc_atomic_flush, + .atomic_enable = mdp4_crtc_atomic_enable, }; static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index cb5415d6c04b..d39d9d24d169 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ 
-429,7 +429,8 @@ static void mdp5_crtc_disable(struct drm_crtc *crtc) mdp5_crtc->enabled = false; } -static void mdp5_crtc_enable(struct drm_crtc *crtc) +static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); @@ -940,10 +941,10 @@ static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = { static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { .mode_set_nofb = mdp5_crtc_mode_set_nofb, .disable = mdp5_crtc_disable, - .enable = mdp5_crtc_enable, .atomic_check = mdp5_crtc_atomic_check, .atomic_begin = mdp5_crtc_atomic_begin, .atomic_flush = mdp5_crtc_atomic_flush, + .atomic_enable = mdp5_crtc_atomic_enable, }; static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index dd0ef40ca469..7a1b7a9cc270 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -356,7 +356,8 @@ static void omap_crtc_arm_event(struct drm_crtc *crtc) } } -static void omap_crtc_enable(struct drm_crtc *crtc) +static void omap_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct omap_crtc *omap_crtc = to_omap_crtc(crtc); int ret; @@ -568,10 +569,10 @@ static const struct drm_crtc_funcs omap_crtc_funcs = { static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = { .mode_set_nofb = omap_crtc_mode_set_nofb, .disable = omap_crtc_disable, - .enable = omap_crtc_enable, .atomic_check = omap_crtc_atomic_check, .atomic_begin = omap_crtc_atomic_begin, .atomic_flush = omap_crtc_atomic_flush, + .atomic_enable = omap_crtc_atomic_enable, }; /* ----------------------------------------------------------------------------- diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 7ede5f131a5c..ea95e7e7cc7f 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -447,7 +447,8 @@ static void qxl_mode_set_nofb(struct drm_crtc *crtc) } -static void qxl_crtc_enable(struct drm_crtc *crtc) +static void qxl_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { DRM_DEBUG("\n"); } @@ -466,8 +467,8 @@ static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = { .disable = qxl_crtc_disable, .mode_fixup = qxl_crtc_mode_fixup, .mode_set_nofb = qxl_mode_set_nofb, - .enable = qxl_crtc_enable, .atomic_flush = qxl_crtc_atomic_flush, + .atomic_enable = qxl_crtc_atomic_enable, }; static int qxl_primary_atomic_check(struct drm_plane *plane, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 345eff72f581..6aa3fa8d06f9 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -552,7 +552,8 @@ void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc) * CRTC Functions */ -static void rcar_du_crtc_enable(struct drm_crtc *crtc) +static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); @@ -610,9 +611,9 @@ static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc, static const struct drm_crtc_helper_funcs crtc_helper_funcs = { .disable = rcar_du_crtc_disable, - .enable = rcar_du_crtc_enable, .atomic_begin = rcar_du_crtc_atomic_begin, .atomic_flush = rcar_du_crtc_atomic_flush, + .atomic_enable = rcar_du_crtc_atomic_enable, }; static 
int rcar_du_crtc_enable_vblank(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 5d450332c2fd..bb59f7410634 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -871,7 +871,8 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, return true; } -static void vop_crtc_enable(struct drm_crtc *crtc) +static void vop_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct vop *vop = to_vop(crtc); const struct vop_data *vop_data = vop->data; @@ -1079,11 +1080,11 @@ static void vop_crtc_atomic_begin(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = { - .enable = vop_crtc_enable, .disable = vop_crtc_disable, .mode_fixup = vop_crtc_mode_fixup, .atomic_flush = vop_crtc_atomic_flush, .atomic_begin = vop_crtc_atomic_begin, + .atomic_enable = vop_crtc_atomic_enable, }; static void vop_crtc_destroy(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c index d45a4335df5d..bb864345fedf 100644 --- a/drivers/gpu/drm/sti/sti_crtc.c +++ b/drivers/gpu/drm/sti/sti_crtc.c @@ -20,7 +20,8 @@ #include "sti_vid.h" #include "sti_vtg.h" -static void sti_crtc_enable(struct drm_crtc *crtc) +static void sti_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct sti_mixer *mixer = to_sti_mixer(crtc); @@ -222,10 +223,10 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { - .enable = sti_crtc_enable, .disable = sti_crtc_disabling, .mode_set_nofb = sti_crtc_mode_set_nofb, .atomic_flush = sti_crtc_atomic_flush, + .atomic_enable = sti_crtc_atomic_enable, }; static void sti_crtc_destroy(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index 1b9483d4f2a4..337fce004d08 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ b/drivers/gpu/drm/stm/ltdc.c @@ -386,7 +386,8 @@ static void ltdc_crtc_load_lut(struct drm_crtc *crtc) ldev->clut[i]); } -static void ltdc_crtc_enable(struct drm_crtc *crtc) +static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct ltdc_device *ldev = crtc_to_ltdc(crtc); @@ -524,10 +525,10 @@ static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc, static struct drm_crtc_helper_funcs ltdc_crtc_helper_funcs = { .load_lut = ltdc_crtc_load_lut, - .enable = ltdc_crtc_enable, .disable = ltdc_crtc_disable, .mode_set_nofb = ltdc_crtc_mode_set_nofb, .atomic_flush = ltdc_crtc_atomic_flush, + .atomic_enable = ltdc_crtc_atomic_enable, }; int ltdc_crtc_enable_vblank(struct drm_device *ddev, unsigned int pipe) diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c index f8c70439d1e2..509e4d8014f3 100644 --- a/drivers/gpu/drm/sun4i/sun4i_crtc.c +++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c @@ -86,7 +86,8 @@ static void sun4i_crtc_disable(struct drm_crtc *crtc) } } -static void sun4i_crtc_enable(struct drm_crtc *crtc) +static void sun4i_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); @@ -98,8 +99,8 @@ static void sun4i_crtc_enable(struct drm_crtc *crtc) static const struct drm_crtc_helper_funcs sun4i_crtc_helper_funcs = { .atomic_begin = sun4i_crtc_atomic_begin, .atomic_flush = sun4i_crtc_atomic_flush, + .atomic_enable = sun4i_crtc_atomic_enable, .disable = 
sun4i_crtc_disable, - .enable = sun4i_crtc_enable, }; static int sun4i_crtc_enable_vblank(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index c875f11786b9..2e0d167cb657 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -1243,7 +1243,8 @@ static void tegra_crtc_disable(struct drm_crtc *crtc) pm_runtime_put_sync(dc->dev); } -static void tegra_crtc_enable(struct drm_crtc *crtc) +static void tegra_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct drm_display_mode *mode = &crtc->state->adjusted_mode; struct tegra_dc_state *state = to_dc_state(crtc->state); @@ -1352,10 +1353,10 @@ static void tegra_crtc_atomic_flush(struct drm_crtc *crtc, static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = { .disable = tegra_crtc_disable, - .enable = tegra_crtc_enable, .atomic_check = tegra_crtc_atomic_check, .atomic_begin = tegra_crtc_atomic_begin, .atomic_flush = tegra_crtc_atomic_flush, + .atomic_enable = tegra_crtc_atomic_enable, }; static irqreturn_t tegra_dc_irq(int irq, void *data) diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index d524ed0d5146..6c5892763d27 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -504,6 +504,12 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc) mutex_unlock(&tilcdc_crtc->enable_lock); } +static void tilcdc_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + tilcdc_crtc_enable(crtc); +} + static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown) { struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); @@ -729,9 +735,9 @@ static const struct drm_crtc_funcs tilcdc_crtc_funcs = { static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = { .mode_fixup = tilcdc_crtc_mode_fixup, - .enable = tilcdc_crtc_enable, .disable = tilcdc_crtc_disable, .atomic_check = tilcdc_crtc_atomic_check, + .atomic_enable = tilcdc_crtc_atomic_enable, }; int tilcdc_crtc_max_width(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index f20c01759c0d..4c03a565b276 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -533,7 +533,8 @@ static void vc4_crtc_disable(struct drm_crtc *crtc) } } -static void vc4_crtc_enable(struct drm_crtc *crtc) +static void vc4_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); @@ -870,10 +871,10 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = { static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = { .mode_set_nofb = vc4_crtc_mode_set_nofb, .disable = vc4_crtc_disable, - .enable = vc4_crtc_enable, .mode_valid = vc4_crtc_mode_valid, .atomic_check = vc4_crtc_atomic_check, .atomic_flush = vc4_crtc_atomic_flush, + .atomic_enable = vc4_crtc_atomic_enable, }; static const struct vc4_crtc_data pv0_data = { diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index d51bd4521f17..03a3c12ed100 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -113,7 +113,8 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc) crtc->mode.vdisplay, 0, 0); } -static void virtio_gpu_crtc_enable(struct drm_crtc *crtc) +static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { } @@ -145,11 
+146,11 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = { - .enable = virtio_gpu_crtc_enable, .disable = virtio_gpu_crtc_disable, .mode_set_nofb = virtio_gpu_crtc_mode_set_nofb, .atomic_check = virtio_gpu_crtc_atomic_check, .atomic_flush = virtio_gpu_crtc_atomic_flush, + .atomic_enable = virtio_gpu_crtc_atomic_enable, }; static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index f8acd3a15523..832b83c582c2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -203,7 +203,7 @@ static void vmw_ldu_crtc_mode_set_nofb(struct drm_crtc *crtc) } /** - * vmw_ldu_crtc_helper_enable - Noop + * vmw_ldu_crtc_atomic_enable - Noop * * @crtc: CRTC associated with the new screen * @@ -212,7 +212,8 @@ static void vmw_ldu_crtc_mode_set_nofb(struct drm_crtc *crtc) * but since for LDU the display plane is closely tied to the * CRTC, it makes more sense to do those at plane update time. */ -static void vmw_ldu_crtc_helper_enable(struct drm_crtc *crtc) +static void vmw_ldu_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { } @@ -376,12 +377,12 @@ drm_plane_helper_funcs vmw_ldu_primary_plane_helper_funcs = { }; static const struct drm_crtc_helper_funcs vmw_ldu_crtc_helper_funcs = { - .enable = vmw_ldu_crtc_helper_enable, .disable = vmw_ldu_crtc_helper_disable, .mode_set_nofb = vmw_ldu_crtc_mode_set_nofb, .atomic_check = vmw_du_crtc_atomic_check, .atomic_begin = vmw_du_crtc_atomic_begin, .atomic_flush = vmw_du_crtc_atomic_flush, + .atomic_enable = vmw_ldu_crtc_atomic_enable, }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 1cb826c503bf..8ba3bad06909 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -270,13 +270,14 @@ static void vmw_sou_crtc_helper_prepare(struct drm_crtc *crtc) } /** - * vmw_sou_crtc_helper_enable - Noop + * vmw_sou_crtc_atomic_enable - Noop * * @crtc: CRTC associated with the new screen * * This is called after a mode set has been completed. 
*/ -static void vmw_sou_crtc_helper_enable(struct drm_crtc *crtc) +static void vmw_sou_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { } @@ -573,12 +574,12 @@ drm_plane_helper_funcs vmw_sou_primary_plane_helper_funcs = { static const struct drm_crtc_helper_funcs vmw_sou_crtc_helper_funcs = { .prepare = vmw_sou_crtc_helper_prepare, - .enable = vmw_sou_crtc_helper_enable, .disable = vmw_sou_crtc_helper_disable, .mode_set_nofb = vmw_sou_crtc_mode_set_nofb, .atomic_check = vmw_du_crtc_atomic_check, .atomic_begin = vmw_du_crtc_atomic_begin, .atomic_flush = vmw_du_crtc_atomic_flush, + .atomic_enable = vmw_sou_crtc_atomic_enable, }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 4eb93b47d6db..f2b39d9c51cc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -412,7 +412,8 @@ static void vmw_stdu_crtc_helper_prepare(struct drm_crtc *crtc) } -static void vmw_stdu_crtc_helper_enable(struct drm_crtc *crtc) +static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct vmw_private *dev_priv; struct vmw_screen_target_display_unit *stdu; @@ -1415,12 +1416,12 @@ drm_plane_helper_funcs vmw_stdu_primary_plane_helper_funcs = { static const struct drm_crtc_helper_funcs vmw_stdu_crtc_helper_funcs = { .prepare = vmw_stdu_crtc_helper_prepare, - .enable = vmw_stdu_crtc_helper_enable, .disable = vmw_stdu_crtc_helper_disable, .mode_set_nofb = vmw_stdu_crtc_mode_set_nofb, .atomic_check = vmw_du_crtc_atomic_check, .atomic_begin = vmw_du_crtc_atomic_begin, .atomic_flush = vmw_du_crtc_atomic_flush, + .atomic_enable = vmw_stdu_crtc_atomic_enable, }; diff --git a/drivers/gpu/drm/zte/zx_vou.c b/drivers/gpu/drm/zte/zx_vou.c index 5fbd10b60ee5..ccb4b0bdb8c6 100644 --- a/drivers/gpu/drm/zte/zx_vou.c +++ b/drivers/gpu/drm/zte/zx_vou.c @@ -350,7 +350,8 @@ static inline void vou_chn_set_update(struct zx_crtc *zcrtc) zx_writel(zcrtc->chnreg + CHN_UPDATE, 1); } -static void zx_crtc_enable(struct drm_crtc *crtc) +static void zx_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct drm_display_mode *mode = &crtc->state->adjusted_mode; bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE; @@ -490,9 +491,9 @@ static void zx_crtc_atomic_flush(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs zx_crtc_helper_funcs = { - .enable = zx_crtc_enable, .disable = zx_crtc_disable, .atomic_flush = zx_crtc_atomic_flush, + .atomic_enable = zx_crtc_atomic_enable, }; static int zx_vou_enable_vblank(struct drm_crtc *crtc) diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index 474a1029ec79..c85124f687ba 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -71,7 +71,7 @@ struct drm_crtc_helper_funcs { * This callback is used by the legacy CRTC helpers. Atomic helpers * also support using this hook for enabling and disabling a CRTC to * facilitate transitions to atomic, but it is deprecated. Instead - * @enable and @disable should be used. + * @atomic_enable and @atomic_disable should be used. */ void (*dpms)(struct drm_crtc *crtc, int mode); @@ -85,8 +85,8 @@ struct drm_crtc_helper_funcs { * * This callback is used by the legacy CRTC helpers. Atomic helpers * also support using this hook for disabling a CRTC to facilitate - * transitions to atomic, but it is deprecated. Instead @disable should - * be used. 
+ * transitions to atomic, but it is deprecated. Instead @atomic_disable + * should be used. */ void (*prepare)(struct drm_crtc *crtc); @@ -100,8 +100,8 @@ struct drm_crtc_helper_funcs { * * This callback is used by the legacy CRTC helpers. Atomic helpers * also support using this hook for enabling a CRTC to facilitate - * transitions to atomic, but it is deprecated. Instead @enable should - * be used. + * transitions to atomic, but it is deprecated. Instead @atomic_enable + * should be used. */ void (*commit)(struct drm_crtc *crtc); @@ -222,7 +222,7 @@ struct drm_crtc_helper_funcs { * pipeline is suspended using either DPMS or the new "ACTIVE" property. * Which means register values set in this callback might get reset when * the CRTC is suspended, but not restored. Such drivers should instead - * move all their CRTC setup into the @enable callback. + * move all their CRTC setup into the @atomic_enable callback. * * This callback is optional. */ @@ -297,7 +297,7 @@ struct drm_crtc_helper_funcs { * Atomic drivers don't need to implement it if there's no need to * disable anything at the CRTC level. To ensure that runtime PM * handling (using either DPMS or the new "ACTIVE" property) works - * @disable must be the inverse of @enable for atomic drivers. + * @disable must be the inverse of @atomic_enable for atomic drivers. * Atomic drivers should consider to use @atomic_disable instead of * this one. * @@ -315,24 +315,6 @@ struct drm_crtc_helper_funcs { */ void (*disable)(struct drm_crtc *crtc); - /** - * @enable: - * - * This callback should be used to enable the CRTC. With the atomic - * drivers it is called before all encoders connected to this CRTC are - * enabled through the encoder's own &drm_encoder_helper_funcs.enable - * hook. If that sequence is too simple drivers can just add their own - * hooks and call it from this CRTC callback here by looping over all - * encoders connected to it using for_each_encoder_on_crtc(). - * - * This hook is used only by atomic helpers, for symmetry with @disable. - * Atomic drivers don't need to implement it if there's no need to - * enable anything at the CRTC level. To ensure that runtime PM handling - * (using either DPMS or the new "ACTIVE" property) works - * @enable must be the inverse of @disable for atomic drivers. - */ - void (*enable)(struct drm_crtc *crtc); - /** * @atomic_check: * @@ -432,6 +414,30 @@ struct drm_crtc_helper_funcs { void (*atomic_flush)(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state); + /** + * @atomic_enable: + * + * This callback should be used to enable the CRTC. With the atomic + * drivers it is called before all encoders connected to this CRTC are + * enabled through the encoder's own &drm_encoder_helper_funcs.enable + * hook. If that sequence is too simple drivers can just add their own + * hooks and call it from this CRTC callback here by looping over all + * encoders connected to it using for_each_encoder_on_crtc(). + * + * This hook is used only by atomic helpers, for symmetry with + * @atomic_disable. Atomic drivers don't need to implement it if there's + * no need to enable anything at the CRTC level. To ensure that runtime + * PM handling (using either DPMS or the new "ACTIVE" property) works + * @atomic_enable must be the inverse of @atomic_disable for atomic + * drivers. + * + * Drivers can use the @old_crtc_state input parameter if the operations + * needed to enable the CRTC don't depend solely on the new state but + * also on the transition between the old state and the new state. 
+ */ + void (*atomic_enable)(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state); + /** * @atomic_disable: * From 64581714b58bc3e16ede8dc37a025c3aa0e0eef1 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 30 Jun 2017 12:36:45 +0300 Subject: [PATCH 0148/1795] drm: Convert atomic drivers from CRTC .disable() to .atomic_disable() The CRTC .disable() helper operation is deprecated for atomic drivers, the new .atomic_disable() helper operation being preferred. Convert all atomic drivers to .atomic_disable() to avoid cargo-cult use of .disable() in new drivers. Signed-off-by: Laurent Pinchart Acked-by: Maxime Ripard # for sun4i Acked-by: Philipp Zabel # for mediatek Acked-by: Alexey Brodkin # for arcpgu Acked-by: Boris Brezillon # for atmel-hlcdc Tested-by: Philippe Cornu # for stm Acked-by: Philippe Cornu # for stm Acked-by: Vincent Abriou # for sti Reviewed-by: Thomas Hellstrom # for vmwgfx Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170630093646.7928-3-laurent.pinchart+renesas@ideasonboard.com --- drivers/gpu/drm/arc/arcpgu_crtc.c | 5 +++-- drivers/gpu/drm/arm/hdlcd_crtc.c | 5 +++-- drivers/gpu/drm/arm/malidp_crtc.c | 5 +++-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 5 +++-- drivers/gpu/drm/drm_simple_kms_helper.c | 5 +++-- drivers/gpu/drm/exynos/exynos_drm_crtc.c | 5 +++-- drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c | 5 +++-- drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 5 +++-- drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 5 +++-- drivers/gpu/drm/meson/meson_crtc.c | 5 +++-- drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 5 +++-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 5 +++-- drivers/gpu/drm/omapdrm/omap_crtc.c | 5 +++-- drivers/gpu/drm/qxl/qxl_display.c | 5 +++-- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 5 +++-- drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 5 +++-- drivers/gpu/drm/sti/sti_crtc.c | 5 +++-- drivers/gpu/drm/stm/ltdc.c | 5 +++-- drivers/gpu/drm/sun4i/sun4i_crtc.c | 5 +++-- drivers/gpu/drm/tegra/dc.c | 5 +++-- drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 8 +++++++- drivers/gpu/drm/vc4/vc4_crtc.c | 5 +++-- drivers/gpu/drm/virtio/virtgpu_display.c | 5 +++-- drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 7 ++++--- drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 7 ++++--- drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 5 +++-- drivers/gpu/drm/zte/zx_vou.c | 5 +++-- 27 files changed, 87 insertions(+), 55 deletions(-) diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c index c9bc6a90ac83..1859dd3ad622 100644 --- a/drivers/gpu/drm/arc/arcpgu_crtc.c +++ b/drivers/gpu/drm/arc/arcpgu_crtc.c @@ -130,7 +130,8 @@ static void arc_pgu_crtc_atomic_enable(struct drm_crtc *crtc, ARCPGU_CTRL_ENABLE_MASK); } -static void arc_pgu_crtc_disable(struct drm_crtc *crtc) +static void arc_pgu_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc); @@ -162,9 +163,9 @@ static const struct drm_crtc_helper_funcs arc_pgu_crtc_helper_funcs = { .mode_set = drm_helper_crtc_mode_set, .mode_set_base = drm_helper_crtc_mode_set_base, .mode_set_nofb = arc_pgu_crtc_mode_set_nofb, - .disable = arc_pgu_crtc_disable, .atomic_begin = arc_pgu_crtc_atomic_begin, .atomic_enable = arc_pgu_crtc_atomic_enable, + .atomic_disable = arc_pgu_crtc_atomic_disable, }; static void arc_pgu_plane_atomic_update(struct drm_plane *plane, diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index 2b7f4f05d91f..16e1e20cf04c 100644 --- 
a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c @@ -176,7 +176,8 @@ static void hdlcd_crtc_atomic_enable(struct drm_crtc *crtc, drm_crtc_vblank_on(crtc); } -static void hdlcd_crtc_disable(struct drm_crtc *crtc) +static void hdlcd_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); @@ -219,10 +220,10 @@ static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = { - .disable = hdlcd_crtc_disable, .atomic_check = hdlcd_crtc_atomic_check, .atomic_begin = hdlcd_crtc_atomic_begin, .atomic_enable = hdlcd_crtc_atomic_enable, + .atomic_disable = hdlcd_crtc_atomic_disable, }; static int hdlcd_plane_atomic_check(struct drm_plane *plane, diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c index 8e5b1c0181ab..3615d18a7ddf 100644 --- a/drivers/gpu/drm/arm/malidp_crtc.c +++ b/drivers/gpu/drm/arm/malidp_crtc.c @@ -70,7 +70,8 @@ static void malidp_crtc_atomic_enable(struct drm_crtc *crtc, drm_crtc_vblank_on(crtc); } -static void malidp_crtc_disable(struct drm_crtc *crtc) +static void malidp_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct malidp_drm *malidp = crtc_to_malidp_device(crtc); struct malidp_hw_device *hwdev = malidp->dev; @@ -409,9 +410,9 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc, static const struct drm_crtc_helper_funcs malidp_crtc_helper_funcs = { .mode_valid = malidp_crtc_mode_valid, - .disable = malidp_crtc_disable, .atomic_check = malidp_crtc_atomic_check, .atomic_enable = malidp_crtc_atomic_enable, + .atomic_disable = malidp_crtc_atomic_disable, }; static struct drm_crtc_state *malidp_crtc_duplicate_state(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index e54e503180f4..441769c5bcd4 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -149,7 +149,8 @@ atmel_hlcdc_crtc_mode_valid(struct drm_crtc *c, return atmel_hlcdc_dc_mode_valid(crtc->dc, mode); } -static void atmel_hlcdc_crtc_disable(struct drm_crtc *c) +static void atmel_hlcdc_crtc_atomic_disable(struct drm_crtc *c, + struct drm_crtc_state *old_state) { struct drm_device *dev = c->dev; struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); @@ -320,11 +321,11 @@ static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = { .mode_set = drm_helper_crtc_mode_set, .mode_set_nofb = atmel_hlcdc_crtc_mode_set_nofb, .mode_set_base = drm_helper_crtc_mode_set_base, - .disable = atmel_hlcdc_crtc_disable, .atomic_check = atmel_hlcdc_crtc_atomic_check, .atomic_begin = atmel_hlcdc_crtc_atomic_begin, .atomic_flush = atmel_hlcdc_crtc_atomic_flush, .atomic_enable = atmel_hlcdc_crtc_atomic_enable, + .atomic_disable = atmel_hlcdc_crtc_atomic_disable, }; static void atmel_hlcdc_crtc_destroy(struct drm_crtc *c) diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c index 58c27ab1756f..98250854af75 100644 --- a/drivers/gpu/drm/drm_simple_kms_helper.c +++ b/drivers/gpu/drm/drm_simple_kms_helper.c @@ -52,7 +52,8 @@ static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc, pipe->funcs->enable(pipe, crtc->state); } -static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc) +static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct 
drm_simple_display_pipe *pipe; @@ -66,7 +67,7 @@ static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc) static const struct drm_crtc_helper_funcs drm_simple_kms_crtc_helper_funcs = { .atomic_check = drm_simple_kms_crtc_check, .atomic_enable = drm_simple_kms_crtc_enable, - .disable = drm_simple_kms_crtc_disable, + .atomic_disable = drm_simple_kms_crtc_disable, }; static const struct drm_crtc_funcs drm_simple_kms_crtc_funcs = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index b7e2fadb6442..c37078fbe0ea 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -32,7 +32,8 @@ static void exynos_drm_crtc_atomic_enable(struct drm_crtc *crtc, drm_crtc_vblank_on(crtc); } -static void exynos_drm_crtc_disable(struct drm_crtc *crtc) +static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); @@ -83,11 +84,11 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { - .disable = exynos_drm_crtc_disable, .atomic_check = exynos_crtc_atomic_check, .atomic_begin = exynos_crtc_atomic_begin, .atomic_flush = exynos_crtc_atomic_flush, .atomic_enable = exynos_drm_crtc_atomic_enable, + .atomic_disable = exynos_drm_crtc_atomic_disable, }; void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc) diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c index 49ef47c0c81a..54a4542a40f1 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c @@ -210,7 +210,8 @@ static void hibmc_crtc_atomic_enable(struct drm_crtc *crtc, drm_crtc_vblank_on(crtc); } -static void hibmc_crtc_disable(struct drm_crtc *crtc) +static void hibmc_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { unsigned int reg; struct hibmc_drm_private *priv = crtc->dev->dev_private; @@ -454,11 +455,11 @@ static const struct drm_crtc_funcs hibmc_crtc_funcs = { }; static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = { - .disable = hibmc_crtc_disable, .mode_set_nofb = hibmc_crtc_mode_set_nofb, .atomic_begin = hibmc_crtc_atomic_begin, .atomic_flush = hibmc_crtc_atomic_flush, .atomic_enable = hibmc_crtc_atomic_enable, + .atomic_disable = hibmc_crtc_atomic_disable, }; int hibmc_de_init(struct hibmc_drm_private *priv) diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index 8e00818b24fc..7e3abbf4ef73 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -490,7 +490,8 @@ static void ade_crtc_atomic_enable(struct drm_crtc *crtc, acrtc->enable = true; } -static void ade_crtc_disable(struct drm_crtc *crtc) +static void ade_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct ade_crtc *acrtc = to_ade_crtc(crtc); struct ade_hw_ctx *ctx = acrtc->ctx; @@ -554,11 +555,11 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs ade_crtc_helper_funcs = { - .disable = ade_crtc_disable, .mode_set_nofb = ade_crtc_mode_set_nofb, .atomic_begin = ade_crtc_atomic_begin, .atomic_flush = ade_crtc_atomic_flush, .atomic_enable = ade_crtc_atomic_enable, + .atomic_disable = ade_crtc_atomic_disable, }; static const struct drm_crtc_funcs 
ade_crtc_funcs = { diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 5971b0827d1b..fc65c57dda8c 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -391,7 +391,8 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, mtk_crtc->enabled = true; } -static void mtk_drm_crtc_disable(struct drm_crtc *crtc) +static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; @@ -488,10 +489,10 @@ static const struct drm_crtc_funcs mtk_crtc_funcs = { static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = { .mode_fixup = mtk_drm_crtc_mode_fixup, .mode_set_nofb = mtk_drm_crtc_mode_set_nofb, - .disable = mtk_drm_crtc_disable, .atomic_begin = mtk_drm_crtc_atomic_begin, .atomic_flush = mtk_drm_crtc_atomic_flush, .atomic_enable = mtk_drm_crtc_atomic_enable, + .atomic_disable = mtk_drm_crtc_atomic_disable, }; static int mtk_drm_crtc_init(struct drm_device *drm, diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c index 6f148307b0e0..5155f0179b61 100644 --- a/drivers/gpu/drm/meson/meson_crtc.c +++ b/drivers/gpu/drm/meson/meson_crtc.c @@ -103,7 +103,8 @@ static void meson_crtc_atomic_enable(struct drm_crtc *crtc, priv->viu.osd1_enabled = true; } -static void meson_crtc_disable(struct drm_crtc *crtc) +static void meson_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct meson_crtc *meson_crtc = to_meson_crtc(crtc); struct meson_drm *priv = meson_crtc->priv; @@ -150,10 +151,10 @@ static void meson_crtc_atomic_flush(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs meson_crtc_helper_funcs = { - .disable = meson_crtc_disable, .atomic_begin = meson_crtc_atomic_begin, .atomic_flush = meson_crtc_atomic_flush, .atomic_enable = meson_crtc_atomic_enable, + .atomic_disable = meson_crtc_atomic_disable, }; void meson_crtc_irq(struct meson_drm *priv) diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 9c20133f3f8d..3c7a9d343e05 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c @@ -279,7 +279,8 @@ static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc) } } -static void mdp4_crtc_disable(struct drm_crtc *crtc) +static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); struct mdp4_kms *mdp4_kms = get_kms(crtc); @@ -493,11 +494,11 @@ static const struct drm_crtc_funcs mdp4_crtc_funcs = { static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = { .mode_set_nofb = mdp4_crtc_mode_set_nofb, - .disable = mdp4_crtc_disable, .atomic_check = mdp4_crtc_atomic_check, .atomic_begin = mdp4_crtc_atomic_begin, .atomic_flush = mdp4_crtc_atomic_flush, .atomic_enable = mdp4_crtc_atomic_enable, + .atomic_disable = mdp4_crtc_atomic_disable, }; static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index d39d9d24d169..4322a502555a 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -409,7 +409,8 @@ static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc) spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags); } -static void 
mdp5_crtc_disable(struct drm_crtc *crtc) +static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); @@ -940,11 +941,11 @@ static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = { static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { .mode_set_nofb = mdp5_crtc_mode_set_nofb, - .disable = mdp5_crtc_disable, .atomic_check = mdp5_crtc_atomic_check, .atomic_begin = mdp5_crtc_atomic_begin, .atomic_flush = mdp5_crtc_atomic_flush, .atomic_enable = mdp5_crtc_atomic_enable, + .atomic_disable = mdp5_crtc_atomic_disable, }; static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 7a1b7a9cc270..14e8a7738b06 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -373,7 +373,8 @@ static void omap_crtc_atomic_enable(struct drm_crtc *crtc, spin_unlock_irq(&crtc->dev->event_lock); } -static void omap_crtc_disable(struct drm_crtc *crtc) +static void omap_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct omap_crtc *omap_crtc = to_omap_crtc(crtc); @@ -568,11 +569,11 @@ static const struct drm_crtc_funcs omap_crtc_funcs = { static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = { .mode_set_nofb = omap_crtc_mode_set_nofb, - .disable = omap_crtc_disable, .atomic_check = omap_crtc_atomic_check, .atomic_begin = omap_crtc_atomic_begin, .atomic_flush = omap_crtc_atomic_flush, .atomic_enable = omap_crtc_atomic_enable, + .atomic_disable = omap_crtc_atomic_disable, }; /* ----------------------------------------------------------------------------- diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index ea95e7e7cc7f..5eeae89c138d 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -453,7 +453,8 @@ static void qxl_crtc_atomic_enable(struct drm_crtc *crtc, DRM_DEBUG("\n"); } -static void qxl_crtc_disable(struct drm_crtc *crtc) +static void qxl_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); struct qxl_device *qdev = crtc->dev->dev_private; @@ -464,11 +465,11 @@ static void qxl_crtc_disable(struct drm_crtc *crtc) } static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = { - .disable = qxl_crtc_disable, .mode_fixup = qxl_crtc_mode_fixup, .mode_set_nofb = qxl_mode_set_nofb, .atomic_flush = qxl_crtc_atomic_flush, .atomic_enable = qxl_crtc_atomic_enable, + .atomic_disable = qxl_crtc_atomic_disable, }; static int qxl_primary_atomic_check(struct drm_plane *plane, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 6aa3fa8d06f9..f131fc68cc46 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -561,7 +561,8 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc, rcar_du_crtc_start(rcrtc); } -static void rcar_du_crtc_disable(struct drm_crtc *crtc) +static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); @@ -610,10 +611,10 @@ static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs crtc_helper_funcs = { - .disable = rcar_du_crtc_disable, .atomic_begin = 
rcar_du_crtc_atomic_begin, .atomic_flush = rcar_du_crtc_atomic_flush, .atomic_enable = rcar_du_crtc_atomic_enable, + .atomic_disable = rcar_du_crtc_atomic_disable, }; static int rcar_du_crtc_enable_vblank(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index bb59f7410634..ee876a9631f0 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -563,7 +563,8 @@ err_put_pm_runtime: return ret; } -static void vop_crtc_disable(struct drm_crtc *crtc) +static void vop_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct vop *vop = to_vop(crtc); int i; @@ -1080,11 +1081,11 @@ static void vop_crtc_atomic_begin(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = { - .disable = vop_crtc_disable, .mode_fixup = vop_crtc_mode_fixup, .atomic_flush = vop_crtc_atomic_flush, .atomic_begin = vop_crtc_atomic_begin, .atomic_enable = vop_crtc_atomic_enable, + .atomic_disable = vop_crtc_atomic_disable, }; static void vop_crtc_destroy(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c index bb864345fedf..e8a4d48e985a 100644 --- a/drivers/gpu/drm/sti/sti_crtc.c +++ b/drivers/gpu/drm/sti/sti_crtc.c @@ -32,7 +32,8 @@ static void sti_crtc_atomic_enable(struct drm_crtc *crtc, drm_crtc_vblank_on(crtc); } -static void sti_crtc_disabling(struct drm_crtc *crtc) +static void sti_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct sti_mixer *mixer = to_sti_mixer(crtc); @@ -223,10 +224,10 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { - .disable = sti_crtc_disabling, .mode_set_nofb = sti_crtc_mode_set_nofb, .atomic_flush = sti_crtc_atomic_flush, .atomic_enable = sti_crtc_atomic_enable, + .atomic_disable = sti_crtc_atomic_disable, }; static void sti_crtc_destroy(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index 337fce004d08..533176015cbb 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ b/drivers/gpu/drm/stm/ltdc.c @@ -408,7 +408,8 @@ static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc, drm_crtc_vblank_on(crtc); } -static void ltdc_crtc_disable(struct drm_crtc *crtc) +static void ltdc_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct ltdc_device *ldev = crtc_to_ltdc(crtc); @@ -525,10 +526,10 @@ static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc, static struct drm_crtc_helper_funcs ltdc_crtc_helper_funcs = { .load_lut = ltdc_crtc_load_lut, - .disable = ltdc_crtc_disable, .mode_set_nofb = ltdc_crtc_mode_set_nofb, .atomic_flush = ltdc_crtc_atomic_flush, .atomic_enable = ltdc_crtc_atomic_enable, + .atomic_disable = ltdc_crtc_atomic_disable, }; int ltdc_crtc_enable_vblank(struct drm_device *ddev, unsigned int pipe) diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c index 509e4d8014f3..d097c6f93ad0 100644 --- a/drivers/gpu/drm/sun4i/sun4i_crtc.c +++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c @@ -69,7 +69,8 @@ static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc, } } -static void sun4i_crtc_disable(struct drm_crtc *crtc) +static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); @@ -100,7 +101,7 @@ static const struct drm_crtc_helper_funcs 
sun4i_crtc_helper_funcs = { .atomic_begin = sun4i_crtc_atomic_begin, .atomic_flush = sun4i_crtc_atomic_flush, .atomic_enable = sun4i_crtc_atomic_enable, - .disable = sun4i_crtc_disable, + .atomic_disable = sun4i_crtc_atomic_disable, }; static int sun4i_crtc_enable_vblank(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 2e0d167cb657..0cb9b90e2e68 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -1199,7 +1199,8 @@ static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout) return -ETIMEDOUT; } -static void tegra_crtc_disable(struct drm_crtc *crtc) +static void tegra_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct tegra_dc *dc = to_tegra_dc(crtc); u32 value; @@ -1352,11 +1353,11 @@ static void tegra_crtc_atomic_flush(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = { - .disable = tegra_crtc_disable, .atomic_check = tegra_crtc_atomic_check, .atomic_begin = tegra_crtc_atomic_begin, .atomic_flush = tegra_crtc_atomic_flush, .atomic_enable = tegra_crtc_atomic_enable, + .atomic_disable = tegra_crtc_atomic_disable, }; static irqreturn_t tegra_dc_irq(int irq, void *data) diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index 6c5892763d27..a43e720ab4e8 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -568,6 +568,12 @@ static void tilcdc_crtc_disable(struct drm_crtc *crtc) tilcdc_crtc_off(crtc, false); } +static void tilcdc_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + tilcdc_crtc_disable(crtc); +} + void tilcdc_crtc_shutdown(struct drm_crtc *crtc) { tilcdc_crtc_off(crtc, true); @@ -735,9 +741,9 @@ static const struct drm_crtc_funcs tilcdc_crtc_funcs = { static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = { .mode_fixup = tilcdc_crtc_mode_fixup, - .disable = tilcdc_crtc_disable, .atomic_check = tilcdc_crtc_atomic_check, .atomic_enable = tilcdc_crtc_atomic_enable, + .atomic_disable = tilcdc_crtc_atomic_disable, }; int tilcdc_crtc_max_width(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 4c03a565b276..9e0c1500375c 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -479,7 +479,8 @@ static void require_hvs_enabled(struct drm_device *dev) SCALER_DISPCTRL_ENABLE); } -static void vc4_crtc_disable(struct drm_crtc *crtc) +static void vc4_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); @@ -870,11 +871,11 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = { static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = { .mode_set_nofb = vc4_crtc_mode_set_nofb, - .disable = vc4_crtc_disable, .mode_valid = vc4_crtc_mode_valid, .atomic_check = vc4_crtc_atomic_check, .atomic_flush = vc4_crtc_atomic_flush, .atomic_enable = vc4_crtc_atomic_enable, + .atomic_disable = vc4_crtc_atomic_disable, }; static const struct vc4_crtc_data pv0_data = { diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index 03a3c12ed100..ffd22e5ab43a 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -118,7 +118,8 @@ static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc, { } -static void virtio_gpu_crtc_disable(struct drm_crtc 
*crtc) +static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct drm_device *dev = crtc->dev; struct virtio_gpu_device *vgdev = dev->dev_private; @@ -146,11 +147,11 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = { - .disable = virtio_gpu_crtc_disable, .mode_set_nofb = virtio_gpu_crtc_mode_set_nofb, .atomic_check = virtio_gpu_crtc_atomic_check, .atomic_flush = virtio_gpu_crtc_atomic_flush, .atomic_enable = virtio_gpu_crtc_atomic_enable, + .atomic_disable = virtio_gpu_crtc_atomic_disable, }; static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 832b83c582c2..6391069498d6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -218,11 +218,12 @@ static void vmw_ldu_crtc_atomic_enable(struct drm_crtc *crtc, } /** - * vmw_ldu_crtc_helper_disable - Turns off CRTC + * vmw_ldu_crtc_atomic_disable - Turns off CRTC * * @crtc: CRTC to be turned off */ -static void vmw_ldu_crtc_helper_disable(struct drm_crtc *crtc) +static void vmw_ldu_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { } @@ -377,12 +378,12 @@ drm_plane_helper_funcs vmw_ldu_primary_plane_helper_funcs = { }; static const struct drm_crtc_helper_funcs vmw_ldu_crtc_helper_funcs = { - .disable = vmw_ldu_crtc_helper_disable, .mode_set_nofb = vmw_ldu_crtc_mode_set_nofb, .atomic_check = vmw_du_crtc_atomic_check, .atomic_begin = vmw_du_crtc_atomic_begin, .atomic_flush = vmw_du_crtc_atomic_flush, .atomic_enable = vmw_ldu_crtc_atomic_enable, + .atomic_disable = vmw_ldu_crtc_atomic_disable, }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 8ba3bad06909..854403509216 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -282,11 +282,12 @@ static void vmw_sou_crtc_atomic_enable(struct drm_crtc *crtc, } /** - * vmw_sou_crtc_helper_disable - Turns off CRTC + * vmw_sou_crtc_atomic_disable - Turns off CRTC * * @crtc: CRTC to be turned off */ -static void vmw_sou_crtc_helper_disable(struct drm_crtc *crtc) +static void vmw_sou_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct vmw_private *dev_priv; struct vmw_screen_object_unit *sou; @@ -574,12 +575,12 @@ drm_plane_helper_funcs vmw_sou_primary_plane_helper_funcs = { static const struct drm_crtc_helper_funcs vmw_sou_crtc_helper_funcs = { .prepare = vmw_sou_crtc_helper_prepare, - .disable = vmw_sou_crtc_helper_disable, .mode_set_nofb = vmw_sou_crtc_mode_set_nofb, .atomic_check = vmw_du_crtc_atomic_check, .atomic_begin = vmw_du_crtc_atomic_begin, .atomic_flush = vmw_du_crtc_atomic_flush, .atomic_enable = vmw_sou_crtc_atomic_enable, + .atomic_disable = vmw_sou_crtc_atomic_disable, }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index f2b39d9c51cc..ed9404a7f457 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -433,7 +433,8 @@ static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc, vmw_kms_del_active(dev_priv, &stdu->base); } -static void vmw_stdu_crtc_helper_disable(struct drm_crtc *crtc) +static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct vmw_private *dev_priv; struct vmw_screen_target_display_unit *stdu; @@ 
-1416,12 +1417,12 @@ drm_plane_helper_funcs vmw_stdu_primary_plane_helper_funcs = { static const struct drm_crtc_helper_funcs vmw_stdu_crtc_helper_funcs = { .prepare = vmw_stdu_crtc_helper_prepare, - .disable = vmw_stdu_crtc_helper_disable, .mode_set_nofb = vmw_stdu_crtc_mode_set_nofb, .atomic_check = vmw_du_crtc_atomic_check, .atomic_begin = vmw_du_crtc_atomic_begin, .atomic_flush = vmw_du_crtc_atomic_flush, .atomic_enable = vmw_stdu_crtc_atomic_enable, + .atomic_disable = vmw_stdu_crtc_atomic_disable, }; diff --git a/drivers/gpu/drm/zte/zx_vou.c b/drivers/gpu/drm/zte/zx_vou.c index ccb4b0bdb8c6..7491813131f3 100644 --- a/drivers/gpu/drm/zte/zx_vou.c +++ b/drivers/gpu/drm/zte/zx_vou.c @@ -455,7 +455,8 @@ static void zx_crtc_atomic_enable(struct drm_crtc *crtc, DRM_DEV_ERROR(vou->dev, "failed to enable pixclk: %d\n", ret); } -static void zx_crtc_disable(struct drm_crtc *crtc) +static void zx_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) { struct zx_crtc *zcrtc = to_zx_crtc(crtc); const struct zx_crtc_bits *bits = zcrtc->bits; @@ -491,9 +492,9 @@ static void zx_crtc_atomic_flush(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs zx_crtc_helper_funcs = { - .disable = zx_crtc_disable, .atomic_flush = zx_crtc_atomic_flush, .atomic_enable = zx_crtc_atomic_enable, + .atomic_disable = zx_crtc_atomic_disable, }; static int zx_vou_enable_vblank(struct drm_crtc *crtc) From d48cb5f541c953ae2a33657f582d93c4151f27d9 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 30 Jun 2017 09:39:21 +0200 Subject: [PATCH 0149/1795] drm/atomic: Drop helper include from drm_atomic.c Core code should never have to look at helper stuff, to make sure that all helper code is 100% optional and can be overriden. Cc: Gustavo Padovan Reviewed-by: Gustavo Padovan Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170630073921.2345-1-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_atomic.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 095e87278a88..09ca662fcd35 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include From fcace3b9b727e25ffa3f7ad2c96e76b8584a9f3e Mon Sep 17 00:00:00 2001 From: "Navare, Manasi D" Date: Thu, 29 Jun 2017 18:14:01 -0700 Subject: [PATCH 0150/1795] drm/i915/cnl: Fix the CURSOR_COEFF_MASK used in DDI Vswing Programming The Cursor Coeff is lower 6 bits in the PORT_TX_DW4 register and hence the CURSOR_COEFF_MASK should be (0x3F << 0) Fixes: 04416108ccea ("drm/i915/cnl: Add registers related to voltage swing sequences.") Signed-off-by: Manasi Navare Reviewed-by: Rodrigo Vivi Signed-off-by: Rodrigo Vivi Link: http://patchwork.freedesktop.org/patch/msgid/1498785241-21138-1-git-send-email-manasi.d.navare@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c8647cfa81ba..64cc674b652a 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1802,7 +1802,7 @@ enum skl_disp_power_wells { #define POST_CURSOR_2(x) ((x) << 6) #define POST_CURSOR_2_MASK (0x3F << 6) #define CURSOR_COEFF(x) ((x) << 0) -#define CURSOR_COEFF_MASK (0x3F << 6) +#define CURSOR_COEFF_MASK (0x3F << 0) #define _CNL_PORT_TX_DW5_GRP_AE 0x162354 #define _CNL_PORT_TX_DW5_GRP_B 0x1623D4 From 2c4b851933a6aae3e3a034f23fbcb3ec345d7e51 Mon Sep 17 
00:00:00 2001 From: Daniel Vetter Date: Mon, 3 Jul 2017 08:40:41 +0200 Subject: [PATCH 0151/1795] drm/i915: Update DRIVER_DATE to 20170703 Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ddb5ee8e0535..29d162b24d8d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -80,8 +80,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20170619" -#define DRIVER_TIMESTAMP 1497857498 +#define DRIVER_DATE "20170703" +#define DRIVER_TIMESTAMP 1499064041 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and * WARN_ON()) for hw state sanity checks to check for unexpected conditions From fef9df8b594531a4257b6a3bf7e190570c17be29 Mon Sep 17 00:00:00 2001 From: Gustavo Padovan Date: Fri, 30 Jun 2017 15:03:17 -0300 Subject: [PATCH 0152/1795] drm/atomic: initial support for asynchronous plane update MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In some cases, like cursor updates, it is interesting to update the plane in an asynchronous fashion to avoid big delays. The current queued update could be still waiting for a fence to signal and thus block any subsequent update until its scan out. In cases like this if we update the cursor synchronously through the atomic API it will cause significant delays that would even be noticed by the final user. This patch creates a fast path to jump ahead the current queued state and do single planes updates without going through all atomic steps in drm_atomic_helper_commit(). We take this path for legacy cursor updates. For now only single plane updates are supported, but we plan to support multiple planes updates and async PageFlips through this interface as well in the near future. v6: - move check code to drm_atomic_helper.c (Daniel Vetter) v5: - improve comments (Eric Anholt) v4: - fix state->crtc NULL check (Archit Taneja) v3: - fix iteration on the wrong crtc state - put back code to forbid updates if there is a queued update for the same plane (Ville Syrjälä) - move size checks back to drivers (Ville Syrjälä) - move ASYNC_UPDATE flag addition to its own patch (Ville Syrjälä) v2: - allow updates even if there is a queued update for the same plane. 
- fixes on the documentation (Emil Velikov) - unconditionally call ->atomic_async_update (Emil Velikov) - check for ->atomic_async_update earlier (Daniel Vetter) - make ->atomic_async_check() the last step (Daniel Vetter) - add ASYNC_UPDATE flag (Eric Anholt) - update state in core after ->atomic_async_update (Eric Anholt) - update docs (Eric Anholt) Cc: Daniel Vetter Cc: Rob Clark Cc: Eric Anholt Signed-off-by: Gustavo Padovan Reviewed-by: Archit Taneja (v5) Acked-by: Eric Anholt (v5) Reviewed-by: Daniel Vetter Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170630180322.29007-2-gustavo@padovan.org --- drivers/gpu/drm/drm_atomic_helper.c | 122 +++++++++++++++++++++++ include/drm/drm_atomic.h | 2 + include/drm/drm_atomic_helper.h | 4 + include/drm/drm_modeset_helper_vtables.h | 50 ++++++++++ 4 files changed, 178 insertions(+) diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 23e4661a62fe..4f6e52961951 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -795,6 +795,9 @@ int drm_atomic_helper_check(struct drm_device *dev, if (ret) return ret; + if (state->legacy_cursor_update) + state->async_update = !drm_atomic_helper_async_check(dev, state); + return ret; } EXPORT_SYMBOL(drm_atomic_helper_check); @@ -1352,6 +1355,114 @@ static void commit_work(struct work_struct *work) commit_tail(state); } +/** + * drm_atomic_helper_async_check - check if state can be commited asynchronously + * @dev: DRM device + * @state: the driver state object + * + * This helper will check if it is possible to commit the state asynchronously. + * Async commits are not supposed to swap the states like normal sync commits + * but just do in-place changes on the current state. + * + * It will return 0 if the commit can happen in an asynchronous fashion or error + * if not. Note that error just mean it can't be commited asynchronously, if it + * fails the commit should be treated like a normal synchronous commit. + */ +int drm_atomic_helper_async_check(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_crtc_commit *commit; + struct drm_plane *__plane, *plane = NULL; + struct drm_plane_state *__plane_state, *plane_state = NULL; + const struct drm_plane_helper_funcs *funcs; + int i, j, n_planes = 0; + + for_each_new_crtc_in_state(state, crtc, crtc_state, i) { + if (drm_atomic_crtc_needs_modeset(crtc_state)) + return -EINVAL; + } + + for_each_new_plane_in_state(state, __plane, __plane_state, i) { + n_planes++; + plane = __plane; + plane_state = __plane_state; + } + + /* FIXME: we support only single plane updates for now */ + if (!plane || n_planes != 1) + return -EINVAL; + + if (!plane_state->crtc) + return -EINVAL; + + funcs = plane->helper_private; + if (!funcs->atomic_async_update) + return -EINVAL; + + if (plane_state->fence) + return -EINVAL; + + /* + * Don't do an async update if there is an outstanding commit modifying + * the plane. This prevents our async update's changes from getting + * overridden by a previous synchronous update's state. 
+ */ + for_each_new_crtc_in_state(state, crtc, crtc_state, i) { + if (plane->crtc != crtc) + continue; + + spin_lock(&crtc->commit_lock); + commit = list_first_entry_or_null(&crtc->commit_list, + struct drm_crtc_commit, + commit_entry); + if (!commit) { + spin_unlock(&crtc->commit_lock); + continue; + } + spin_unlock(&crtc->commit_lock); + + if (!crtc->state->state) + continue; + + for_each_plane_in_state(crtc->state->state, __plane, + __plane_state, j) { + if (__plane == plane) + return -EINVAL; + } + } + + return funcs->atomic_async_check(plane, plane_state); +} +EXPORT_SYMBOL(drm_atomic_helper_async_check); + +/** + * drm_atomic_helper_async_commit - commit state asynchronously + * @dev: DRM device + * @state: the driver state object + * + * This function commits a state asynchronously, i.e., not vblank + * synchronized. It should be used on a state only when + * drm_atomic_async_check() succeeds. Async commits are not supposed to swap + * the states like normal sync commits, but just do in-place changes on the + * current state. + */ +void drm_atomic_helper_async_commit(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct drm_plane *plane; + struct drm_plane_state *plane_state; + const struct drm_plane_helper_funcs *funcs; + int i; + + for_each_new_plane_in_state(state, plane, plane_state, i) { + funcs = plane->helper_private; + funcs->atomic_async_update(plane, plane_state); + } +} +EXPORT_SYMBOL(drm_atomic_helper_async_commit); + /** * drm_atomic_helper_commit - commit validated state object * @dev: DRM device @@ -1376,6 +1487,17 @@ int drm_atomic_helper_commit(struct drm_device *dev, { int ret; + if (state->async_update) { + ret = drm_atomic_helper_prepare_planes(dev, state); + if (ret) + return ret; + + drm_atomic_helper_async_commit(dev, state); + drm_atomic_helper_cleanup_planes(dev, state); + + return 0; + } + ret = drm_atomic_helper_setup_commit(state, nonblock); if (ret) return ret; diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 0196f264a418..dcc8e0cdb7ff 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -207,6 +207,7 @@ struct __drm_private_objs_state { * @dev: parent DRM device * @allow_modeset: allow full modeset * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics + * @async_update: hint for asynchronous plane update * @planes: pointer to array of structures with per-plane data * @crtcs: pointer to array of CRTC pointers * @num_connector: size of the @connectors and @connector_states arrays @@ -221,6 +222,7 @@ struct drm_atomic_state { struct drm_device *dev; bool allow_modeset : 1; bool legacy_cursor_update : 1; + bool async_update : 1; struct __drm_planes_state *planes; struct __drm_crtcs_state *crtcs; int num_connector; diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index 3bfeb2b2f746..dd196cc0afd7 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -44,6 +44,10 @@ void drm_atomic_helper_commit_tail(struct drm_atomic_state *state); int drm_atomic_helper_commit(struct drm_device *dev, struct drm_atomic_state *state, bool nonblock); +int drm_atomic_helper_async_check(struct drm_device *dev, + struct drm_atomic_state *state); +void drm_atomic_helper_async_commit(struct drm_device *dev, + struct drm_atomic_state *state); int drm_atomic_helper_wait_for_fences(struct drm_device *dev, struct drm_atomic_state *state, diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index 
c85124f687ba..06569845708c 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -1135,6 +1135,56 @@ struct drm_plane_helper_funcs { */ void (*atomic_disable)(struct drm_plane *plane, struct drm_plane_state *old_state); + + /** + * @atomic_async_check: + * + * Drivers should set this function pointer to check if the plane state + * can be updated in a async fashion. Here async means "not vblank + * synchronized". + * + * This hook is called by drm_atomic_async_check() to establish if a + * given update can be committed asynchronously, that is, if it can + * jump ahead of the state currently queued for update. + * + * RETURNS: + * + * Return 0 on success and any error returned indicates that the update + * can not be applied in asynchronous manner. + */ + int (*atomic_async_check)(struct drm_plane *plane, + struct drm_plane_state *state); + + /** + * @atomic_async_update: + * + * Drivers should set this function pointer to perform asynchronous + * updates of planes, that is, jump ahead of the currently queued + * state and update the plane. Here async means "not vblank + * synchronized". + * + * This hook is called by drm_atomic_helper_async_commit(). + * + * An async update will happen on legacy cursor updates. An async + * update won't happen if there is an outstanding commit modifying + * the same plane. + * + * Note that unlike &drm_plane_helper_funcs.atomic_update this hook + * takes the new &drm_plane_state as parameter. When doing async_update + * drivers shouldn't replace the &drm_plane_state but update the + * current one with the new plane configurations in the new + * plane_state. + * + * FIXME: + * - It only works for single plane updates + * - Async Pageflips are not supported yet + * - Some hw might still scan out the old buffer until the next + * vblank, however we let go of the fb references as soon as + * we run this hook. For now drivers must implement their own workers + * for deferring if needed, until a common solution is created. + */ + void (*atomic_async_update)(struct drm_plane *plane, + struct drm_plane_state *new_state); }; /** From aaaf2f12999a416514368e32853fb13f745e27fc Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sat, 1 Jul 2017 15:30:15 +0530 Subject: [PATCH 0153/1795] drm: armada: constify drm_prop_enum_list structures. drm_prop_enum_lists are not supposed to change at runtime. All functions working with drm_prop_enum_list provided by work with const drm_prop_enum_list. So mark the non-const structs as const. 
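For reference, the reason this is safe is that the DRM core already consumes these tables through a const pointer, so no caller changes are needed. A minimal sketch with hypothetical names ("example" property and values are illustrative only):

#include <drm/drmP.h>

static const struct drm_prop_enum_list example_enum_list[] = {
	{ 0, "off" },
	{ 1, "on" },
};

static struct drm_property *example_create_prop(struct drm_device *dev)
{
	/* drm_property_create_enum() takes the table as
	 * 'const struct drm_prop_enum_list *', so a read-only table
	 * works unchanged. */
	return drm_property_create_enum(dev, 0, "example",
					example_enum_list,
					ARRAY_SIZE(example_enum_list));
}
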
File size before: text data bss dec hex filename 9629 744 0 10373 2885 drivers/gpu/drm/armada/armada_crtc.o File size After adding 'const': text data bss dec hex filename 9757 616 0 10373 2885 drivers/gpu/drm/armada/armada_crtc.o Signed-off-by: Arvind Yadav Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/d2344c4dc40238cfe48fa6d917767df0f053150a.1498902844.git.arvind.yadav.cs@gmail.com --- drivers/gpu/drm/armada/armada_crtc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 94b76bdd7553..b57fb80acec1 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -1150,13 +1150,13 @@ int armada_drm_plane_init(struct armada_plane *plane) return 0; } -static struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = { +static const struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = { { CSC_AUTO, "Auto" }, { CSC_YUV_CCIR601, "CCIR601" }, { CSC_YUV_CCIR709, "CCIR709" }, }; -static struct drm_prop_enum_list armada_drm_csc_rgb_enum_list[] = { +static const struct drm_prop_enum_list armada_drm_csc_rgb_enum_list[] = { { CSC_AUTO, "Auto" }, { CSC_RGB_COMPUTER, "Computer system" }, { CSC_RGB_STUDIO, "Studio" }, From 8a63ca58f879f7ac40fac5af0bc1b37393ad18ea Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sat, 1 Jul 2017 16:24:42 +0530 Subject: [PATCH 0154/1795] drm: armada: Constify drm_prop_enum_list structures. drm_prop_enum_lists are not supposed to change at runtime. All functions working with drm_prop_enum_list provided by work with const drm_prop_enum_list. So mark the non-const structs as const. File size before: text data bss dec hex filename 3594 176 0 3770 eba drivers/gpu/drm/armada/armada_overlay.o File size After adding 'const': text data bss dec hex filename 3722 48 0 3770 eba drivers/gpu/drm/armada/armada_overlay.o Signed-off-by: Arvind Yadav Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/dbeb176765bda8eaa9efdaa2dcd14c7bbae39cfa.1498905467.git.arvind.yadav.cs@gmail.com --- drivers/gpu/drm/armada/armada_overlay.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index e9a29df4b443..677b44f3534b 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c @@ -388,7 +388,7 @@ static const uint32_t armada_ovl_formats[] = { DRM_FORMAT_BGR565, }; -static struct drm_prop_enum_list armada_drm_colorkey_enum_list[] = { +static const struct drm_prop_enum_list armada_drm_colorkey_enum_list[] = { { CKMODE_DISABLE, "disabled" }, { CKMODE_Y, "Y component" }, { CKMODE_U, "U component" }, From 7b4e1eaa9d9eb3662876833bafcbe059eb6ac186 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sat, 1 Jul 2017 18:06:46 +0530 Subject: [PATCH 0155/1795] drm/udl: dma-buf: Constify dma_buf_ops structures. dma_buf_ops are not supposed to change at runtime. All functions working with dma_buf_ops provided by work with const dma_buf_ops. So mark the non-const structs as const. 
File size before: text data bss dec hex filename 2002 112 0 2114 842 drivers/gpu/drm/udl/udl_dmabuf.o File size After adding 'const': text data bss dec hex filename 2114 0 0 2114 842 drivers/gpu/drm/udl/udl_dmabuf.o Signed-off-by: Arvind Yadav Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/cb4dfac90e85e2270779331f8cb10b635042bad7.1498912415.git.arvind.yadav.cs@gmail.com --- drivers/gpu/drm/udl/udl_dmabuf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c index 2e031a894813..2867ed155ff6 100644 --- a/drivers/gpu/drm/udl/udl_dmabuf.c +++ b/drivers/gpu/drm/udl/udl_dmabuf.c @@ -186,7 +186,7 @@ static int udl_dmabuf_mmap(struct dma_buf *dma_buf, return -EINVAL; } -static struct dma_buf_ops udl_dmabuf_ops = { +static const struct dma_buf_ops udl_dmabuf_ops = { .attach = udl_attach_dma_buf, .detach = udl_detach_dma_buf, .map_dma_buf = udl_map_dma_buf, From 05ccf211efbb6c8b5da2b5fda4f9399a7bc0db2e Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 2 Jul 2017 13:11:02 +0530 Subject: [PATCH 0156/1795] drm: ttm: virtio-gpu: dma-buf: Constify ttm_place structures. ttm_place are not supposed to change at runtime. All functions working with ttm_place provided by work with const ttm_place. So mark the non-const structs as const. File size before: text data bss dec hex filename 2315 184 0 2499 9c3 drivers/gpu/drm/virtio/virtgpu_ttm.o File size After adding 'const': text data bss dec hex filename 2347 152 0 2499 9c3 drivers/gpu/drm/virtio/virtgpu_ttm.o Signed-off-by: Arvind Yadav Link: http://patchwork.freedesktop.org/patch/msgid/25a189402a516a0142d9a4412da0a597c660a96a.1498981093.git.arvind.yadav.cs@gmail.com Signed-off-by: Gerd Hoffmann --- drivers/gpu/drm/virtio/virtgpu_ttm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c index c1f2af4ca4ca..e695d74eaa9f 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ttm.c +++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c @@ -234,7 +234,7 @@ static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, static void virtio_gpu_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *placement) { - static struct ttm_place placements = { + static const struct ttm_place placements = { .fpfn = 0, .lpfn = 0, .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM, From 9e2033a6bb13346620dbeb659b284a2bef8eb4d6 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 2 Jul 2017 23:03:35 +0530 Subject: [PATCH 0157/1795] drm: qxl: constify ttm_place structures. ttm_place are not supposed to change at runtime. All functions working with ttm_place provided by work with const ttm_place. So mark the non-const structs as const. 
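A sketch of the same pattern for a function-local table (hypothetical driver callback, not the qxl code): a non-const local 'static' stays writable in .data for the lifetime of the module, while adding const lets it be placed in .rodata, which size(1) accounts under text, matching the numbers below.

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

static void example_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	/* const moves this from .data to .rodata and documents that
	 * eviction must not modify it. */
	static const struct ttm_place sys_place = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM,
	};

	/* struct ttm_placement stores const ttm_place pointers, so the
	 * const table plugs in directly. */
	placement->placement = &sys_place;
	placement->num_placement = 1;
	placement->busy_placement = &sys_place;
	placement->num_busy_placement = 1;
}
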
File size before: text data bss dec hex filename 3485 184 264 3933 f5d drivers/gpu/drm/qxl/qxl_ttm.o File size After adding 'const': text data bss dec hex filename 3501 152 264 3917 f4d drivers/gpu/drm/qxl/qxl_ttm.o Signed-off-by: Arvind Yadav Link: http://patchwork.freedesktop.org/patch/msgid/a4f21d3bd2497129f084b8055ecf27f0d3ff1bba.1499013516.git.arvind.yadav.cs@gmail.com Signed-off-by: Gerd Hoffmann --- drivers/gpu/drm/qxl/qxl_ttm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 87fc1dbd0a2f..7ecf8a4b9fe6 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -187,7 +187,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *placement) { struct qxl_bo *qbo; - static struct ttm_place placements = { + static const struct ttm_place placements = { .fpfn = 0, .lpfn = 0, .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM From 931e8c661a2d85e6bdfe145cfc52dffaf4a60516 Mon Sep 17 00:00:00 2001 From: Mark Cave-Ayland Date: Sun, 2 Jul 2017 22:52:43 +0100 Subject: [PATCH 0158/1795] drm/bochs: switch fb_ops over to use drm_fb_helper_cfb helpers The current drm_fb_helper_sys helpers referenced in fb_ops assume that the video memory is in system RAM. This is not the case for sparc which uses direct physical memory accesses for IO memory and causes the bochs_drm module to panic immediately upon startup as it tries to initialise the framebuffer. Switching fb_ops over to use the drm_fb_helper_cfb helpers ensures that the correct accesses are used on sparc, fixing the panic and allowing the bochs_drm module to function under qemu-system-sparc64. Signed-off-by: Mark Cave-Ayland Link: http://patchwork.freedesktop.org/patch/msgid/1499032363-8290-1-git-send-email-mark.cave-ayland@ilande.co.uk Signed-off-by: Gerd Hoffmann --- drivers/gpu/drm/bochs/bochs_fbdev.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c index c38deffa14de..49d5a2b7d630 100644 --- a/drivers/gpu/drm/bochs/bochs_fbdev.c +++ b/drivers/gpu/drm/bochs/bochs_fbdev.c @@ -23,9 +23,9 @@ static int bochsfb_mmap(struct fb_info *info, static struct fb_ops bochsfb_ops = { .owner = THIS_MODULE, DRM_FB_HELPER_DEFAULT_OPS, - .fb_fillrect = drm_fb_helper_sys_fillrect, - .fb_copyarea = drm_fb_helper_sys_copyarea, - .fb_imageblit = drm_fb_helper_sys_imageblit, + .fb_fillrect = drm_fb_helper_cfb_fillrect, + .fb_copyarea = drm_fb_helper_cfb_copyarea, + .fb_imageblit = drm_fb_helper_cfb_imageblit, .fb_mmap = bochsfb_mmap, }; From 6cbf04001ec0eede72a43f297c93586971445128 Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Tue, 6 Jun 2017 09:17:06 +0200 Subject: [PATCH 0159/1795] drm/etnaviv: don't trigger OOM killer when page allocation fails GPU buffers can be quite large, so userspace is expected to deal with allocation failure. Don't trigger the OOM killer when page allocation for the GEM objects fails, as this opens an easy possiblity for unprivileged applications to DOS the system,a s the shmem pages are not fully accounted to the allocating process. 
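A rough sketch of the mechanism, using the generic GEM helpers rather than the etnaviv internals: the gfp mask stored on the shmem mapping is what later page allocations use, so relaxing it there turns an OOM-killer invocation into a plain -ENOMEM returned to userspace.

#include <linux/pagemap.h>
#include <drm/drmP.h>

/* At object creation time: allow highmem pages, but give up under
 * memory pressure (__GFP_NORETRY) and skip the allocation-failure
 * splat (__GFP_NOWARN). */
static void example_gem_new(struct drm_gem_object *obj)
{
	mapping_set_gfp_mask(obj->filp->f_mapping,
			     GFP_HIGHUSER | __GFP_NORETRY | __GFP_NOWARN);
}

/* Later, when populating: the failure now surfaces as -ENOMEM to the
 * ioctl caller instead of waking the OOM killer. */
static int example_gem_populate(struct drm_gem_object *obj)
{
	struct page **pages = drm_gem_get_pages(obj);

	if (IS_ERR(pages))
		return PTR_ERR(pages);
	/* ... stash pages for later mapping ... */
	return 0;
}
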
Signed-off-by: Lucas Stach --- drivers/gpu/drm/etnaviv/etnaviv_gem.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 9a3bea738330..b7541a455451 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -662,7 +662,8 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev, * going to pin these pages. */ mapping = obj->filp->f_mapping; - mapping_set_gfp_mask(mapping, GFP_HIGHUSER); + mapping_set_gfp_mask(mapping, GFP_HIGHUSER | + __GFP_NORETRY | __GFP_NOWARN); } if (ret) From f91ac470a8b1b358e9c2c7dc17da2642d125c3ac Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Tue, 6 Jun 2017 09:17:07 +0200 Subject: [PATCH 0160/1795] drm/etnaviv: reduce allocation failure message severity The GPU userspace is expected to deal with failure to allocate memory for the GPU buffers, there is no need to spam the log on failure. Signed-off-by: Lucas Stach --- drivers/gpu/drm/etnaviv/etnaviv_gem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index b7541a455451..a4f392c38b38 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -68,7 +68,7 @@ static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj) struct page **p = drm_gem_get_pages(&etnaviv_obj->base); if (IS_ERR(p)) { - dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p)); + dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p)); return PTR_ERR(p); } From 8cc47b3ea0822175deb8bf436e4c59a7f128ac30 Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Tue, 6 Jun 2017 09:17:08 +0200 Subject: [PATCH 0161/1795] drm/etnaviv: populate GEM objects on cpu_prep CPU prep is the point where we can reasonably return an error to userspace when something goes wrong while populating the object. If we leave the object unpopulated at this point, the allocation will happen in the fault handler when userspace accesses the object through the mmap space, where we don't have any other option than to OOM the system. 
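For contrast, a sketch of why the fault handler is a bad place for this allocation to fail; example_get_pages() is a hypothetical stand-in for a driver's populate helper:

#include <linux/mm.h>
#include <drm/drmP.h>

static int example_gem_fault(struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vmf->vma->vm_private_data;
	struct page **pages;

	/* hypothetical populate helper, allocates on first fault */
	pages = example_get_pages(obj);
	if (IS_ERR(pages))
		/* The only error channels available here are VM_FAULT_OOM
		 * or VM_FAULT_SIGBUS, both far harsher on the process than
		 * returning -ENOMEM from an ioctl such as cpu_prep. */
		return PTR_ERR(pages) == -ENOMEM ?
			VM_FAULT_OOM : VM_FAULT_SIGBUS;

	/* ... insert pages into the VMA with vm_insert_page() ... */
	return VM_FAULT_NOPAGE;
}
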
Signed-off-by: Lucas Stach --- drivers/gpu/drm/etnaviv/etnaviv_gem.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index a4f392c38b38..408c0fc476dd 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -413,6 +413,16 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op, bool write = !!(op & ETNA_PREP_WRITE); int ret; + if (!etnaviv_obj->sgt) { + void *ret; + + mutex_lock(&etnaviv_obj->lock); + ret = etnaviv_gem_get_pages(etnaviv_obj); + mutex_unlock(&etnaviv_obj->lock); + if (IS_ERR(ret)) + return PTR_ERR(ret); + } + if (op & ETNA_PREP_NOSYNC) { if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv, write)) @@ -427,16 +437,6 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op, } if (etnaviv_obj->flags & ETNA_BO_CACHED) { - if (!etnaviv_obj->sgt) { - void *ret; - - mutex_lock(&etnaviv_obj->lock); - ret = etnaviv_gem_get_pages(etnaviv_obj); - mutex_unlock(&etnaviv_obj->lock); - if (IS_ERR(ret)) - return PTR_ERR(ret); - } - dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl, etnaviv_obj->sgt->nents, etnaviv_op_to_dma_dir(op)); From 987f8c444aa2c33d98e7030d0c5f0a5325cc84ea Mon Sep 17 00:00:00 2001 From: "sagar.a.kamble@intel.com" Date: Tue, 27 Jun 2017 23:09:41 +0530 Subject: [PATCH 0162/1795] drm/i915: Hold RPM wakelock while initializing OA buffer OA buffer initialization involves access to HW registers to set the OA base, head and tail. Ensure device is awake while setting these. With this, all oa.ops are covered under RPM and forcewake wakelock. Cc: Lionel Landwerlin Signed-off-by: Sagar Arun Kamble Reviewed-by: Lionel Landwerlin Signed-off-by: Maarten Lankhorst Link: http://patchwork.freedesktop.org/patch/msgid/1498585181-23048-1-git-send-email-sagar.a.kamble@intel.com Fixes: d79651522e89c ("drm/i915: Enable i915 perf stream for Haswell OA unit") Cc: # v4.11+ --- drivers/gpu/drm/i915/i915_perf.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index afd8260cd096..d9f77a4d85db 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -2067,10 +2067,6 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, return ret; } - ret = alloc_oa_buffer(dev_priv); - if (ret) - goto err_oa_buf_alloc; - /* PRM - observability performance counters: * * OACONTROL, performance counter enable, note: @@ -2086,6 +2082,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, intel_runtime_pm_get(dev_priv); intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + ret = alloc_oa_buffer(dev_priv); + if (ret) + goto err_oa_buf_alloc; + ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv); if (ret) goto err_enable; @@ -2097,11 +2097,11 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, return 0; err_enable: - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - intel_runtime_pm_put(dev_priv); free_oa_buffer(dev_priv); err_oa_buf_alloc: + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_runtime_pm_put(dev_priv); if (stream->ctx) oa_put_render_ctx_id(stream); From a874b6a3fb22c8c30f36b1f6ba5430a4c2278a6a Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Mon, 26 Jun 2017 10:18:35 +0200 Subject: [PATCH 0163/1795] drm/i915: Prevent kernel panic when reading/writing compliance debugfs files, v2. 
When reading all debugfs files on a system with DP-MST the kernel panics on a null pointer dereference because intel_dp is null for a DP-MST connector. Detect this case and skip those connectors. Also fix the write for the DP compliance file in the same way. Changes since v1: - Fix i915_displayport_test_active_write too. (DK) Signed-off-by: Maarten Lankhorst Cc: Dhinakaran Pandiyan Cc: Manasi Navare Reviewed-by: Dhinakaran Pandiyan Link: http://patchwork.freedesktop.org/patch/msgid/20170626081835.24251-1-maarten.lankhorst@linux.intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 44 +++++++++++++++++++++-------- 1 file changed, 32 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 580bd4f4a49e..643f56b8b87c 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -3778,13 +3778,18 @@ static ssize_t i915_displayport_test_active_write(struct file *file, drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { + struct intel_encoder *encoder; + if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) continue; - if (connector->status == connector_status_connected && - connector->encoder != NULL) { - intel_dp = enc_to_intel_dp(connector->encoder); + encoder = to_intel_encoder(connector->encoder); + if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) + continue; + + if (encoder && connector->status == connector_status_connected) { + intel_dp = enc_to_intel_dp(&encoder->base); status = kstrtoint(input_buffer, 10, &val); if (status < 0) break; @@ -3816,13 +3821,18 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data) drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { + struct intel_encoder *encoder; + if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) continue; - if (connector->status == connector_status_connected && - connector->encoder != NULL) { - intel_dp = enc_to_intel_dp(connector->encoder); + encoder = to_intel_encoder(connector->encoder); + if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) + continue; + + if (encoder && connector->status == connector_status_connected) { + intel_dp = enc_to_intel_dp(&encoder->base); if (intel_dp->compliance.test_active) seq_puts(m, "1"); else @@ -3862,13 +3872,18 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data) drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { + struct intel_encoder *encoder; + if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) continue; - if (connector->status == connector_status_connected && - connector->encoder != NULL) { - intel_dp = enc_to_intel_dp(connector->encoder); + encoder = to_intel_encoder(connector->encoder); + if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) + continue; + + if (encoder && connector->status == connector_status_connected) { + intel_dp = enc_to_intel_dp(&encoder->base); if (intel_dp->compliance.test_type == DP_TEST_LINK_EDID_READ) seq_printf(m, "%lx", @@ -3915,13 +3930,18 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data) drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { + struct intel_encoder *encoder; + if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) continue; - if (connector->status == connector_status_connected && - connector->encoder != NULL) { - intel_dp = 
enc_to_intel_dp(connector->encoder); + encoder = to_intel_encoder(connector->encoder); + if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) + continue; + + if (encoder && connector->status == connector_status_connected) { + intel_dp = enc_to_intel_dp(&encoder->base); seq_printf(m, "%02lx", intel_dp->compliance.test_type); } else seq_puts(m, "0"); From 7114d2e22fadd9060fd8c38b1dbc3ceeac7c536b Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Mon, 3 Jul 2017 13:51:06 +0200 Subject: [PATCH 0164/1795] drm/fb-helper: Remove drm_mode_config_fb. Remove drm_mode_config_fb, I don't see the point of it. To make it clear that it's ok to use plane->fb directly, move up drm_drv_uses_atomic_modeset so the code is skipped for atomic drivers that require plane_state->fb. Signed-off-by: Maarten Lankhorst Link: http://patchwork.freedesktop.org/patch/msgid/20170703115106.18783-1-maarten.lankhorst@linux.intel.com Reviewed-by: Daniel Vetter --- drivers/gpu/drm/drm_fb_helper.c | 22 ++++------------------ 1 file changed, 4 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 574af01d3ce9..84e47e512fe9 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -298,20 +298,6 @@ int drm_fb_helper_debug_enter(struct fb_info *info) } EXPORT_SYMBOL(drm_fb_helper_debug_enter); -/* Find the real fb for a given fb helper CRTC */ -static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_crtc *c; - - drm_for_each_crtc(c, dev) { - if (crtc->base.id == c->base.id) - return c->primary->fb; - } - - return NULL; -} - /** * drm_fb_helper_debug_leave - implementation for &fb_ops.fb_debug_leave * @info: fbdev registered by the helper @@ -328,8 +314,11 @@ int drm_fb_helper_debug_leave(struct fb_info *info) struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set; crtc = mode_set->crtc; + if (drm_drv_uses_atomic_modeset(crtc->dev)) + continue; + funcs = crtc->helper_private; - fb = drm_mode_config_fb(crtc); + fb = crtc->primary->fb; if (!crtc->enabled) continue; @@ -342,9 +331,6 @@ int drm_fb_helper_debug_leave(struct fb_info *info) if (funcs->mode_set_base_atomic == NULL) continue; - if (drm_drv_uses_atomic_modeset(crtc->dev)) - continue; - drm_fb_helper_restore_lut_atomic(mode_set->crtc); funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x, crtc->y, LEAVE_ATOMIC_MODE_SET); From fad2083483bb7f743486199f90dc042939323453 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 1 Jul 2017 00:05:17 +0100 Subject: [PATCH 0165/1795] drm/i915: Fix use-after-free of context during free_contexts When iterating the list of contexts to free, we need to use a safe iterator as we are freeing the link as we go. Pass an extra thick brown paper bag. 
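The difference between the two iterators, as a minimal standalone sketch (generic llist user, not the i915 code):

#include <linux/llist.h>
#include <linux/slab.h>

struct ctx_node {
	struct llist_node free_link;
	/* ... payload ... */
};

static void drain_free_list(struct llist_head *free_list)
{
	struct llist_node *freed = llist_del_all(free_list);
	struct ctx_node *ctx, *cn;

	/*
	 * llist_for_each_entry(ctx, freed, free_link) would read
	 * ctx->free_link.next *after* kfree(ctx) below, a use-after-free.
	 * The _safe variant loads the next node into 'cn' before the body
	 * runs, so freeing the current entry is fine.
	 */
	llist_for_each_entry_safe(ctx, cn, freed, free_link)
		kfree(ctx);
}
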
Fixes: 5f09a9c8ab6b ("drm/i915: Allow contexts to be unreferenced locklessly") Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Cc: Tvrtko Ursulin Cc: Matthew Auld Link: http://patchwork.freedesktop.org/patch/msgid/20170630230517.1938-1-chris@chris-wilson.co.uk Reviewed-by: Matthew Auld --- drivers/gpu/drm/i915/i915_gem_context.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 71d2ea7dab64..2eb5d8203999 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -193,11 +193,11 @@ static void i915_gem_context_free(struct i915_gem_context *ctx) static void contexts_free(struct drm_i915_private *i915) { struct llist_node *freed = llist_del_all(&i915->contexts.free_list); - struct i915_gem_context *ctx; + struct i915_gem_context *ctx, *cn; lockdep_assert_held(&i915->drm.struct_mutex); - llist_for_each_entry(ctx, freed, free_link) + llist_for_each_entry_safe(ctx, cn, freed, free_link) i915_gem_context_free(ctx); } From 15727ed0d944ce1dec8b9e1082dd3df29a0fdf44 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 22 Jun 2017 17:02:11 +0100 Subject: [PATCH 0166/1795] drm/i915/fbdev: Check for existence of ifbdev->vma before operations Commit fabef825626d ("drm/i915: Drop struct_mutex around frontbuffer flushes") adds a dependency to ifbdev->vma when flushing the framebufer, but the checks are only against the existence of the ifbdev->fb and not against ifbdev->vma. This leaves a window of opportunity where we may try to operate on the fbdev prior to it being probed (thanks to asynchronous booting). Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=101534 Fixes: fabef825626d ("drm/i915: Drop struct_mutex around frontbuffer flushes") Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Cc: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170622160211.783-1-chris@chris-wilson.co.uk Reviewed-by: Tvrtko Ursulin Cc: stable@vger.kernel.org --- drivers/gpu/drm/i915/intel_fbdev.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 03347c6ae599..0c4cde6b2e6f 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -535,14 +535,15 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev) drm_fb_helper_fini(&ifbdev->helper); - if (ifbdev->fb) { + if (ifbdev->vma) { mutex_lock(&ifbdev->helper.dev->struct_mutex); intel_unpin_fb_vma(ifbdev->vma); mutex_unlock(&ifbdev->helper.dev->struct_mutex); - - drm_framebuffer_remove(&ifbdev->fb->base); } + if (ifbdev->fb) + drm_framebuffer_remove(&ifbdev->fb->base); + kfree(ifbdev); } @@ -765,7 +766,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous struct intel_fbdev *ifbdev = dev_priv->fbdev; struct fb_info *info; - if (!ifbdev || !ifbdev->fb) + if (!ifbdev || !ifbdev->vma) return; info = ifbdev->helper.fbdev; @@ -812,7 +813,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev) { struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; - if (ifbdev && ifbdev->fb) + if (ifbdev && ifbdev->vma) drm_fb_helper_hotplug_event(&ifbdev->helper); } @@ -824,7 +825,7 @@ void intel_fbdev_restore_mode(struct drm_device *dev) return; intel_fbdev_sync(ifbdev); - if (!ifbdev->fb) + if (!ifbdev->vma) return; if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0) From 11f489d3c62b1e55efbc5f1ab058d85851a96d0e Mon Sep 17 
00:00:00 2001 From: Mahesh Kumar Date: Fri, 30 Jun 2017 17:40:59 +0530 Subject: [PATCH 0167/1795] drm/i915/skl+: Check for supported plane configuration in Interlace mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In Gen9 platform Interlaced fetch mode doesn't support following plane configuration: - Y/Yf tiling - 90/270 rotation - YUV420 hybrid planar source pixel formats. This patch adds check to fail the flip if any of the above configuration is requested. Changes since V1: - handle checks in intel_plane_atomic_check_with_state (ville) - takeout plane scaler checks combile with pipe scaler in next patch Changes since V2: - No need to check for NV12 as it need scaling, so it will be rejected by scaling check (ville) Signed-off-by: Mahesh Kumar Link: http://patchwork.freedesktop.org/patch/msgid/20170630121100.20159-2-mahesh1.kumar@intel.com Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=90238 Reviewed-by: Ville Syrjälä Signed-off-by: Ville Syrjälä --- drivers/gpu/drm/i915/intel_atomic_plane.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index 4325cb0a04f5..ee76fab7bb6f 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c @@ -114,6 +114,8 @@ int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state, struct drm_i915_private *dev_priv = to_i915(plane->dev); struct drm_plane_state *state = &intel_state->base; struct intel_plane *intel_plane = to_intel_plane(plane); + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; int ret; /* @@ -173,6 +175,19 @@ int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state, if (ret) return ret; + /* + * Y-tiling is not supported in IF-ID Interlace mode in + * GEN9 and above. + */ + if (state->fb && INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable && + adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { + if (state->fb->modifier == I915_FORMAT_MOD_Y_TILED || + state->fb->modifier == I915_FORMAT_MOD_Yf_TILED) { + DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n"); + return -EINVAL; + } + } + /* FIXME pre-g4x don't work like this */ if (intel_state->base.visible) crtc_state->active_planes |= BIT(intel_plane->id); From 7f58cbb187aff05e179c2d7bef9d4605fb3ced72 Mon Sep 17 00:00:00 2001 From: Mahesh Kumar Date: Fri, 30 Jun 2017 17:41:00 +0530 Subject: [PATCH 0168/1795] drm/i915/skl+: Scaling not supported in IF-ID Interlace mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit GEN9+ Interlace fetch mode doesn't support pipe/plane scaling, This patch adds check to fail the flip if pipe/plane scaling is requested in Interlace fetch mode. 
Changes since V1: - move check to skl_update_scaler (ville) - mode to adjusted_mode (ville) - combine pipe/plane scaling check Changes since V2: - Indentation fix - Added TODO to handle/reject NV12 with interlace mode Signed-off-by: Mahesh Kumar Link: http://patchwork.freedesktop.org/patch/msgid/20170630121100.20159-3-mahesh1.kumar@intel.com Reviewed-by: Ville Syrjälä Signed-off-by: Ville Syrjälä --- drivers/gpu/drm/i915/intel_display.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 4836e537c7f8..9b6689ef5e30 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4612,6 +4612,9 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, &crtc_state->scaler_state; struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; int need_scaling; /* @@ -4621,6 +4624,18 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, */ need_scaling = src_w != dst_w || src_h != dst_h; + /* + * Scaling/fitting not supported in IF-ID mode in GEN9+ + * TODO: Interlace fetch mode doesn't support YUV420 planar formats. + * Once NV12 is enabled, handle it here while allocating scaler + * for NV12. + */ + if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable && + need_scaling && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { + DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n"); + return -EINVAL; + } + /* * if plane is being disabled or scaler is no more required or force detach * - free scaler binded to this plane/crtc From c99a259b4b5192ba30d37fe25f09632d4c52f5d7 Mon Sep 17 00:00:00 2001 From: Manasi Navare Date: Fri, 30 Jun 2017 09:33:48 -0700 Subject: [PATCH 0169/1795] drm/i915/edp: Add a T12 panel delay quirk to fix DP AUX CH timeouts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch fixes the DP AUX CH timeouts observed during CI IGT tests thus fixing the CI failures. This is done by adding a quirk for a particular PCI device that requires the panel power cycle delay (T12) to be set to 800ms which is 300msecs more than the minimum value specified in the eDP spec. So a quirk is implemented for that specific PCI device. 
v4: * Add Bugzilla links for FDO bugs in the commit message (Ville, Jani) v3: * Change some comments, specify the delay as 800 * 10 (Ville) v2: * Change the function and variable names to from PPS_T12_ to _T12 since it is a T12 delay (Clint) Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=101144 Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=101154 Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=101167 Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=101515 Cc: Ville Syrjala Cc: Clinton Taylor Signed-off-by: Manasi Navare Reviewed-by: Clinton Taylor Link: http://patchwork.freedesktop.org/patch/msgid/1498840428-23176-1-git-send-email-manasi.d.navare@intel.com Signed-off-by: Ville Syrjälä --- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/intel_display.c | 14 ++++++++++++++ drivers/gpu/drm/i915/intel_dp.c | 10 ++++++++++ 3 files changed, 25 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 29d162b24d8d..a7db1ab60884 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1166,6 +1166,7 @@ enum intel_sbi_destination { #define QUIRK_INVERT_BRIGHTNESS (1<<2) #define QUIRK_BACKLIGHT_PRESENT (1<<3) #define QUIRK_PIN_SWIZZLED_PAGES (1<<5) +#define QUIRK_INCREASE_T12_DELAY (1<<6) struct intel_fbdev; struct intel_fbc_work; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9b6689ef5e30..ede3c6c02ec5 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -14771,6 +14771,17 @@ static void quirk_backlight_present(struct drm_device *dev) DRM_INFO("applying backlight present quirk\n"); } +/* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms + * which is 300 ms greater than eDP spec T12 min. + */ +static void quirk_increase_t12_delay(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + dev_priv->quirks |= QUIRK_INCREASE_T12_DELAY; + DRM_INFO("Applying T12 delay quirk\n"); +} + struct intel_quirk { int device; int subsystem_vendor; @@ -14854,6 +14865,9 @@ static struct intel_quirk intel_quirks[] = { /* Dell Chromebook 11 (2015 version) */ { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present }, + + /* Toshiba Satellite P50-C-18C */ + { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay }, }; static void intel_init_quirks(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 0445d11224d4..a9022d13e959 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -5255,6 +5255,16 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, intel_pps_dump_state("cur", &cur); vbt = dev_priv->vbt.edp.pps; + /* On Toshiba Satellite P50-C-18C system the VBT T12 delay + * of 500ms appears to be too short. Ocassionally the panel + * just fails to power back on. Increasing the delay to 800ms + * seems sufficient to avoid this problem. + */ + if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { + vbt.t11_t12 = max_t(u16, vbt.t11_t12, 800 * 10); + DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n", + vbt.t11_t12); + } /* T11_T12 delay is special and actually in units of 100ms, but zero * based in the hw (so we need to add 100 ms). 
But the sw vbt * table multiplies it with 1000 to make it in units of 100usec, From 8c3a8181a46b55ec0ab9dee3c178ba2b2b6ed77b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 27 Jun 2017 16:59:36 +0200 Subject: [PATCH 0170/1795] drm/atomic-helper: Realign function parameters Too jarring. Fixes: f869a6ecf254 ("drm/atomic: Add target_vblank support in atomic helpers (v2)") Cc: Andrey Grodzovsky Cc: Alex Deucher Reviewed-by: Alex Deucher Reviewed-by: Harry Wentland Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170627145936.18983-14-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_atomic_helper.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 4f6e52961951..667ec97d4efb 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -3087,12 +3087,11 @@ backoff: } EXPORT_SYMBOL(drm_atomic_helper_connector_set_property); -static int page_flip_common( - struct drm_atomic_state *state, - struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t flags) +static int page_flip_common(struct drm_atomic_state *state, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags) { struct drm_plane *plane = crtc->primary; struct drm_plane_state *plane_state; @@ -3186,13 +3185,12 @@ EXPORT_SYMBOL(drm_atomic_helper_page_flip); * Returns: * Returns 0 on success, negative errno numbers on failure. */ -int drm_atomic_helper_page_flip_target( - struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t flags, - uint32_t target, - struct drm_modeset_acquire_ctx *ctx) +int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags, + uint32_t target, + struct drm_modeset_acquire_ctx *ctx) { struct drm_plane *plane = crtc->primary; struct drm_atomic_state *state; From e6090cc9be15f8e3a5e736d427186e39fea7cceb Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 5 Jul 2017 16:49:00 +0100 Subject: [PATCH 0171/1795] drm: Remove pending_read_domains and pending_write_domain The last user of these (i915.ko) no longer does. We can slim down the core GEM object by removing the unused 8 bytes. Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170705154900.28697-1-chris@chris-wilson.co.uk --- include/drm/drm_gem.h | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 663d80358057..4a9d231b4294 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -130,21 +130,6 @@ struct drm_gem_object { */ uint32_t write_domain; - /** - * @pending_read_domains: - * - * While validating an exec operation, the - * new read/write domain values are computed here. - * They will be transferred to the above values - * at the point that any cache flushing occurs - */ - uint32_t pending_read_domains; - - /** - * @pending_write_domain: Write domain similar to @pending_read_domains. 
- */ - uint32_t pending_write_domain; - /** * @dma_buf: * From af2405af07d168e2905f2ac9494d3a804a549f0f Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Tue, 4 Jul 2017 17:18:21 +0200 Subject: [PATCH 0172/1795] drm/fb-helper: Push down modeset lock into FB helpers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move the modeset locking from drivers into FB helpers. v2: Also handle intel_connector_add_to_fbdev. v3: Prevent race in intel_dp_mst with ->detect (Maarten) Cc: Maarten Lankhorst Cc: Alex Deucher Cc: Christian König Tested-by: John Stultz Signed-off-by: Thierry Reding (v1) Reviewed-by: Maarten Lankhorst Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170704151833.17304-2-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_fb_helper.c | 40 ++++++++++++++++++++++---- drivers/gpu/drm/i915/intel_dp_mst.c | 9 ++---- drivers/gpu/drm/radeon/radeon_dp_mst.c | 7 ----- 3 files changed, 37 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 84e47e512fe9..7d0d50e404ee 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -109,8 +109,8 @@ static DEFINE_MUTEX(kernel_fb_helper_lock); for (({ lockdep_assert_held(&(fbh)->dev->mode_config.mutex); }), \ i__ = 0; i__ < (fbh)->connector_count; i__++) -int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, - struct drm_connector *connector) +static int __drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, + struct drm_connector *connector) { struct drm_fb_helper_connector *fb_conn; struct drm_fb_helper_connector **temp; @@ -141,8 +141,23 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, drm_connector_get(connector); fb_conn->connector = connector; fb_helper->connector_info[fb_helper->connector_count++] = fb_conn; + return 0; } + +int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, + struct drm_connector *connector) +{ + int err; + + mutex_lock(&fb_helper->dev->mode_config.mutex); + + err = __drm_fb_helper_add_one_connector(fb_helper, connector); + + mutex_unlock(&fb_helper->dev->mode_config.mutex); + + return err; +} EXPORT_SYMBOL(drm_fb_helper_add_one_connector); /** @@ -172,8 +187,7 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) mutex_lock(&dev->mode_config.mutex); drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { - ret = drm_fb_helper_add_one_connector(fb_helper, connector); - + ret = __drm_fb_helper_add_one_connector(fb_helper, connector); if (ret) goto fail; } @@ -198,8 +212,8 @@ out: } EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors); -int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, - struct drm_connector *connector) +static int __drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, + struct drm_connector *connector) { struct drm_fb_helper_connector *fb_helper_connector; int i, j; @@ -227,6 +241,20 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, return 0; } + +int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, + struct drm_connector *connector) +{ + int err; + + mutex_lock(&fb_helper->dev->mode_config.mutex); + + err = __drm_fb_helper_remove_one_connector(fb_helper, connector); + + mutex_unlock(&fb_helper->dev->mode_config.mutex); + + return err; +} EXPORT_SYMBOL(drm_fb_helper_remove_one_connector); static void 
drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper) diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 2cf046beae0f..9aa959284497 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -501,11 +501,8 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo static void intel_dp_register_mst_connector(struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); - struct drm_device *dev = connector->dev; - drm_modeset_lock_all(dev); intel_connector_add_to_fbdev(intel_connector); - drm_modeset_unlock_all(dev); drm_connector_register(&intel_connector->base); } @@ -514,15 +511,15 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); - struct drm_device *dev = connector->dev; drm_connector_unregister(connector); /* need to nuke the connector */ - drm_modeset_lock_all(dev); intel_connector_remove_from_fbdev(intel_connector); + /* prevent race with the check in ->detect */ + drm_modeset_lock(&connector->dev->mode_config.connection_mutex, NULL); intel_connector->mst_port = NULL; - drm_modeset_unlock_all(dev); + drm_modeset_unlock(&connector->dev->mode_config.connection_mutex); drm_connector_unreference(&intel_connector->base); DRM_DEBUG_KMS("\n"); diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index 6598306dca9b..ebdf1b859cb6 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -300,9 +300,7 @@ static void radeon_dp_register_mst_connector(struct drm_connector *connector) struct drm_device *dev = connector->dev; struct radeon_device *rdev = dev->dev_private; - drm_modeset_lock_all(dev); radeon_fb_add_connector(rdev, connector); - drm_modeset_unlock_all(dev); drm_connector_register(connector); } @@ -315,13 +313,8 @@ static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct radeon_device *rdev = dev->dev_private; drm_connector_unregister(connector); - /* need to nuke the connector */ - drm_modeset_lock_all(dev); - /* dpms off */ radeon_fb_remove_connector(rdev, connector); - drm_connector_cleanup(connector); - drm_modeset_unlock_all(dev); kfree(connector); DRM_DEBUG_KMS("\n"); From 666b7cdc69b03cb55266a14aa5874af0d27bfb0f Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 4 Jul 2017 17:18:22 +0200 Subject: [PATCH 0173/1795] drm/i915: Drop FBDEV #ifdev in mst code Since commit a03fdcb1863297481a4b817c2a759cafcbdfa0ae Author: Archit Taneja Date: Wed Aug 5 12:28:57 2015 +0530 drm: Add top level Kconfig option for DRM fbdev emulation this is properly handled using dummy functions. This essentially undoes commit 7296c849bf2eca2bd7d34a4686a53e3089150ac1 Author: Chris Wilson Date: Tue Jul 22 20:10:28 2014 +1000 drm/i915: fix build without fbde v2: We also need to drop the #ifdef from headers. Seems like a small price to pay for slightly cleaner code. 
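The dummy-function approach referred to above looks roughly like this in <drm/drm_fb_helper.h> (abridged to a single helper); with the stubs compiling to no-ops, callers no longer need their own #ifdef blocks:

#ifdef CONFIG_DRM_FBDEV_EMULATION
int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
				    struct drm_connector *connector);
#else
static inline int
drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
				struct drm_connector *connector)
{
	/* fbdev emulation disabled: silently succeed */
	return 0;
}
#endif
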
Cc: Chris Wilson Reviewed-by: Maarten Lankhorst Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170704151833.17304-3-daniel.vetter@ffwll.ch --- drivers/gpu/drm/i915/i915_drv.h | 2 -- drivers/gpu/drm/i915/intel_dp_mst.c | 38 ++++++++--------------------- 2 files changed, 10 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e1f7c97a338a..2981014fcfe2 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2303,11 +2303,9 @@ struct drm_i915_private { struct drm_i915_gem_object *vlv_pctx; -#ifdef CONFIG_DRM_FBDEV_EMULATION /* list of fbdev register on this device */ struct intel_fbdev *fbdev; struct work_struct fbdev_suspend_work; -#endif struct drm_property *broadcast_rgb_property; struct drm_property *force_audio_property; diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 9aa959284497..e4ea968b1d6b 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -443,28 +443,6 @@ static bool intel_dp_mst_get_hw_state(struct intel_connector *connector) return false; } -static void intel_connector_add_to_fbdev(struct intel_connector *connector) -{ -#ifdef CONFIG_DRM_FBDEV_EMULATION - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - - if (dev_priv->fbdev) - drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, - &connector->base); -#endif -} - -static void intel_connector_remove_from_fbdev(struct intel_connector *connector) -{ -#ifdef CONFIG_DRM_FBDEV_EMULATION - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - - if (dev_priv->fbdev) - drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, - &connector->base); -#endif -} - static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *pathprop) { struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr); @@ -500,28 +478,32 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo static void intel_dp_register_mst_connector(struct drm_connector *connector) { - struct intel_connector *intel_connector = to_intel_connector(connector); + struct drm_i915_private *dev_priv = to_i915(connector->dev); - intel_connector_add_to_fbdev(intel_connector); + if (dev_priv->fbdev) + drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, + connector); - drm_connector_register(&intel_connector->base); + drm_connector_register(connector); } static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); + struct drm_i915_private *dev_priv = to_i915(connector->dev); drm_connector_unregister(connector); - /* need to nuke the connector */ - intel_connector_remove_from_fbdev(intel_connector); + if (dev_priv->fbdev) + drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, + connector); /* prevent race with the check in ->detect */ drm_modeset_lock(&connector->dev->mode_config.connection_mutex, NULL); intel_connector->mst_port = NULL; drm_modeset_unlock(&connector->dev->mode_config.connection_mutex); - drm_connector_unreference(&intel_connector->base); + drm_connector_unreference(connector); DRM_DEBUG_KMS("\n"); } From e9827d8ea2142d49a6622c72a3371a1d1e929c1d Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Tue, 4 Jul 2017 17:18:23 +0200 Subject: [PATCH 0174/1795] drm/fb-helper: 
Add top-level lock Introduce a new top-level lock for the FB helper code. This will allow better locking granularity and avoid the need to abuse modeset locking for this purpose instead. This patch just adds the new lock everywhere we currently grab mode_config->mutex (explicitly, or through drm_modeset_lock_all). Follow-up patches will push the kms locking down into only the places that need it. v2: - use lockdep_assert_held - use drm_fb_helper_for_each_connector where possible - use the new top-level lock consistently, i.e. in all the places we're currently acquiring mode_config.mutex. - small polish to the kerneldoc Tested-by: John Stultz Signed-off-by: Thierry Reding (v1) Reviewed-by: Maarten Lankhorst Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170704151833.17304-4-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_fb_helper.c | 47 ++++++++++++++++++++++++++++++--- include/drm/drm_fb_helper.h | 19 ++++++++++++- 2 files changed, 61 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 7d0d50e404ee..30188ec809b5 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -119,7 +119,8 @@ static int __drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, if (!drm_fbdev_emulation) return 0; - WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex)); + lockdep_assert_held(&fb_helper->lock); + lockdep_assert_held(&fb_helper->dev->mode_config.mutex); count = fb_helper->connector_count + 1; @@ -150,11 +151,13 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, { int err; + mutex_lock(&fb_helper->lock); mutex_lock(&fb_helper->dev->mode_config.mutex); err = __drm_fb_helper_add_one_connector(fb_helper, connector); mutex_unlock(&fb_helper->dev->mode_config.mutex); + mutex_unlock(&fb_helper->lock); return err; } @@ -184,6 +187,7 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) if (!drm_fbdev_emulation) return 0; + mutex_lock(&fb_helper->lock); mutex_lock(&dev->mode_config.mutex); drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { @@ -207,6 +211,7 @@ fail: out: drm_connector_list_iter_end(&conn_iter); mutex_unlock(&dev->mode_config.mutex); + mutex_unlock(&fb_helper->lock); return ret; } @@ -221,9 +226,9 @@ static int __drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, if (!drm_fbdev_emulation) return 0; - WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex)); + lockdep_assert_held(&fb_helper->lock); - for (i = 0; i < fb_helper->connector_count; i++) { + drm_fb_helper_for_each_connector(fb_helper, i) { if (fb_helper->connector_info[i]->connector == connector) break; } @@ -247,11 +252,13 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, { int err; + mutex_lock(&fb_helper->lock); mutex_lock(&fb_helper->dev->mode_config.mutex); err = __drm_fb_helper_remove_one_connector(fb_helper, connector); mutex_unlock(&fb_helper->dev->mode_config.mutex); + mutex_unlock(&fb_helper->lock); return err; } @@ -503,16 +510,21 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) if (!drm_fbdev_emulation) return -ENODEV; + mutex_lock(&fb_helper->lock); drm_modeset_lock_all(dev); + ret = restore_fbdev_mode(fb_helper); do_delayed = fb_helper->delayed_hotplug; if (do_delayed) fb_helper->delayed_hotplug = false; + drm_modeset_unlock_all(dev); + mutex_unlock(&fb_helper->lock); if (do_delayed) 
drm_fb_helper_hotplug_event(fb_helper); + return ret; } EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked); @@ -562,11 +574,13 @@ static bool drm_fb_helper_force_kernel_mode(void) if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) continue; + mutex_lock(&helper->lock); drm_modeset_lock_all(dev); ret = restore_fbdev_mode(helper); if (ret) error = true; drm_modeset_unlock_all(dev); + mutex_unlock(&helper->lock); } return error; } @@ -606,9 +620,11 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) /* * For each CRTC in this fb, turn the connectors on/off. */ + mutex_lock(&fb_helper->lock); drm_modeset_lock_all(dev); if (!drm_fb_helper_is_bound(fb_helper)) { drm_modeset_unlock_all(dev); + mutex_unlock(&fb_helper->lock); return; } @@ -627,6 +643,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) } } drm_modeset_unlock_all(dev); + mutex_unlock(&fb_helper->lock); } /** @@ -748,6 +765,7 @@ void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, INIT_WORK(&helper->resume_work, drm_fb_helper_resume_worker); INIT_WORK(&helper->dirty_work, drm_fb_helper_dirty_work); helper->dirty_clip.x1 = helper->dirty_clip.y1 = ~0; + mutex_init(&helper->lock); helper->funcs = funcs; helper->dev = dev; } @@ -913,6 +931,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) } mutex_unlock(&kernel_fb_helper_lock); + mutex_destroy(&fb_helper->lock); drm_fb_helper_crtc_free(fb_helper); } @@ -1243,9 +1262,11 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) if (oops_in_progress) return -EBUSY; + mutex_lock(&fb_helper->lock); drm_modeset_lock_all(dev); if (!drm_fb_helper_is_bound(fb_helper)) { drm_modeset_unlock_all(dev); + mutex_unlock(&fb_helper->lock); return -EBUSY; } @@ -1278,6 +1299,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) } out: drm_modeset_unlock_all(dev); + mutex_unlock(&fb_helper->lock); return rc; } EXPORT_SYMBOL(drm_fb_helper_setcmap); @@ -1300,6 +1322,7 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, struct drm_crtc *crtc; int ret = 0; + mutex_lock(&fb_helper->lock); mutex_lock(&dev->mode_config.mutex); if (!drm_fb_helper_is_bound(fb_helper)) { ret = -EBUSY; @@ -1346,6 +1369,7 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, unlock: mutex_unlock(&dev->mode_config.mutex); + mutex_unlock(&fb_helper->lock); return ret; } EXPORT_SYMBOL(drm_fb_helper_ioctl); @@ -1575,9 +1599,11 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, if (oops_in_progress) return -EBUSY; + mutex_lock(&fb_helper->lock); drm_modeset_lock_all(dev); if (!drm_fb_helper_is_bound(fb_helper)) { drm_modeset_unlock_all(dev); + mutex_unlock(&fb_helper->lock); return -EBUSY; } @@ -1586,6 +1612,7 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, else ret = pan_display_legacy(var, info); drm_modeset_unlock_all(dev); + mutex_unlock(&fb_helper->lock); return ret; } @@ -2252,6 +2279,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper, DRM_DEBUG_KMS("No connectors reported connected with modes\n"); /* prevent concurrent modification of connector_count by hotplug */ + lockdep_assert_held(&fb_helper->lock); lockdep_assert_held(&fb_helper->dev->mode_config.mutex); crtcs = kcalloc(fb_helper->connector_count, @@ -2378,12 +2406,14 @@ int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) if (!drm_fbdev_emulation) return 0; + mutex_lock(&fb_helper->lock); mutex_lock(&dev->mode_config.mutex); drm_setup_crtcs(fb_helper, 
dev->mode_config.max_width, dev->mode_config.max_height); ret = drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); mutex_unlock(&dev->mode_config.mutex); + mutex_unlock(&fb_helper->lock); if (ret) return ret; @@ -2431,25 +2461,34 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config); int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) { struct drm_device *dev = fb_helper->dev; + int err = 0; if (!drm_fbdev_emulation) return 0; + mutex_lock(&fb_helper->lock); mutex_lock(&dev->mode_config.mutex); + if (!fb_helper->fb || !drm_fb_helper_is_bound(fb_helper)) { fb_helper->delayed_hotplug = true; mutex_unlock(&dev->mode_config.mutex); - return 0; + goto unlock; } + DRM_DEBUG_KMS("\n"); drm_setup_crtcs(fb_helper, fb_helper->fb->width, fb_helper->fb->height); mutex_unlock(&dev->mode_config.mutex); + mutex_unlock(&fb_helper->lock); drm_fb_helper_set_par(fb_helper->fbdev); return 0; + +unlock: + mutex_unlock(&fb_helper->lock); + return err; } EXPORT_SYMBOL(drm_fb_helper_hotplug_event); diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 119e5e4609c7..ea170b96e88d 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -169,7 +169,6 @@ struct drm_fb_helper_connector { * @crtc_info: per-CRTC helper state (mode, x/y offset, etc) * @connector_count: number of connected connectors * @connector_info_alloc_count: size of connector_info - * @connector_info: array of per-connector information * @funcs: driver callbacks for fb helper * @fbdev: emulated fbdev device info struct * @pseudo_palette: fake palette of 16 colors @@ -191,6 +190,12 @@ struct drm_fb_helper { struct drm_fb_helper_crtc *crtc_info; int connector_count; int connector_info_alloc_count; + /** + * @connector_info: + * + * Array of per-connector information. Do not iterate directly, but use + * drm_fb_helper_for_each_connector. + */ struct drm_fb_helper_connector **connector_info; const struct drm_fb_helper_funcs *funcs; struct fb_info *fbdev; @@ -200,6 +205,18 @@ struct drm_fb_helper { struct work_struct dirty_work; struct work_struct resume_work; + /** + * @lock: + * + * Top-level FBDEV helper lock. This protects all internal data + * structures and lists, such as @connector_info and @crtc_info. + * + * FIXME: fbdev emulation locking is a mess and long term we want to + * protect all helper internal state with this lock as well as reduce + * core KMS locking as much as possible. + */ + struct mutex lock; + /** * @kernel_fb_list: * From bdac4a052a47920eeae22441ab608612dc0ef4e5 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 4 Jul 2017 17:18:24 +0200 Subject: [PATCH 0175/1795] drm/fb-helper: Push locking in fb_is_bound That function only needs to take the individual crtc locks, not all the kms locks. Push down the locking and then minimize it. 
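For reference, the calling convention that falls out of this is roughly the following (a simplified sketch of the caller pattern, not the exact upstream code): callers only take the fbdev helper lock, and drm_fb_helper_is_bound() grabs each CRTC's own lock internally, just around the framebuffer pointer checks.

        mutex_lock(&fb_helper->lock);
        if (!drm_fb_helper_is_bound(fb_helper)) {
                /* is_bound() took and dropped the per-CRTC locks itself */
                mutex_unlock(&fb_helper->lock);
                return -EBUSY;
        }
        /* ... touch fbdev helper state, take KMS locks only where needed ... */
        mutex_unlock(&fb_helper->lock);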
Cc: John Stultz Cc: Thierry Reding Reviewed-by: Maarten Lankhorst Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170704151833.17304-5-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_fb_helper.c | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 30188ec809b5..08a5e0d23a42 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -543,10 +543,12 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper) return false; drm_for_each_crtc(crtc, dev) { + drm_modeset_lock(&crtc->mutex, NULL); if (crtc->primary->fb) crtcs_bound++; if (crtc->primary->fb == fb_helper->fb) bound++; + drm_modeset_unlock(&crtc->mutex); } if (bound < crtcs_bound) @@ -621,13 +623,12 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) * For each CRTC in this fb, turn the connectors on/off. */ mutex_lock(&fb_helper->lock); - drm_modeset_lock_all(dev); if (!drm_fb_helper_is_bound(fb_helper)) { - drm_modeset_unlock_all(dev); mutex_unlock(&fb_helper->lock); return; } + drm_modeset_lock_all(dev); for (i = 0; i < fb_helper->crtc_count; i++) { crtc = fb_helper->crtc_info[i].mode_set.crtc; @@ -1263,13 +1264,12 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) return -EBUSY; mutex_lock(&fb_helper->lock); - drm_modeset_lock_all(dev); if (!drm_fb_helper_is_bound(fb_helper)) { - drm_modeset_unlock_all(dev); mutex_unlock(&fb_helper->lock); return -EBUSY; } + drm_modeset_lock_all(dev); for (i = 0; i < fb_helper->crtc_count; i++) { crtc = fb_helper->crtc_info[i].mode_set.crtc; crtc_funcs = crtc->helper_private; @@ -1323,12 +1323,12 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, int ret = 0; mutex_lock(&fb_helper->lock); - mutex_lock(&dev->mode_config.mutex); if (!drm_fb_helper_is_bound(fb_helper)) { ret = -EBUSY; goto unlock; } + mutex_lock(&dev->mode_config.mutex); switch (cmd) { case FBIO_WAITFORVSYNC: /* @@ -1600,13 +1600,12 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, return -EBUSY; mutex_lock(&fb_helper->lock); - drm_modeset_lock_all(dev); if (!drm_fb_helper_is_bound(fb_helper)) { - drm_modeset_unlock_all(dev); mutex_unlock(&fb_helper->lock); return -EBUSY; } + drm_modeset_lock_all(dev); if (drm_drv_uses_atomic_modeset(dev)) ret = pan_display_atomic(var, info); else @@ -2467,16 +2466,15 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) return 0; mutex_lock(&fb_helper->lock); - mutex_lock(&dev->mode_config.mutex); - if (!fb_helper->fb || !drm_fb_helper_is_bound(fb_helper)) { fb_helper->delayed_hotplug = true; - mutex_unlock(&dev->mode_config.mutex); - goto unlock; + mutex_unlock(&fb_helper->lock); + return err; } DRM_DEBUG_KMS("\n"); + mutex_lock(&dev->mode_config.mutex); drm_setup_crtcs(fb_helper, fb_helper->fb->width, fb_helper->fb->height); mutex_unlock(&dev->mode_config.mutex); @@ -2485,10 +2483,6 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) drm_fb_helper_set_par(fb_helper->fbdev); return 0; - -unlock: - mutex_unlock(&fb_helper->lock); - return err; } EXPORT_SYMBOL(drm_fb_helper_hotplug_event); From 235d3e4fa115b39a72112f29ec5c84b847111edd Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 4 Jul 2017 17:18:25 +0200 Subject: [PATCH 0176/1795] drm/fb-helper: Drop locking from the vsync wait ioctl code Like with the drm-native vblank wait ioctl we can entirely rely on the spinlocks in drm_vblank.c, no need at all to take 
expensive mutexes. The only reason we had to take mode_config.mutex was to protect the fbdev helper's data-structures, but that's now done by fb_helper->lock. Cc: John Stultz Cc: Thierry Reding Reviewed-by: Maarten Lankhorst Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170704151833.17304-6-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_fb_helper.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 08a5e0d23a42..9efb17c3cbe8 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1328,7 +1328,6 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, goto unlock; } - mutex_lock(&dev->mode_config.mutex); switch (cmd) { case FBIO_WAITFORVSYNC: /* @@ -1368,7 +1367,6 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, } unlock: - mutex_unlock(&dev->mode_config.mutex); mutex_unlock(&fb_helper->lock); return ret; } From 5c2e3448d104dc85250467af9ecc1e4779c56090 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 4 Jul 2017 17:18:26 +0200 Subject: [PATCH 0177/1795] drm/fb-helper: Push locking into pan_display_atomic|legacy For the legacy path we'll keep drm_modeset_lock_all, for the atomic one we drop the use of the magic implicit context and wire it up properly. Cc: John Stultz Cc: Thierry Reding Reviewed-by: Maarten Lankhorst Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170704151833.17304-7-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_fb_helper.c | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 9efb17c3cbe8..7c24c8cd2e23 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1317,7 +1317,6 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { struct drm_fb_helper *fb_helper = info->par; - struct drm_device *dev = fb_helper->dev; struct drm_mode_set *mode_set; struct drm_crtc *crtc; int ret = 0; @@ -1508,12 +1507,17 @@ static int pan_display_atomic(struct fb_var_screeninfo *var, struct drm_plane *plane; int i, ret; unsigned int plane_mask; + struct drm_modeset_acquire_ctx ctx; + + drm_modeset_acquire_init(&ctx, 0); state = drm_atomic_state_alloc(dev); - if (!state) - return -ENOMEM; + if (!state) { + ret = -ENOMEM; + goto out_ctx; + } - state->acquire_ctx = dev->mode_config.acquire_ctx; + state->acquire_ctx = &ctx; retry: plane_mask = 0; for (i = 0; i < fb_helper->crtc_count; i++) { @@ -1526,7 +1530,7 @@ retry: ret = __drm_atomic_helper_set_config(mode_set, state); if (ret != 0) - goto fail; + goto out_state; plane = mode_set->crtc->primary; plane_mask |= (1 << drm_plane_index(plane)); @@ -1535,23 +1539,27 @@ retry: ret = drm_atomic_commit(state); if (ret != 0) - goto fail; + goto out_state; info->var.xoffset = var->xoffset; info->var.yoffset = var->yoffset; -fail: +out_state: drm_atomic_clean_old_fb(dev, plane_mask, ret); if (ret == -EDEADLK) goto backoff; drm_atomic_state_put(state); +out_ctx: + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + return ret; backoff: drm_atomic_state_clear(state); - drm_atomic_legacy_backoff(state); + drm_modeset_backoff(&ctx); goto retry; } @@ -1564,6 +1572,7 @@ static int pan_display_legacy(struct fb_var_screeninfo *var, int ret = 0; int i; + drm_modeset_lock_all(fb_helper->dev); for (i = 0; i < fb_helper->crtc_count; i++) { modeset = 
&fb_helper->crtc_info[i].mode_set; @@ -1578,6 +1587,7 @@ static int pan_display_legacy(struct fb_var_screeninfo *var, } } } + drm_modeset_unlock_all(fb_helper->dev); return ret; } @@ -1603,12 +1613,10 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, return -EBUSY; } - drm_modeset_lock_all(dev); if (drm_drv_uses_atomic_modeset(dev)) ret = pan_display_atomic(var, info); else ret = pan_display_legacy(var, info); - drm_modeset_unlock_all(dev); mutex_unlock(&fb_helper->lock); return ret; From 1d0c641091805b9ca382ac44d022a4963e47f33e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 4 Jul 2017 17:18:27 +0200 Subject: [PATCH 0178/1795] drm/fb-helper: Push locking into restore_fbdev_mode_atomic|legacy Same game as with the panning function, use drm_modeset_lock_all for legacy paths, and a proper acquire ctx w/w mutex dance for atomic. Cc: John Stultz Cc: Thierry Reding Reviewed-by: Maarten Lankhorst Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170704151833.17304-8-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_fb_helper.c | 48 +++++++++++++++++---------------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 7c24c8cd2e23..c5bf37f3bcdd 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -382,12 +382,17 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper) struct drm_atomic_state *state; int i, ret; unsigned int plane_mask; + struct drm_modeset_acquire_ctx ctx; + + drm_modeset_acquire_init(&ctx, 0); state = drm_atomic_state_alloc(dev); - if (!state) - return -ENOMEM; + if (!state) { + ret = -ENOMEM; + goto out_ctx; + } - state->acquire_ctx = dev->mode_config.acquire_ctx; + state->acquire_ctx = &ctx; retry: plane_mask = 0; drm_for_each_plane(plane, dev) { @@ -396,7 +401,7 @@ retry: plane_state = drm_atomic_get_plane_state(state, plane); if (IS_ERR(plane_state)) { ret = PTR_ERR(plane_state); - goto fail; + goto out_state; } plane_state->rotation = DRM_MODE_ROTATE_0; @@ -410,7 +415,7 @@ retry: ret = __drm_atomic_helper_disable_plane(plane, plane_state); if (ret != 0) - goto fail; + goto out_state; } for (i = 0; i < fb_helper->crtc_count; i++) { @@ -418,23 +423,27 @@ retry: ret = __drm_atomic_helper_set_config(mode_set, state); if (ret != 0) - goto fail; + goto out_state; } ret = drm_atomic_commit(state); -fail: +out_state: drm_atomic_clean_old_fb(dev, plane_mask, ret); if (ret == -EDEADLK) goto backoff; drm_atomic_state_put(state); +out_ctx: + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + return ret; backoff: drm_atomic_state_clear(state); - drm_atomic_legacy_backoff(state); + drm_modeset_backoff(&ctx); goto retry; } @@ -443,8 +452,9 @@ static int restore_fbdev_mode_legacy(struct drm_fb_helper *fb_helper) { struct drm_device *dev = fb_helper->dev; struct drm_plane *plane; - int i; + int i, ret = 0; + drm_modeset_lock_all(fb_helper->dev); drm_for_each_plane(plane, dev) { if (plane->type != DRM_PLANE_TYPE_PRIMARY) drm_plane_force_disable(plane); @@ -458,32 +468,31 @@ static int restore_fbdev_mode_legacy(struct drm_fb_helper *fb_helper) for (i = 0; i < fb_helper->crtc_count; i++) { struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set; struct drm_crtc *crtc = mode_set->crtc; - int ret; if (crtc->funcs->cursor_set2) { ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0); if (ret) - return ret; + goto out; } else if (crtc->funcs->cursor_set) { ret = 
crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0); if (ret) - return ret; + goto out; } ret = drm_mode_set_config_internal(mode_set); if (ret) - return ret; + goto out; } +out: + drm_modeset_unlock_all(fb_helper->dev); - return 0; + return ret; } static int restore_fbdev_mode(struct drm_fb_helper *fb_helper) { struct drm_device *dev = fb_helper->dev; - drm_warn_on_modeset_not_all_locked(dev); - if (drm_drv_uses_atomic_modeset(dev)) return restore_fbdev_mode_atomic(fb_helper); else @@ -503,7 +512,6 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper) */ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) { - struct drm_device *dev = fb_helper->dev; bool do_delayed; int ret; @@ -511,15 +519,11 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) return -ENODEV; mutex_lock(&fb_helper->lock); - drm_modeset_lock_all(dev); - ret = restore_fbdev_mode(fb_helper); do_delayed = fb_helper->delayed_hotplug; if (do_delayed) fb_helper->delayed_hotplug = false; - - drm_modeset_unlock_all(dev); mutex_unlock(&fb_helper->lock); if (do_delayed) @@ -577,11 +581,9 @@ static bool drm_fb_helper_force_kernel_mode(void) continue; mutex_lock(&helper->lock); - drm_modeset_lock_all(dev); ret = restore_fbdev_mode(helper); if (ret) error = true; - drm_modeset_unlock_all(dev); mutex_unlock(&helper->lock); } return error; From e13a058310509b22b2b45cbdd82d8797e173c3db Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 5 Jul 2017 06:56:29 +0200 Subject: [PATCH 0179/1795] drm/fb-helper: Stop using mode_config.mutex for internals MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Those are now all protected using fb_helper->lock. v2: We still need to hold mode_config.mutex right around calling connector->fill_modes. v3: I forgot to hold mode_config.mutex while looking at connector->status and the mode list. Also, we need to patch up the i915 ->initial_config callback to grab the locks it needs to inspect the modeset state recovered from the fw. v4: Don't reorder the probe too much (Ville). 
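The resulting locking rule can be summarised with a small sketch (illustrative only, the exact code is in the diff below): fbdev helper internals are covered by fb_helper->lock, while mode_config.mutex is only taken around connector probing and mode-list access.

        /* fbdev helper internals: fb_helper->lock only. */
        lockdep_assert_held(&fb_helper->lock);
        drm_fb_helper_for_each_connector(fb_helper, i) {
                /* ... inspect fb_helper->connector_info[i] ... */
        }

        /* Probing and the connector mode lists still need the KMS lock. */
        mutex_lock(&fb_helper->dev->mode_config.mutex);
        if (drm_fb_helper_probe_connector_modes(fb_helper, width, height) == 0)
                DRM_DEBUG_KMS("No connectors reported connected with modes\n");
        mutex_unlock(&fb_helper->dev->mode_config.mutex);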
Cc: Ville Syrjälä Cc: John Stultz Cc: Thierry Reding Reviewed-by: Maarten Lankhorst Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170705045629.31265-1-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_fb_helper.c | 33 ++++++++---------------------- drivers/gpu/drm/drm_vblank.c | 2 +- drivers/gpu/drm/i915/intel_fbdev.c | 16 +++++++++++---- 3 files changed, 21 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index c5bf37f3bcdd..5d3d776508b3 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -106,7 +106,7 @@ static DEFINE_MUTEX(kernel_fb_helper_lock); */ #define drm_fb_helper_for_each_connector(fbh, i__) \ - for (({ lockdep_assert_held(&(fbh)->dev->mode_config.mutex); }), \ + for (({ lockdep_assert_held(&(fbh)->lock); }), \ i__ = 0; i__ < (fbh)->connector_count; i__++) static int __drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, @@ -120,7 +120,6 @@ static int __drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, return 0; lockdep_assert_held(&fb_helper->lock); - lockdep_assert_held(&fb_helper->dev->mode_config.mutex); count = fb_helper->connector_count + 1; @@ -152,11 +151,7 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, int err; mutex_lock(&fb_helper->lock); - mutex_lock(&fb_helper->dev->mode_config.mutex); - err = __drm_fb_helper_add_one_connector(fb_helper, connector); - - mutex_unlock(&fb_helper->dev->mode_config.mutex); mutex_unlock(&fb_helper->lock); return err; @@ -188,7 +183,6 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) return 0; mutex_lock(&fb_helper->lock); - mutex_lock(&dev->mode_config.mutex); drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { ret = __drm_fb_helper_add_one_connector(fb_helper, connector); @@ -210,7 +204,6 @@ fail: fb_helper->connector_count = 0; out: drm_connector_list_iter_end(&conn_iter); - mutex_unlock(&dev->mode_config.mutex); mutex_unlock(&fb_helper->lock); return ret; @@ -253,11 +246,7 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, int err; mutex_lock(&fb_helper->lock); - mutex_lock(&fb_helper->dev->mode_config.mutex); - err = __drm_fb_helper_remove_one_connector(fb_helper, connector); - - mutex_unlock(&fb_helper->dev->mode_config.mutex); mutex_unlock(&fb_helper->lock); return err; @@ -1879,12 +1868,11 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe EXPORT_SYMBOL(drm_fb_helper_fill_var); static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper, - uint32_t maxX, - uint32_t maxY) + uint32_t maxX, + uint32_t maxY) { struct drm_connector *connector; - int count = 0; - int i; + int i, count = 0; drm_fb_helper_for_each_connector(fb_helper, i) { connector = fb_helper->connector_info[i]->connector; @@ -2282,12 +2270,8 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper, int i; DRM_DEBUG_KMS("\n"); - if (drm_fb_helper_probe_connector_modes(fb_helper, width, height) == 0) - DRM_DEBUG_KMS("No connectors reported connected with modes\n"); - /* prevent concurrent modification of connector_count by hotplug */ lockdep_assert_held(&fb_helper->lock); - lockdep_assert_held(&fb_helper->dev->mode_config.mutex); crtcs = kcalloc(fb_helper->connector_count, sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL); @@ -2302,6 +2286,9 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper, goto out; } + 
mutex_lock(&fb_helper->dev->mode_config.mutex); + if (drm_fb_helper_probe_connector_modes(fb_helper, width, height) == 0) + DRM_DEBUG_KMS("No connectors reported connected with modes\n"); drm_enable_connectors(fb_helper, enabled); if (!(fb_helper->funcs->initial_config && @@ -2323,6 +2310,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper, drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height); } + mutex_unlock(&fb_helper->dev->mode_config.mutex); /* need to set the modesets up here for use later */ /* fill out the connector<->crtc mappings into the modesets */ @@ -2414,12 +2402,10 @@ int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) return 0; mutex_lock(&fb_helper->lock); - mutex_lock(&dev->mode_config.mutex); drm_setup_crtcs(fb_helper, dev->mode_config.max_width, dev->mode_config.max_height); ret = drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); - mutex_unlock(&dev->mode_config.mutex); mutex_unlock(&fb_helper->lock); if (ret) return ret; @@ -2482,10 +2468,7 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) DRM_DEBUG_KMS("\n"); - mutex_lock(&dev->mode_config.mutex); drm_setup_crtcs(fb_helper, fb_helper->fb->width, fb_helper->fb->height); - - mutex_unlock(&dev->mode_config.mutex); mutex_unlock(&fb_helper->lock); drm_fb_helper_set_par(fb_helper->fbdev); diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index 05d043e9219f..8099574c8a11 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -836,7 +836,7 @@ static void send_vblank_event(struct drm_device *dev, * NOTE: Drivers using this to send out the &drm_crtc_state.event as part of an * atomic commit must ensure that the next vblank happens at exactly the same * time as the atomic commit is committed to the hardware. This function itself - * does **not** protect again the next vblank interrupt racing with either this + * does **not** protect against the next vblank interrupt racing with either this * function call or the atomic commit operation. A possible sequence could be: * * 1. Driver commits new hardware state into vblank-synchronized registers. 
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 03347c6ae599..460ca0b3fb88 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -352,14 +352,20 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); int i, j; bool *save_enabled; - bool fallback = true; + bool fallback = true, ret = true; int num_connectors_enabled = 0; int num_connectors_detected = 0; + struct drm_modeset_acquire_ctx ctx; save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL); if (!save_enabled) return false; + drm_modeset_acquire_init(&ctx, 0); + + while (drm_modeset_lock_all_ctx(fb_helper->dev, &ctx) != 0) + drm_modeset_backoff(&ctx); + memcpy(save_enabled, enabled, count); mask = GENMASK(count - 1, 0); conn_configured = 0; @@ -509,12 +515,14 @@ retry: bail: DRM_DEBUG_KMS("Not using firmware configuration\n"); memcpy(enabled, save_enabled, count); - kfree(save_enabled); - return false; + ret = false; } + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + kfree(save_enabled); - return true; + return ret; } static const struct drm_fb_helper_funcs intel_fb_helper_funcs = { From 6b7dc6e9f82615836b389cb5f806914048b132cd Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 4 Jul 2017 17:18:29 +0200 Subject: [PATCH 0180/1795] drm/fb-helper: Split dpms handling into legacy and atomic paths Like with panning and modesetting, stick with a simple drm_modeset_lock_all for the legacy path, and the full atomic dance for atomic drivers. This means a bit more boilerplate since setting up the atomic state machinery is rather verbose, but then this is shared code for 30+ drivers or so, so meh. After this patch there's only the LUT/cmap path which is still using drm_modeset_lock_all for an atomic driver. But Peter is already looking into reworking that, so I'll leave that code as-is for now. v2: Squash in patches from Maarten to unify all the various atomic paths into just one atomic update function for fbdev overall. On top do one s/restore_fbdev_mode/restore_fbdev_mode_atomic/ so that we have all-atomic callchains after the first check.
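The core of the atomic DPMS-off path is that the shared restore helper gains an "active" flag: __drm_atomic_helper_set_config() sets crtc_state->active whenever a mode is set, so the helper simply clears it again when the pipes should be off. Roughly (a fragment simplified from the diff below):

        ret = __drm_atomic_helper_set_config(mode_set, state);
        if (ret != 0)
                goto out_state;

        /* Force the CRTC off again when DPMS-off was requested. */
        if (!active) {
                struct drm_crtc_state *crtc_state =
                        drm_atomic_get_new_crtc_state(state, mode_set->crtc);

                crtc_state->active = false;
        }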
Cc: Peter Rosin Cc: John Stultz Cc: Thierry Reding Cc: Maarten Lankhorst Reviewed-by: Maarten Lankhorst Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170704151833.17304-10-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_fb_helper.c | 115 ++++++++++++++------------------ 1 file changed, 50 insertions(+), 65 deletions(-) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 5d3d776508b3..1915e64c6e69 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -364,7 +364,7 @@ int drm_fb_helper_debug_leave(struct fb_info *info) } EXPORT_SYMBOL(drm_fb_helper_debug_leave); -static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper) +static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper, bool active) { struct drm_device *dev = fb_helper->dev; struct drm_plane *plane; @@ -413,6 +413,17 @@ retry: ret = __drm_atomic_helper_set_config(mode_set, state); if (ret != 0) goto out_state; + + /* + * __drm_atomic_helper_set_config() sets active when a + * mode is set, unconditionally clear it if we force DPMS off + */ + if (!active) { + struct drm_crtc *crtc = mode_set->crtc; + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + + crtc_state->active = false; + } } ret = drm_atomic_commit(state); @@ -483,7 +494,7 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper) struct drm_device *dev = fb_helper->dev; if (drm_drv_uses_atomic_modeset(dev)) - return restore_fbdev_mode_atomic(fb_helper); + return restore_fbdev_mode_atomic(fb_helper, true); else return restore_fbdev_mode_legacy(fb_helper); } @@ -602,23 +613,13 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { }; #endif -static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) +static void dpms_legacy(struct drm_fb_helper *fb_helper, int dpms_mode) { - struct drm_fb_helper *fb_helper = info->par; struct drm_device *dev = fb_helper->dev; struct drm_crtc *crtc; struct drm_connector *connector; int i, j; - /* - * For each CRTC in this fb, turn the connectors on/off. - */ - mutex_lock(&fb_helper->lock); - if (!drm_fb_helper_is_bound(fb_helper)) { - mutex_unlock(&fb_helper->lock); - return; - } - drm_modeset_lock_all(dev); for (i = 0; i < fb_helper->crtc_count; i++) { crtc = fb_helper->crtc_info[i].mode_set.crtc; @@ -635,6 +636,25 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) } } drm_modeset_unlock_all(dev); +} + +static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) +{ + struct drm_fb_helper *fb_helper = info->par; + + /* + * For each CRTC in this fb, turn the connectors on/off. 
+ */ + mutex_lock(&fb_helper->lock); + if (!drm_fb_helper_is_bound(fb_helper)) { + mutex_unlock(&fb_helper->lock); + return; + } + + if (drm_drv_uses_atomic_modeset(fb_helper->dev)) + restore_fbdev_mode_atomic(fb_helper, dpms_mode == DRM_MODE_DPMS_ON); + else + dpms_legacy(fb_helper, dpms_mode); mutex_unlock(&fb_helper->lock); } @@ -1489,70 +1509,36 @@ int drm_fb_helper_set_par(struct fb_info *info) } EXPORT_SYMBOL(drm_fb_helper_set_par); -static int pan_display_atomic(struct fb_var_screeninfo *var, - struct fb_info *info) +static void pan_set(struct drm_fb_helper *fb_helper, int x, int y) { - struct drm_fb_helper *fb_helper = info->par; - struct drm_device *dev = fb_helper->dev; - struct drm_atomic_state *state; - struct drm_plane *plane; - int i, ret; - unsigned int plane_mask; - struct drm_modeset_acquire_ctx ctx; + int i; - drm_modeset_acquire_init(&ctx, 0); - - state = drm_atomic_state_alloc(dev); - if (!state) { - ret = -ENOMEM; - goto out_ctx; - } - - state->acquire_ctx = &ctx; -retry: - plane_mask = 0; for (i = 0; i < fb_helper->crtc_count; i++) { struct drm_mode_set *mode_set; mode_set = &fb_helper->crtc_info[i].mode_set; - mode_set->x = var->xoffset; - mode_set->y = var->yoffset; - - ret = __drm_atomic_helper_set_config(mode_set, state); - if (ret != 0) - goto out_state; - - plane = mode_set->crtc->primary; - plane_mask |= (1 << drm_plane_index(plane)); - plane->old_fb = plane->fb; + mode_set->x = x; + mode_set->y = y; } +} - ret = drm_atomic_commit(state); - if (ret != 0) - goto out_state; +static int pan_display_atomic(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + struct drm_fb_helper *fb_helper = info->par; + int ret; - info->var.xoffset = var->xoffset; - info->var.yoffset = var->yoffset; + pan_set(fb_helper, var->xoffset, var->yoffset); -out_state: - drm_atomic_clean_old_fb(dev, plane_mask, ret); - - if (ret == -EDEADLK) - goto backoff; - - drm_atomic_state_put(state); -out_ctx: - drm_modeset_drop_locks(&ctx); - drm_modeset_acquire_fini(&ctx); + ret = restore_fbdev_mode_atomic(fb_helper, true); + if (!ret) { + info->var.xoffset = var->xoffset; + info->var.yoffset = var->yoffset; + } else + pan_set(fb_helper, info->var.xoffset, info->var.yoffset); return ret; - -backoff: - drm_atomic_state_clear(state); - drm_modeset_backoff(&ctx); - - goto retry; } static int pan_display_legacy(struct fb_var_screeninfo *var, @@ -2453,7 +2439,6 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config); */ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) { - struct drm_device *dev = fb_helper->dev; int err = 0; if (!drm_fbdev_emulation) From b8e2b0199cc377617dc238f5106352c06dcd3fa2 Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Tue, 4 Jul 2017 12:36:57 +0200 Subject: [PATCH 0181/1795] drm/fb-helper: factor out pseudo-palette The pseudo-palette has nothing to do with the crtc, so move it out of the crtc loop and update the palette once, then break out early. 
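The per-entry packing itself is unchanged; as in the new setcmap_pseudo_palette() below, it boils down to scaling each 16-bit channel to the visual's bit depth and shifting it into place (fragment for illustration):

        red   >>= 16 - info->var.red.length;
        green >>= 16 - info->var.green.length;
        blue  >>= 16 - info->var.blue.length;
        value = (red << info->var.red.offset) |
                (green << info->var.green.offset) |
                (blue << info->var.blue.offset);
        palette[cmap->start + i] = value;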
Signed-off-by: Peter Rosin Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1499164632-5582-2-git-send-email-peda@axentia.se --- drivers/gpu/drm/drm_fb_helper.c | 60 ++++++++++++++++++++------------- 1 file changed, 37 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 1915e64c6e69..02a0fb880241 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1218,29 +1218,6 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, struct drm_fb_helper *fb_helper = info->par; struct drm_framebuffer *fb = fb_helper->fb; - if (info->fix.visual == FB_VISUAL_TRUECOLOR) { - u32 *palette; - u32 value; - /* place color in psuedopalette */ - if (regno > 16) - return -EINVAL; - palette = (u32 *)info->pseudo_palette; - red >>= (16 - info->var.red.length); - green >>= (16 - info->var.green.length); - blue >>= (16 - info->var.blue.length); - value = (red << info->var.red.offset) | - (green << info->var.green.offset) | - (blue << info->var.blue.offset); - if (info->var.transp.length > 0) { - u32 mask = (1 << info->var.transp.length) - 1; - - mask <<= info->var.transp.offset; - value |= mask; - } - palette[regno] = value; - return 0; - } - /* * The driver really shouldn't advertise pseudo/directcolor * visuals if it can't deal with the palette. @@ -1256,6 +1233,38 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, return 0; } +static int setcmap_pseudo_palette(struct fb_cmap *cmap, struct fb_info *info) +{ + u32 *palette = (u32 *)info->pseudo_palette; + int i; + + if (cmap->start + cmap->len > 16) + return -EINVAL; + + for (i = 0; i < cmap->len; ++i) { + u16 red = cmap->red[i]; + u16 green = cmap->green[i]; + u16 blue = cmap->blue[i]; + u32 value; + + red >>= 16 - info->var.red.length; + green >>= 16 - info->var.green.length; + blue >>= 16 - info->var.blue.length; + value = (red << info->var.red.offset) | + (green << info->var.green.offset) | + (blue << info->var.blue.offset); + if (info->var.transp.length > 0) { + u32 mask = (1 << info->var.transp.length) - 1; + + mask <<= info->var.transp.offset; + value |= mask; + } + palette[cmap->start + i] = value; + } + + return 0; +} + /** * drm_fb_helper_setcmap - implementation for &fb_ops.fb_setcmap * @cmap: cmap to set @@ -1281,6 +1290,11 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) } drm_modeset_lock_all(dev); + if (info->fix.visual == FB_VISUAL_TRUECOLOR) { + rc = setcmap_pseudo_palette(cmap, info); + goto out; + } + for (i = 0; i < fb_helper->crtc_count; i++) { crtc = fb_helper->crtc_info[i].mode_set.crtc; crtc_funcs = crtc->helper_private; From a3562a0e471df02234f74ab4e0625042f44a76e9 Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Tue, 4 Jul 2017 12:36:58 +0200 Subject: [PATCH 0182/1795] drm/fb-helper: keep the .gamma_store updated in drm_fb_helper_setcmap I think the gamma_store can end up invalid on error. But the way I read it, that can happen in drm_mode_gamma_set_ioctl as well, so why should this pesky legacy fbdev stuff be any better? 
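The .gamma_store layout this relies on is three consecutive arrays of crtc->gamma_size u16 entries, red first, then green, then blue, so keeping it updated is just three memcpy()s, as the hunk below shows (fragment):

        u16 *r = crtc->gamma_store;
        u16 *g = r + crtc->gamma_size;
        u16 *b = g + crtc->gamma_size;

        memcpy(r + cmap->start, cmap->red,   cmap->len * sizeof(*r));
        memcpy(g + cmap->start, cmap->green, cmap->len * sizeof(*g));
        memcpy(b + cmap->start, cmap->blue,  cmap->len * sizeof(*b));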
Signed-off-by: Peter Rosin Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1499164632-5582-3-git-send-email-peda@axentia.se --- drivers/gpu/drm/drm_fb_helper.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 02a0fb880241..bcf227fa3e28 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1277,6 +1277,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) const struct drm_crtc_helper_funcs *crtc_funcs; u16 *red, *green, *blue, *transp; struct drm_crtc *crtc; + u16 *r, *g, *b; int i, j, rc = 0; int start; @@ -1305,6 +1306,24 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) transp = cmap->transp; start = cmap->start; + if (!crtc->gamma_size) { + rc = -EINVAL; + goto out; + } + + if (cmap->start + cmap->len > crtc->gamma_size) { + rc = -EINVAL; + goto out; + } + + r = crtc->gamma_store; + g = r + crtc->gamma_size; + b = g + crtc->gamma_size; + + memcpy(r + cmap->start, cmap->red, cmap->len * sizeof(*r)); + memcpy(g + cmap->start, cmap->green, cmap->len * sizeof(*g)); + memcpy(b + cmap->start, cmap->blue, cmap->len * sizeof(*b)); + for (j = 0; j < cmap->len; j++) { u16 hred, hgreen, hblue, htransp = 0xffff; From 08c992c55dffdffcaeb25ebaaf28c7cfc0c21ac1 Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Tue, 4 Jul 2017 12:36:59 +0200 Subject: [PATCH 0183/1795] drm/fb-helper: remove drm_fb_helper_save_lut_atomic drm_fb_helper_save_lut_atomic is redundant since the .gamma_store is now always kept up to date by drm_fb_helper_setcmap. Signed-off-by: Peter Rosin Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1499164632-5582-4-git-send-email-peda@axentia.se --- drivers/gpu/drm/drm_fb_helper.c | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index bcf227fa3e28..721511da4de6 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -253,22 +253,6 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, } EXPORT_SYMBOL(drm_fb_helper_remove_one_connector); -static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper) -{ - uint16_t *r_base, *g_base, *b_base; - int i; - - if (helper->funcs->gamma_get == NULL) - return; - - r_base = crtc->gamma_store; - g_base = r_base + crtc->gamma_size; - b_base = g_base + crtc->gamma_size; - - for (i = 0; i < crtc->gamma_size; i++) - helper->funcs->gamma_get(crtc, &r_base[i], &g_base[i], &b_base[i], i); -} - static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc) { uint16_t *r_base, *g_base, *b_base; @@ -309,7 +293,6 @@ int drm_fb_helper_debug_enter(struct fb_info *info) if (drm_drv_uses_atomic_modeset(mode_set->crtc->dev)) continue; - drm_fb_helper_save_lut_atomic(mode_set->crtc, helper); funcs->mode_set_base_atomic(mode_set->crtc, mode_set->fb, mode_set->x, From 9a120848d53b2abd6a91d1140081e883c748735a Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Mon, 3 Jul 2017 17:42:18 +0900 Subject: [PATCH 0184/1795] drm/bridge: adv7511: clean up drm_bridge_add call This patch removes unnecessary checking of return value. 
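This and the following bridge patches all apply the same transformation. The assumption (not shown in this excerpt) is that drm_bridge_add() cannot fail, it only appends the bridge to the global bridge list, so the error handling around it is dead code. The change in each driver is simply:

        /* before */
        ret = drm_bridge_add(&adv7511->bridge);
        if (ret) {
                dev_err(dev, "failed to add adv7511 bridge\n");
                goto err_unregister_cec;
        }

        /* after */
        drm_bridge_add(&adv7511->bridge);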
Signed-off-by: Inki Dae Signed-off-by: Archit Taneja Link: http://patchwork.freedesktop.org/patch/msgid/1499071350-25168-3-git-send-email-inki.dae@samsung.com --- drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index f75ab6278113..ff9792d350c8 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -1126,11 +1126,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) adv7511->bridge.funcs = &adv7511_bridge_funcs; adv7511->bridge.of_node = dev->of_node; - ret = drm_bridge_add(&adv7511->bridge); - if (ret) { - dev_err(dev, "failed to add adv7511 bridge\n"); - goto err_unregister_cec; - } + drm_bridge_add(&adv7511->bridge); adv7511_audio_init(dev, adv7511); From dd2adf743bc47ac14999bb375fed390af6524f29 Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Mon, 3 Jul 2017 17:42:19 +0900 Subject: [PATCH 0185/1795] drm/bridge: analogix-anx78xx: clean up drm_bridge_add call This patch removes unnecessary checking of return value. Signed-off-by: Inki Dae Signed-off-by: Archit Taneja Link: http://patchwork.freedesktop.org/patch/msgid/1499071350-25168-4-git-send-email-inki.dae@samsung.com --- drivers/gpu/drm/bridge/analogix-anx78xx.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c index 9006578b9789..c2fac3947006 100644 --- a/drivers/gpu/drm/bridge/analogix-anx78xx.c +++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c @@ -1438,11 +1438,7 @@ static int anx78xx_i2c_probe(struct i2c_client *client, anx78xx->bridge.funcs = &anx78xx_bridge_funcs; - err = drm_bridge_add(&anx78xx->bridge); - if (err < 0) { - DRM_ERROR("Failed to add drm bridge: %d\n", err); - goto err_poweroff; - } + drm_bridge_add(&anx78xx->bridge); /* If cable is pulled out, just poweroff and wait for HPD event */ if (!gpiod_get_value(anx78xx->pdata.gpiod_hpd)) From 47e34278549ca44f093bfcc427f4713a17b1a8c8 Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Mon, 3 Jul 2017 17:42:20 +0900 Subject: [PATCH 0186/1795] drm/bridge: vga-dac: clean up drm_bridge_add call This patch removes unnecessary checking of return value. 
Signed-off-by: Inki Dae Signed-off-by: Archit Taneja Link: http://patchwork.freedesktop.org/patch/msgid/1499071350-25168-5-git-send-email-inki.dae@samsung.com --- drivers/gpu/drm/bridge/dumb-vga-dac.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c index 831a606c4706..8a52539e618e 100644 --- a/drivers/gpu/drm/bridge/dumb-vga-dac.c +++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c @@ -177,7 +177,6 @@ static struct i2c_adapter *dumb_vga_retrieve_ddc(struct device *dev) static int dumb_vga_probe(struct platform_device *pdev) { struct dumb_vga *vga; - int ret; vga = devm_kzalloc(&pdev->dev, sizeof(*vga), GFP_KERNEL); if (!vga) @@ -186,7 +185,7 @@ static int dumb_vga_probe(struct platform_device *pdev) vga->vdd = devm_regulator_get_optional(&pdev->dev, "vdd"); if (IS_ERR(vga->vdd)) { - ret = PTR_ERR(vga->vdd); + int ret = PTR_ERR(vga->vdd); if (ret == -EPROBE_DEFER) return -EPROBE_DEFER; vga->vdd = NULL; @@ -207,11 +206,9 @@ static int dumb_vga_probe(struct platform_device *pdev) vga->bridge.funcs = &dumb_vga_bridge_funcs; vga->bridge.of_node = pdev->dev.of_node; - ret = drm_bridge_add(&vga->bridge); - if (ret && !IS_ERR(vga->ddc)) - i2c_put_adapter(vga->ddc); + drm_bridge_add(&vga->bridge); - return ret; + return 0; } static int dumb_vga_remove(struct platform_device *pdev) From 830dcb44511170ec9ffc91855464156ce13d63ab Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Mon, 3 Jul 2017 17:42:21 +0900 Subject: [PATCH 0187/1795] drm/bridge: nxp-ptn3460: clean up drm_bridge_add call This patch removes unnecessary checking of return value. Signed-off-by: Inki Dae Signed-off-by: Archit Taneja Link: http://patchwork.freedesktop.org/patch/msgid/1499071350-25168-6-git-send-email-inki.dae@samsung.com --- drivers/gpu/drm/bridge/nxp-ptn3460.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c index 4f64e717e01b..f0b5d0fc8594 100644 --- a/drivers/gpu/drm/bridge/nxp-ptn3460.c +++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c @@ -332,11 +332,7 @@ static int ptn3460_probe(struct i2c_client *client, ptn_bridge->bridge.funcs = &ptn3460_bridge_funcs; ptn_bridge->bridge.of_node = dev->of_node; - ret = drm_bridge_add(&ptn_bridge->bridge); - if (ret) { - DRM_ERROR("Failed to add bridge\n"); - return ret; - } + drm_bridge_add(&ptn_bridge->bridge); i2c_set_clientdata(client, ptn_bridge); From 3a45d25dcfc7cfd699707fc1a4f719a8d4f93442 Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Mon, 3 Jul 2017 17:42:22 +0900 Subject: [PATCH 0188/1795] drm/bridge: panel: clean up drm_bridge_add call This patch removes unnecessary checking of return value. 
Signed-off-by: Inki Dae Signed-off-by: Archit Taneja Link: http://patchwork.freedesktop.org/patch/msgid/1499071350-25168-7-git-send-email-inki.dae@samsung.com --- drivers/gpu/drm/bridge/panel.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c index 99f9a4beb859..65ab28cc2946 100644 --- a/drivers/gpu/drm/bridge/panel.c +++ b/drivers/gpu/drm/bridge/panel.c @@ -158,7 +158,6 @@ struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel, u32 connector_type) { struct panel_bridge *panel_bridge; - int ret; if (!panel) return ERR_PTR(EINVAL); @@ -176,9 +175,7 @@ struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel, panel_bridge->bridge.of_node = panel->dev->of_node; #endif - ret = drm_bridge_add(&panel_bridge->bridge); - if (ret) - return ERR_PTR(ret); + drm_bridge_add(&panel_bridge->bridge); return &panel_bridge->bridge; } From d0ceb3ec4a014a5465aae4952e9c18d7722dc50f Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Mon, 3 Jul 2017 17:42:23 +0900 Subject: [PATCH 0189/1795] drm/bridge: ps8622: clean up drm_bridge_add call This patch removes unnecessary checking of return value. Signed-off-by: Inki Dae Signed-off-by: Archit Taneja Link: http://patchwork.freedesktop.org/patch/msgid/1499071350-25168-8-git-send-email-inki.dae@samsung.com --- drivers/gpu/drm/bridge/parade-ps8622.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c index 6f22f9fec9bf..4f7725d4a309 100644 --- a/drivers/gpu/drm/bridge/parade-ps8622.c +++ b/drivers/gpu/drm/bridge/parade-ps8622.c @@ -598,11 +598,7 @@ static int ps8622_probe(struct i2c_client *client, ps8622->bridge.funcs = &ps8622_bridge_funcs; ps8622->bridge.of_node = dev->of_node; - ret = drm_bridge_add(&ps8622->bridge); - if (ret) { - DRM_ERROR("Failed to add bridge\n"); - return ret; - } + drm_bridge_add(&ps8622->bridge); i2c_set_clientdata(client, ps8622); From d341a640db81cfe7a4a4e93a9fc79c66dd781834 Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Mon, 3 Jul 2017 17:42:24 +0900 Subject: [PATCH 0190/1795] drm/bridge: sii902x: clean up drm_bridge_add call This patch removes unnecessary checking of return value. Signed-off-by: Inki Dae Signed-off-by: Archit Taneja Link: http://patchwork.freedesktop.org/patch/msgid/1499071350-25168-9-git-send-email-inki.dae@samsung.com --- drivers/gpu/drm/bridge/sii902x.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index 9b87067c022c..b8d10e599df0 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -418,11 +418,7 @@ static int sii902x_probe(struct i2c_client *client, sii902x->bridge.funcs = &sii902x_bridge_funcs; sii902x->bridge.of_node = dev->of_node; - ret = drm_bridge_add(&sii902x->bridge); - if (ret) { - dev_err(dev, "Failed to add drm_bridge\n"); - return ret; - } + drm_bridge_add(&sii902x->bridge); i2c_set_clientdata(client, sii902x); From b678682e846d91a105d2ca8a9bf3831ebd7c9d9b Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Mon, 3 Jul 2017 17:42:25 +0900 Subject: [PATCH 0191/1795] drm/bridge: synopsys: dw-hdmi: clean up drm_bridge_add call This patch removes unnecessary checking of return value. 
Signed-off-by: Inki Dae Signed-off-by: Archit Taneja Link: http://patchwork.freedesktop.org/patch/msgid/1499071350-25168-10-git-send-email-inki.dae@samsung.com --- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index ead11242c4b9..de1308b61390 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -2485,17 +2485,12 @@ int dw_hdmi_probe(struct platform_device *pdev, const struct dw_hdmi_plat_data *plat_data) { struct dw_hdmi *hdmi; - int ret; hdmi = __dw_hdmi_probe(pdev, plat_data); if (IS_ERR(hdmi)) return PTR_ERR(hdmi); - ret = drm_bridge_add(&hdmi->bridge); - if (ret < 0) { - __dw_hdmi_remove(hdmi); - return ret; - } + drm_bridge_add(&hdmi->bridge); return 0; } From dc01732eee8c029bf148be2e9476b9526727b56f Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Mon, 3 Jul 2017 17:42:26 +0900 Subject: [PATCH 0192/1795] drm/bridge: tc358767: clean up drm_bridge_add call This patch removes unnecessary checking of return value. Signed-off-by: Inki Dae Signed-off-by: Archit Taneja Link: http://patchwork.freedesktop.org/patch/msgid/1499071350-25168-11-git-send-email-inki.dae@samsung.com --- drivers/gpu/drm/bridge/tc358767.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 5c26488e7a2d..12a35f9c3adc 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -1325,11 +1325,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id) tc->bridge.funcs = &tc_bridge_funcs; tc->bridge.of_node = dev->of_node; - ret = drm_bridge_add(&tc->bridge); - if (ret) { - dev_err(dev, "Failed to add drm_bridge: %d\n", ret); - goto err_unregister_aux; - } + drm_bridge_add(&tc->bridge); i2c_set_clientdata(client, tc); From f74c527591b02a296e34d94c181b423587523f9e Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Mon, 3 Jul 2017 17:42:27 +0900 Subject: [PATCH 0193/1795] drm/bridge: ti-tfp410: clean up drm_bridge_add call This patch removes unnecessary checking of return value. Signed-off-by: Inki Dae Signed-off-by: Archit Taneja Link: http://patchwork.freedesktop.org/patch/msgid/1499071350-25168-12-git-send-email-inki.dae@samsung.com --- drivers/gpu/drm/bridge/ti-tfp410.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c index eee4efda829e..7ea2a15e8807 100644 --- a/drivers/gpu/drm/bridge/ti-tfp410.c +++ b/drivers/gpu/drm/bridge/ti-tfp410.c @@ -237,11 +237,7 @@ static int tfp410_init(struct device *dev) } } - ret = drm_bridge_add(&dvi->bridge); - if (ret) { - dev_err(dev, "drm_bridge_add() failed: %d\n", ret); - goto fail; - } + drm_bridge_add(&dvi->bridge); return 0; fail: From 0a7a0986559d85c9926cbe5b1570212e09008a7c Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Mon, 3 Jul 2017 16:38:25 +0530 Subject: [PATCH 0194/1795] drm: i915: sysfs: constify attribute_group structures. attribute_groups are not supposed to change at runtime. All functions working with attribute_groups provided by work with const attribute_group. So mark the non-const structs as const. 
File size before: text data bss dec hex filename 4028 1088 0 5116 13fc drivers/gpu/drm/i915/i915_sysfs.o File size after adding 'const': text data bss dec hex filename 4196 928 0 5124 1404 drivers/gpu/drm/i915/i915_sysfs.o Signed-off-by: Arvind Yadav Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/13b5c875e677c10e6257be4fac31b2b6c77a494f.1499079914.git.arvind.yadav.cs@gmail.com --- drivers/gpu/drm/i915/i915_sysfs.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 3a481062f219..3736c9f79197 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -96,7 +96,7 @@ static struct attribute *rc6_attrs[] = { NULL }; -static struct attribute_group rc6_attr_group = { +static const struct attribute_group rc6_attr_group = { .name = power_group_name, .attrs = rc6_attrs }; @@ -107,7 +107,7 @@ static struct attribute *rc6p_attrs[] = { NULL }; -static struct attribute_group rc6p_attr_group = { +static const struct attribute_group rc6p_attr_group = { .name = power_group_name, .attrs = rc6p_attrs }; @@ -117,7 +117,7 @@ static struct attribute *media_rc6_attrs[] = { NULL }; -static struct attribute_group media_rc6_attr_group = { +static const struct attribute_group media_rc6_attr_group = { .name = power_group_name, .attrs = media_rc6_attrs }; From 817aef5d86f124e94bfc871ecd6fb2afa4f55417 Mon Sep 17 00:00:00 2001 From: Xiong Zhang Date: Thu, 15 Jun 2017 11:11:45 +0800 Subject: [PATCH 0195/1795] drm/i915: Setting pch_id for HSW/BDW in virtual environment In an IGD passthrough environment the real ISA bridge may not exist, so pch_id cannot be read from the ISA bridge; pch_id is what distinguishes LPT_H from LPT_LP. Currently i915 treats every LPT PCH as LPT_H, so errors occur when i915 runs on LPT_LP machines with IGD passthrough. This patch sets pch_id for HSW/BDW according to the IGD type. It is not a complete fix, but it does solve the issue on HSW/BDW ULT/ULX machines. The QA CI system has been blocked by this issue for a long time, so it is better to merge this now to unblock it. We know the root cause lies in the device model for virtual passthrough, and resolving it properly will need cooperation between several parts in the kernel, QEMU and Xen.
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=99938 Signed-off-by: Xiong Zhang Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1497496305-5364-1-git-send-email-xiong.y.zhang@intel.com --- drivers/gpu/drm/i915/i915_drv.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index a6bef9ee8703..6f750efe9c3d 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -135,6 +135,10 @@ static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv) DRM_DEBUG_KMS("Assuming CougarPoint PCH\n"); } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { ret = PCH_LPT; + if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)) + dev_priv->pch_id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE; + else + dev_priv->pch_id = INTEL_PCH_LPT_DEVICE_ID_TYPE; DRM_DEBUG_KMS("Assuming LynxPoint PCH\n"); } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { ret = PCH_SPT; From e4d5dc218c4a1d1f555448825f5a8d7b1ac82959 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 5 Jul 2017 15:26:31 +0100 Subject: [PATCH 0196/1795] drm/i915: Check new context against kernel_context after reporting an error Avoid any pointer dereference in inspecting a potential PTR_ERR by checking for the error pointer before checking for an invalid context. Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20170705142634.18554-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_context.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 2eb5d8203999..98d2ce98f467 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -571,14 +571,13 @@ int i915_gem_context_open(struct drm_i915_private *i915, mutex_lock(&i915->drm.struct_mutex); ctx = i915_gem_create_context(i915, file_priv); mutex_unlock(&i915->drm.struct_mutex); - - GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); - if (IS_ERR(ctx)) { idr_destroy(&file_priv->context_idr); return PTR_ERR(ctx); } + GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); + return 0; } From ddfc925851b4f0ae36b21a5b545e4002c4259c42 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 5 Jul 2017 15:26:32 +0100 Subject: [PATCH 0197/1795] drm/i915: Move stale context reaping to common i915_gem_context_create We need to reap the stale contexts for all new contexts, be they created by user in i915_gem_context_ioctl or from opening a new file in i915_gem_context_open. Both paths may be called very frequently accumulating many stale contexts before any worker has a chance to run and free their memory. 
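With this change both entry points funnel through one place that reaps first. A sketch of the shape of i915_gem_create_context() after this patch (see the diff below for the exact code):

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        /* Reap stale contexts before adding a new one. */
        i915_gem_retire_requests(dev_priv);
        contexts_free(dev_priv);

        ctx = __create_hw_context(dev_priv, file_priv);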
Fixes: 1acfc104cdf8 ("drm/i915: Enable rcu-only context lookups") Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20170705142634.18554-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_context.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 98d2ce98f467..c58a95c33c25 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -383,6 +383,10 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, lockdep_assert_held(&dev_priv->drm.struct_mutex); + /* Reap stale contexts */ + i915_gem_retire_requests(dev_priv); + contexts_free(dev_priv); + ctx = __create_hw_context(dev_priv, file_priv); if (IS_ERR(ctx)) return ctx; @@ -989,10 +993,6 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - /* Reap stale contexts */ - i915_gem_retire_requests(dev_priv); - contexts_free(dev_priv); - ctx = i915_gem_create_context(dev_priv, file_priv); mutex_unlock(&dev->struct_mutex); if (IS_ERR(ctx)) From 6b6573d114050ee4ee1c54a60a92ae1b68ce1b6e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 5 Jul 2017 15:26:33 +0100 Subject: [PATCH 0198/1795] drm/i915: Drop request retirement before reaping stale contexts Before we create a new context, we try and reap all the stale contexts (i.e. those that are freed but waiting for a worker to come and return their allocations to the system). Before we do this, we retire all requests so that we clear any inflight no longer used contexts (who are only being kept alived by those inflght requests). However, any context that is finally unreferenced by this retirement is put onto an RCU list and not available for immediately reaping, we stall for no immediate benefit. Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20170705142634.18554-3-chris@chris-wilson.co.uk Reviewed-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/i915_gem_context.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index c58a95c33c25..4f6773ea7d85 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -384,7 +384,6 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, lockdep_assert_held(&dev_priv->drm.struct_mutex); /* Reap stale contexts */ - i915_gem_retire_requests(dev_priv); contexts_free(dev_priv); ctx = __create_hw_context(dev_priv, file_priv); From cb0aeaa81842948e32f39838f0ec113e3bb52291 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 5 Jul 2017 15:26:34 +0100 Subject: [PATCH 0199/1795] drm/i915: Only free the oldest stale context before allocating Currently, we move all unreferenced contexts to an RCU free list and then onto a worker for eventual reaping. To compensate against this growing into a long list with frequent allocations starving the system of available memory, before we allocate a new context we reap all the stale contexts. This puts all the cost of destroying the context into the next allocator, which is presumably more sensitive to syscall latency and unfair. We can limit the number of contexts being freed by the new allocator to both keep the list trimmed and to allow the allocator to be reasonably fast. 
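The mechanism is a lock-less llist: unreferenced contexts are pushed onto contexts.free_list, and the allocator now pops at most one entry per call instead of draining the whole list. A sketch matching contexts_free_first() in the diff below:

        struct llist_node *freed;
        struct i915_gem_context *ctx;

        freed = llist_del_first(&i915->contexts.free_list);
        if (!freed)
                return;

        ctx = container_of(freed, typeof(*ctx), free_link);
        i915_gem_context_free(ctx);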
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20170705142634.18554-4-chris@chris-wilson.co.uk Reviewed-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/i915_gem_context.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 4f6773ea7d85..1a87d04e7937 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -201,6 +201,21 @@ static void contexts_free(struct drm_i915_private *i915) i915_gem_context_free(ctx); } +static void contexts_free_first(struct drm_i915_private *i915) +{ + struct i915_gem_context *ctx; + struct llist_node *freed; + + lockdep_assert_held(&i915->drm.struct_mutex); + + freed = llist_del_first(&i915->contexts.free_list); + if (!freed) + return; + + ctx = container_of(freed, typeof(*ctx), free_link); + i915_gem_context_free(ctx); +} + static void contexts_free_worker(struct work_struct *work) { struct drm_i915_private *i915 = @@ -383,8 +398,8 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, lockdep_assert_held(&dev_priv->drm.struct_mutex); - /* Reap stale contexts */ - contexts_free(dev_priv); + /* Reap the most stale context */ + contexts_free_first(dev_priv); ctx = __create_hw_context(dev_priv, file_priv); if (IS_ERR(ctx)) From 846c6b26d38e56e5004f1d71d4c13226d2514750 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 29 Jun 2017 18:36:58 +0300 Subject: [PATCH 0200/1795] drm/i915/gen9+: Add 10 us delay after power well 1/AUX IO pw disabling Bspec requires a 10 us delay after disabling power well 1 and - if not toggled on-demand - the AUX IO power wells during display uninit. Signed-off-by: Imre Deak Reviewed-by: Rodrigo Vivi Link: http://patchwork.freedesktop.org/patch/msgid/1498750622-14023-2-git-send-email-imre.deak@intel.com --- drivers/gpu/drm/i915/intel_runtime_pm.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index efe80ed5fd4d..fd59016191ff 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -2701,6 +2701,8 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv) intel_power_well_disable(dev_priv, well); mutex_unlock(&power_domains->lock); + + usleep_range(10, 30); /* 10 us delay per Bspec */ } void bxt_display_core_init(struct drm_i915_private *dev_priv, @@ -2758,6 +2760,8 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv) intel_power_well_disable(dev_priv, well); mutex_unlock(&power_domains->lock); + + usleep_range(10, 30); /* 10 us delay per Bspec */ } #define CNL_PROCMON_IDX(val) \ @@ -2859,6 +2863,8 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv) intel_power_well_disable(dev_priv, well); mutex_unlock(&power_domains->lock); + usleep_range(10, 30); /* 10 us delay per Bspec */ + /* 5. Disable Comp */ val = I915_READ(CHICKEN_MISC_2); val |= COMP_PWR_DOWN; From edfda8e37ae9ec530434c3a014c8f0155a72acbd Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 29 Jun 2017 18:36:59 +0300 Subject: [PATCH 0201/1795] drm/i915/skl: Don't disable misc IO power well during display uninit Bspec requires leaving the misc IO power well enabled during display uninit, so align the code accordingly. 
Signed-off-by: Imre Deak Reviewed-by: Rodrigo Vivi Link: http://patchwork.freedesktop.org/patch/msgid/1498750622-14023-3-git-send-email-imre.deak@intel.com --- drivers/gpu/drm/i915/intel_runtime_pm.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index fd59016191ff..8418879c287a 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -2694,9 +2694,10 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv) mutex_lock(&power_domains->lock); - well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO); - intel_power_well_disable(dev_priv, well); - + /* + * BSpec says to keep the MISC IO power well enabled here, only + * remove our request for power well 1. + */ well = lookup_power_well(dev_priv, SKL_DISP_PW_1); intel_power_well_disable(dev_priv, well); From e8a3a2a3d7f7a194e0f0ad92c5dd636f908e7601 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 29 Jun 2017 18:37:00 +0300 Subject: [PATCH 0202/1795] drm/i915/bxt, glk: Fix assert on conditions for DC9 enabling What we want to assert based on the conditions required by Bspec is that power well 2 is disabled, so no need to check for other power wells. In addition we can only check if the driver's request is removed, the actual state depends on whether the other request bits are set or not (BIOS, KVMR, DEBUG). So check only the driver's request bit. Signed-off-by: Imre Deak Reviewed-by: Rodrigo Vivi Link: http://patchwork.freedesktop.org/patch/msgid/1498750622-14023-4-git-send-email-imre.deak@intel.com --- drivers/gpu/drm/i915/intel_runtime_pm.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 8418879c287a..1fc75e6769bc 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -549,7 +549,9 @@ static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) "DC9 already programmed to be enabled.\n"); WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, "DC5 still not disabled to enable DC9.\n"); - WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n"); + WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER) & + SKL_POWER_WELL_REQ(SKL_DISP_PW_2), + "Power well 2 on.\n"); WARN_ONCE(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n"); From 42d9366d41a992631abaa15f5a881ae1235a8203 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 29 Jun 2017 18:37:01 +0300 Subject: [PATCH 0203/1795] drm/i915/gen9+: Don't remove secondary power well requests So far in an attempt to make sure all power wells get disabled during display uninitialization the driver removed any secondary request bits (BIOS, KVMR, DEBUG) that were set for a given power well. The known source for these requests was DMC's request on power well 1 and the misc IO power well. Since DMC is inactive (DC states are disabled) at the point we disable these power wells, there shouldn't be any reason to leave them on. However there are two problems with the above assumption: Bspec requires that the misc IO power well stays enabled (without providing a reason) and there can be KVMR requests that we can't remove anyway (the KVMR request register is R/O). Atm, a KVMR request can trigger a timeout WARN when trying to disable power wells. 
To make the code aligned to Bspec and to get rid of the KVMR WARN, don't try to remove the secondary requests, only detect them and stop polling for the power well disabled state when any one is set. Also add a comment about the timeout values required by Bspec when enabling power wells and the fact that waiting for them to get disabled is not required by Bspec. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=98564 Signed-off-by: Imre Deak Reviewed-by: Rodrigo Vivi Link: http://patchwork.freedesktop.org/patch/msgid/1498750622-14023-5-git-send-email-imre.deak@intel.com --- drivers/gpu/drm/i915/intel_runtime_pm.c | 109 ++++++++++++++---------- 1 file changed, 63 insertions(+), 46 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 1fc75e6769bc..2fe715b25f9e 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -341,6 +341,59 @@ static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv, 1 << PIPE_C | 1 << PIPE_B); } +static void gen9_wait_for_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + int id = power_well->id; + + /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */ + WARN_ON(intel_wait_for_register(dev_priv, + HSW_PWR_WELL_DRIVER, + SKL_POWER_WELL_STATE(id), + SKL_POWER_WELL_STATE(id), + 1)); +} + +static u32 gen9_power_well_requesters(struct drm_i915_private *dev_priv, int id) +{ + u32 req_mask = SKL_POWER_WELL_REQ(id); + u32 ret; + + ret = I915_READ(HSW_PWR_WELL_BIOS) & req_mask ? 1 : 0; + ret |= I915_READ(HSW_PWR_WELL_DRIVER) & req_mask ? 2 : 0; + ret |= I915_READ(HSW_PWR_WELL_KVMR) & req_mask ? 4 : 0; + ret |= I915_READ(HSW_PWR_WELL_DEBUG) & req_mask ? 8 : 0; + + return ret; +} + +static void gen9_wait_for_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + int id = power_well->id; + bool disabled; + u32 reqs; + + /* + * Bspec doesn't require waiting for PWs to get disabled, but still do + * this for paranoia. The known cases where a PW will be forced on: + * - a KVMR request on any power well via the KVMR request register + * - a DMC request on PW1 and MISC_IO power wells via the BIOS and + * DEBUG request registers + * Skip the wait in case any of the request bits are set and print a + * diagnostic message. 
+ */ + wait_for((disabled = !(I915_READ(HSW_PWR_WELL_DRIVER) & + SKL_POWER_WELL_STATE(id))) || + (reqs = gen9_power_well_requesters(dev_priv, id)), 1); + if (disabled) + return; + + DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n", + power_well->name, + !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8)); +} + static void hsw_set_power_well(struct drm_i915_private *dev_priv, struct i915_power_well *power_well, bool enable) { @@ -746,45 +799,6 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv) gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); } -static void -gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - enum skl_disp_power_wells power_well_id = power_well->id; - u32 val; - u32 mask; - - mask = SKL_POWER_WELL_REQ(power_well_id); - - val = I915_READ(HSW_PWR_WELL_KVMR); - if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n", - power_well->name)) - I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask); - - val = I915_READ(HSW_PWR_WELL_BIOS); - val |= I915_READ(HSW_PWR_WELL_DEBUG); - - if (!(val & mask)) - return; - - /* - * DMC is known to force on the request bits for power well 1 on SKL - * and BXT and the misc IO power well on SKL but we don't expect any - * other request bits to be set, so WARN for those. - */ - if (power_well_id == SKL_DISP_PW_1 || - (IS_GEN9_BC(dev_priv) && - power_well_id == SKL_DISP_PW_MISC_IO)) - DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on " - "by DMC\n", power_well->name); - else - WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n", - power_well->name); - - I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask); - I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask); -} - static void skl_set_power_well(struct drm_i915_private *dev_priv, struct i915_power_well *power_well, bool enable) { @@ -848,6 +862,8 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv, DRM_DEBUG_KMS("Enabling %s\n", power_well->name); check_fuse_status = true; } + + gen9_wait_for_power_well_enable(dev_priv, power_well); } else { if (enable_requested) { I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask); @@ -855,14 +871,9 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv, DRM_DEBUG_KMS("Disabling %s\n", power_well->name); } - gen9_sanitize_power_well_requests(dev_priv, power_well); + gen9_wait_for_power_well_disable(dev_priv, power_well); } - if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable, - 1)) - DRM_ERROR("%s %s timeout\n", - power_well->name, enable ? "enable" : "disable"); - if (check_fuse_status) { if (power_well->id == SKL_DISP_PW_1) { if (intel_wait_for_register(dev_priv, @@ -2699,6 +2710,8 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv) /* * BSpec says to keep the MISC IO power well enabled here, only * remove our request for power well 1. + * Note that even though the driver's request is removed power well 1 + * may stay enabled after this due to DMC's own request on it. */ well = lookup_power_well(dev_priv, SKL_DISP_PW_1); intel_power_well_disable(dev_priv, well); @@ -2756,7 +2769,11 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv) /* The spec doesn't call for removing the reset handshake flag */ - /* Disable PG1 */ + /* + * Disable PW1 (PG1). + * Note that even though the driver's request is removed power well 1 + * may stay enabled after this due to DMC's own request on it. 
+ */ mutex_lock(&power_domains->lock); well = lookup_power_well(dev_priv, SKL_DISP_PW_1); From b38131fbcba5e16e94db7f0a6334446ffd157204 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 29 Jun 2017 18:37:02 +0300 Subject: [PATCH 0204/1795] drm/i915/cnl: Fix comment about AUX IO power well enable/disable The comments match an earlier version of the patch, fix them to match the current state. Signed-off-by: Imre Deak Reviewed-by: Rodrigo Vivi Link: http://patchwork.freedesktop.org/patch/msgid/1498750622-14023-6-git-send-email-imre.deak@intel.com --- drivers/gpu/drm/i915/intel_runtime_pm.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 2fe715b25f9e..5eb9c5ec9c85 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -2845,7 +2845,10 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume val |= CL_POWER_DOWN_ENABLE; I915_WRITE(CNL_PORT_CL1CM_DW5, val); - /* 4. Enable Power Well 1 (PG1) and Aux IO Power */ + /* + * 4. Enable Power Well 1 (PG1). + * The AUX IO power wells will be enabled on demand. + */ mutex_lock(&power_domains->lock); well = lookup_power_well(dev_priv, SKL_DISP_PW_1); intel_power_well_enable(dev_priv, well); @@ -2877,7 +2880,11 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv) /* 3. Disable CD clock */ cnl_uninit_cdclk(dev_priv); - /* 4. Disable Power Well 1 (PG1) and Aux IO Power */ + /* + * 4. Disable Power Well 1 (PG1). + * The AUX IO power wells are toggled on demand, so they are already + * disabled at this point. + */ mutex_lock(&power_domains->lock); well = lookup_power_well(dev_priv, SKL_DISP_PW_1); intel_power_well_disable(dev_priv, well); From 2e1e9d48939edad49a9f06762ecfc9c73666d489 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Wed, 5 Jul 2017 18:00:45 -0700 Subject: [PATCH 0205/1795] x86/gpu: CNL uses the same GMS values as SKL So don't forget to reserve its stolen memory bits. v2: Add ack and remove "TODO" from commit message. Signed-off-by: Paulo Zanoni Signed-off-by: Rodrigo Vivi Acked-by: Thomas Gleixner Link: http://patchwork.freedesktop.org/patch/msgid/1499302845-17856-1-git-send-email-rodrigo.vivi@intel.com --- arch/x86/kernel/early-quirks.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index d907c3d8633f..a4516ca4c4f3 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -527,6 +527,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = { INTEL_BXT_IDS(&gen9_early_ops), INTEL_KBL_IDS(&gen9_early_ops), INTEL_GLK_IDS(&gen9_early_ops), + INTEL_CNL_IDS(&gen9_early_ops), }; static void __init From 3d16ca58908312a1d837972123217ee12bdbda24 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Wed, 5 Jul 2017 18:00:31 -0700 Subject: [PATCH 0206/1795] drm/i915/cnl: Add force wake for gen10+. By spec there is no change on force wake registers for Cannonlake. Let's reuse gen9 one. v2: Adding missing case for the write part. (Tvrtko) v3: Rebase on recent tree. v4: Make it for gen9+ instead adding gen10 only. (by Joonas). 
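The conversion pattern here recurs in several of the following patches: an exact-generation check such as IS_GEN9() silently excludes every newer platform, while an open-ended INTEL_GEN() comparison lets gen10 inherit the gen9 behaviour by default. A hedged illustration (the setup_* names are placeholders, not driver functions):

        if (INTEL_GEN(dev_priv) >= 9)           /* SKL/BXT/KBL/GLK and now CNL */
                setup_gen9_style_forcewake(dev_priv);
        else if (IS_GEN8(dev_priv))
                setup_gen8_forcewake(dev_priv);

The trade-off is that a future generation which does change the interface has to be carved out explicitly again, so the widening is only appropriate when, as here, the spec says the programming model is unchanged.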
Cc: Tvrtko Ursulin Signed-off-by: Rodrigo Vivi Reviewed-by: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/1499302831-17773-1-git-send-email-rodrigo.vivi@intel.com --- drivers/gpu/drm/i915/intel_uncore.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 1ed3dd8df850..deb4430541cf 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -643,7 +643,7 @@ find_fw_domain(struct drm_i915_private *dev_priv, u32 offset) { .start = (s), .end = (e), .domains = (d) } #define HAS_FWTABLE(dev_priv) \ - (IS_GEN9(dev_priv) || \ + (INTEL_GEN(dev_priv) >= 9 || \ IS_CHERRYVIEW(dev_priv) || \ IS_VALLEYVIEW(dev_priv)) @@ -1072,7 +1072,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv) dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL); } - if (IS_GEN9(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 9) { dev_priv->uncore.funcs.force_wake_get = fw_domains_get; dev_priv->uncore.funcs.force_wake_put = fw_domains_put; fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, From 6602be0e2c6163bd747d490d8875ef0812c11560 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Thu, 6 Jul 2017 14:01:13 -0700 Subject: [PATCH 0207/1795] drm/i915/cnl: Cannonlake color init. Cannonlake has same color setup as Geminilake. Legacy color load luts doesn't work anymore on Cannonlake+. Cc: Clint Taylor Cc: Ander Conselvan de Oliveira Signed-off-by: Rodrigo Vivi Reviewed-by: Clinton Taylor Link: http://patchwork.freedesktop.org/patch/msgid/1499374873-2454-1-git-send-email-rodrigo.vivi@intel.com --- drivers/gpu/drm/i915/i915_pci.c | 1 + drivers/gpu/drm/i915/intel_color.c | 2 +- drivers/gpu/drm/i915/intel_display.c | 4 ++-- drivers/gpu/drm/i915/intel_sprite.c | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 04aaf553e3fa..a1e6b696bcfa 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -449,6 +449,7 @@ static const struct intel_device_info intel_cannonlake_info = { .gen = 10, .ddb_size = 1024, .has_csr = 1, + .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 } }; /* diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index 306c6b06b330..f85d57555957 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c @@ -615,7 +615,7 @@ void intel_color_init(struct drm_crtc *crtc) IS_BROXTON(dev_priv)) { dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix; dev_priv->display.load_luts = broadwell_load_luts; - } else if (IS_GEMINILAKE(dev_priv)) { + } else if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) { dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix; dev_priv->display.load_luts = glk_load_luts; } else { diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index ede3c6c02ec5..9a3919b19413 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3311,7 +3311,7 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, plane_ctl = PLANE_CTL_ENABLE; - if (!IS_GEMINILAKE(dev_priv)) { + if (!IS_GEMINILAKE(dev_priv) && !IS_CANNONLAKE(dev_priv)) { plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE | PLANE_CTL_PIPE_CSC_ENABLE | @@ -3367,7 +3367,7 @@ static void skylake_update_primary_plane(struct intel_plane *plane, spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - if 
(IS_GEMINILAKE(dev_priv)) { + if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) { I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), PLANE_COLOR_PIPE_GAMMA_ENABLE | PLANE_COLOR_PIPE_CSC_ENABLE | diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 0c650c2cbca8..94f9a1332dbf 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -262,7 +262,7 @@ skl_update_plane(struct intel_plane *plane, spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - if (IS_GEMINILAKE(dev_priv)) { + if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) { I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), PLANE_COLOR_PIPE_GAMMA_ENABLE | PLANE_COLOR_PIPE_CSC_ENABLE | From 17369ba08c065967b7b8c48253f20b73acc0b9f5 Mon Sep 17 00:00:00 2001 From: Chuanxiao Dong Date: Fri, 7 Jul 2017 17:50:59 +0800 Subject: [PATCH 0208/1795] drm/i915: Fix the kernel panic when using aliasing ppgtt The ppgtt should be get directly from i915_address_space *vm instead of vma->vm. v2: - add one more fix for bxt. (Chris) Fixes: 4a234c5fae16 ("drm/i915: pass the vma to insert_entries") Bugzilla:https://bugs.freedesktop.org/show_bug.cgi?id=101713 Signed-off-by: Chuanxiao Dong Reviewed-by: Matthew Auld v1 Cc: Matthew Auld Cc: Chris Wilson Cc: Zhenyu Wang Link: http://patchwork.freedesktop.org/patch/msgid/1499421059-18262-1-git-send-email-chuanxiao.dong@intel.com Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem_gtt.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index de67084d5fcf..10aa7762d9a6 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -910,7 +910,7 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm, enum i915_cache_level cache_level, u32 unused) { - struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm); + struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct sgt_dma iter = { .sg = vma->pages->sgl, .dma = sg_dma_address(iter.sg), @@ -2242,7 +2242,7 @@ static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm, enum i915_cache_level level, u32 unused) { - struct insert_entries arg = { vma->vm, vma, level }; + struct insert_entries arg = { vm, vma, level }; stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL); } From 75be7756bc21ac1f9f3082850deff809ac3c5d0c Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Thu, 6 Jul 2017 14:08:15 -0700 Subject: [PATCH 0209/1795] drm/i915/cnl: Don't trust VBT's alternate pin for port D for now. Cannon Lake's VBT that is currently available for B0 stepping states that port D uses alternate pin 3 messing up with the default pin-port mapping table. Using that information we cannot get HDMI working properly. So for now we don't relly on VBT for this information. 
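Clearing the field works as a fallback because a zero alternate_ddc_pin is read later as "no VBT override" when the GMBUS pin for the port is chosen; roughly, as a simplified sketch where default_ddc_pin_for_port() stands in for the per-platform mapping:

        u8 ddc_pin;

        if (info->alternate_ddc_pin)
                ddc_pin = info->alternate_ddc_pin;        /* trust VBT */
        else
                ddc_pin = default_ddc_pin_for_port(port); /* platform default table */

so zeroing the value on CNL B0 simply sends port D back to the default pin-port mapping table mentioned above.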
Cc: Clint Taylor Signed-off-by: Rodrigo Vivi Reviewed-by: Clinton Taylor Link: http://patchwork.freedesktop.org/patch/msgid/1499375295-6454-1-git-send-email-rodrigo.vivi@intel.com --- drivers/gpu/drm/i915/intel_bios.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 639d45c1dd2e..82b144cdfa1d 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1187,6 +1187,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, if (is_dvi) { info->alternate_ddc_pin = ddc_pin; + /* + * All VBTs that we got so far for B Stepping has this + * information wrong for Port D. So, let's just ignore for now. + */ + if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0) && + port == PORT_D) { + info->alternate_ddc_pin = 0; + } + sanitize_ddc_pin(dev_priv, port); } From f65f84178999c2c0227f9ae7dafd8af62c0ce5ad Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Thu, 6 Jul 2017 14:06:24 -0700 Subject: [PATCH 0210/1795] drm/i915/cnl: Gen10 render context size. No change on render context size is required for Gen10. So this patch doesn't change the default behaviour, but only avoid the missing_case message. Cc: Ben Widawsky Signed-off-by: Rodrigo Vivi Reviewed-by: Ben Widawsky Link: http://patchwork.freedesktop.org/patch/msgid/1499375184-5725-1-git-send-email-rodrigo.vivi@intel.com --- drivers/gpu/drm/i915/intel_engine_cs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index a55cd72aeeff..24db316e0fd1 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -149,6 +149,7 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class) switch (INTEL_GEN(dev_priv)) { default: MISSING_CASE(INTEL_GEN(dev_priv)); + case 10: case 9: return GEN9_LR_CONTEXT_RENDER_SIZE; case 8: From 35ceabf3cdb557b23bbc09f0b6f7bb2b545185b1 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Thu, 6 Jul 2017 13:41:13 -0700 Subject: [PATCH 0211/1795] drm/i915/cnl: Inherit RPS stuff from previous platforms. Apparently no change on RPS stuff from previous platforms. v2: Merging to rps related patches in one and also adding missed cases. 
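The GEN9_FREQ_SCALER handling that follows is easier to read with the units written out: before gen9 the ratio registers are in 50 MHz steps, while gen9+ (and now Cannonlake) uses 16.66 MHz steps, i.e. 50/3 MHz. A small worked example with illustrative ratio values:

/*
 *   gen8 and earlier: ratio 23 -> 23 * 50     = 1150 MHz
 *   gen9 and later:   ratio 69 -> 69 * 50 / 3 = 1150 MHz
 */
static int example_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
        if (INTEL_GEN(dev_priv) >= 9)
                return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
                                         GEN9_FREQ_SCALER);
        return val * GT_FREQUENCY_MULTIPLIER;
}

example_gpu_freq() is only a simplified stand-in for intel_gpu_freq() below, which also carries the Valleyview/Cherryview special cases; the point is just the 50 vs 50/3 scaling.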
Cc: David Weinehall Signed-off-by: Rodrigo Vivi Reviewed-by: David Weinehall Link: http://patchwork.freedesktop.org/patch/msgid/1499373673-25066-1-git-send-email-rodrigo.vivi@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 20 ++++++++++++-------- drivers/gpu/drm/i915/i915_reg.h | 4 ++-- drivers/gpu/drm/i915/i915_sysfs.c | 2 +- drivers/gpu/drm/i915/intel_pm.c | 18 +++++++++--------- 4 files changed, 24 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 643f56b8b87c..ca2e34b1c798 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1159,7 +1159,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); reqf = I915_READ(GEN6_RPNSWREQ); - if (IS_GEN9(dev_priv)) + if (INTEL_GEN(dev_priv) >= 9) reqf >>= 23; else { reqf &= ~GEN6_TURBO_DISABLE; @@ -1181,7 +1181,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK; rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK; rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK; - if (IS_GEN9(dev_priv)) + if (INTEL_GEN(dev_priv) >= 9) cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; @@ -1210,7 +1210,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) dev_priv->rps.pm_intrmsk_mbz); seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); seq_printf(m, "Render p-state ratio: %d\n", - (gt_perf_status & (IS_GEN9(dev_priv) ? 0x1ff00 : 0xff00)) >> 8); + (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8); seq_printf(m, "Render p-state VID: %d\n", gt_perf_status & 0xff); seq_printf(m, "Render p-state limit: %d\n", @@ -1241,18 +1241,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused) max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 : rp_state_cap >> 16) & 0xff; - max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1); + max_freq *= (IS_GEN9_BC(dev_priv) || + IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1); seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", intel_gpu_freq(dev_priv, max_freq)); max_freq = (rp_state_cap & 0xff00) >> 8; - max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1); + max_freq *= (IS_GEN9_BC(dev_priv) || + IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1); seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", intel_gpu_freq(dev_priv, max_freq)); max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 : rp_state_cap >> 0) & 0xff; - max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1); + max_freq *= (IS_GEN9_BC(dev_priv) || + IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1); seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", intel_gpu_freq(dev_priv, max_freq)); seq_printf(m, "Max overclocked frequency: %dMHz\n", @@ -1855,7 +1858,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) if (ret) goto out; - if (IS_GEN9_BC(dev_priv)) { + if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) { /* Convert GT frequency to 50 HZ units */ min_gpu_freq = dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER; @@ -1875,7 +1878,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) &ia_freq); seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", intel_gpu_freq(dev_priv, (gpu_freq * - (IS_GEN9_BC(dev_priv) ? + (IS_GEN9_BC(dev_priv) || + IS_CANNONLAKE(dev_priv) ? 
GEN9_FREQ_SCALER : 1))), ((ia_freq >> 0) & 0xff) * 100, ((ia_freq >> 8) & 0xff) * 100); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 64cc674b652a..21ab12f4e72a 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3522,7 +3522,7 @@ enum skl_disp_power_wells { #define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25) #define INTERVAL_1_33_US(us) (((us) * 3) >> 2) #define INTERVAL_0_833_US(us) (((us) * 6) / 5) -#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \ +#define GT_INTERVAL_FROM_US(dev_priv, us) (INTEL_GEN(dev_priv) >= 9 ? \ (IS_GEN9_LP(dev_priv) ? \ INTERVAL_0_833_US(us) : \ INTERVAL_1_33_US(us)) : \ @@ -3531,7 +3531,7 @@ enum skl_disp_power_wells { #define INTERVAL_1_28_TO_US(interval) (((interval) << 7) / 100) #define INTERVAL_1_33_TO_US(interval) (((interval) << 2) / 3) #define INTERVAL_0_833_TO_US(interval) (((interval) * 5) / 6) -#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (IS_GEN9(dev_priv) ? \ +#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (INTEL_GEN(dev_priv) >= 9 ? \ (IS_GEN9_LP(dev_priv) ? \ INTERVAL_0_833_TO_US(interval) : \ INTERVAL_1_33_TO_US(interval)) : \ diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 3736c9f79197..7fcf00622c4c 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -253,7 +253,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff); } else { u32 rpstat = I915_READ(GEN6_RPSTAT1); - if (IS_GEN9(dev_priv)) + if (INTEL_GEN(dev_priv) >= 9) ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index c3fcadfa0ae7..6db833e6dcbd 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5852,7 +5852,7 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val) * the hw runs at the minimal clock before selecting the desired * frequency, if the down threshold expires in that window we will not * receive a down interrupt. 
*/ - if (IS_GEN9(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 9) { limits = (dev_priv->rps.max_freq_softlimit) << 23; if (val <= dev_priv->rps.min_freq_softlimit) limits |= (dev_priv->rps.min_freq_softlimit) << 14; @@ -5994,7 +5994,7 @@ static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val) if (val != dev_priv->rps.cur_freq) { gen6_set_rps_thresholds(dev_priv, val); - if (IS_GEN9(dev_priv)) + if (INTEL_GEN(dev_priv) >= 9) I915_WRITE(GEN6_RPNSWREQ, GEN9_FREQUENCY(val)); else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) @@ -6353,7 +6353,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv) dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) || - IS_GEN9_BC(dev_priv)) { + IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) { u32 ddcc_status = 0; if (sandybridge_pcode_read(dev_priv, @@ -6366,7 +6366,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv) dev_priv->rps.max_freq); } - if (IS_GEN9_BC(dev_priv)) { + if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) { /* Store the frequency values in 16.66 MHZ units, which is * the natural hardware unit for SKL */ @@ -6672,7 +6672,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv) /* convert DDR frequency from units of 266.6MHz to bandwidth */ min_ring_freq = mult_frac(min_ring_freq, 8, 3); - if (IS_GEN9_BC(dev_priv)) { + if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) { /* Convert GT frequency to 50 HZ units */ min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER; max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER; @@ -6690,7 +6690,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv) int diff = max_gpu_freq - gpu_freq; unsigned int ia_freq = 0, ring_freq = 0; - if (IS_GEN9_BC(dev_priv)) { + if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) { /* * ring_freq = 2 * GT. ring_freq is in 100MHz units * No floor required for ring frequency on SKL. @@ -7821,7 +7821,7 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) } else if (INTEL_GEN(dev_priv) >= 9) { gen9_enable_rc6(dev_priv); gen9_enable_rps(dev_priv); - if (IS_GEN9_BC(dev_priv)) + if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) gen6_update_ring_freq(dev_priv); } else if (IS_BROADWELL(dev_priv)) { gen8_enable_rps(dev_priv); @@ -9066,7 +9066,7 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val) int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) { - if (IS_GEN9(dev_priv)) + if (INTEL_GEN(dev_priv) >= 9) return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER); else if (IS_CHERRYVIEW(dev_priv)) @@ -9079,7 +9079,7 @@ int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) int intel_freq_opcode(struct drm_i915_private *dev_priv, int val) { - if (IS_GEN9(dev_priv)) + if (INTEL_GEN(dev_priv) >= 9) return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER, GT_FREQUENCY_MULTIPLIER); else if (IS_CHERRYVIEW(dev_priv)) From a9701a897067db118f1a0fcf59ce8391e3612687 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Thu, 6 Jul 2017 13:52:01 -0700 Subject: [PATCH 0212/1795] drm/i915/cnl: Get DDI clock based on PLLs. PLLs are the source clocks for the DDIs so in order to determine the ddi clock we need to check the PLL configuration. v2: Mika pointed out that 24 was hardcoded while it should consider ref clock that can be either 24KHz or 19.2KHz on CNL. 
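As a sanity check on the formula used below, a worked example with purely illustrative register values (the CNL reference clock is 24 MHz or 19.2 MHz, i.e. dev_priv->cdclk.hw.ref of 24000 or 19200 kHz):

        ref_clock    = 24000 kHz
        DCO integer  = 337, DCO fraction = 0x4000 (0.5 in 15-bit fractional units)
        dco_freq     = 337 * 24000 + (0x4000 * 24000) / 0x8000
                     = 8088000 + 12000 = 8100000 kHz          (8.1 GHz DCO)

        pdiv = 3, qdiv = 1, kdiv = 2
        link clock   = dco_freq / (p0 * p1 * p2 * 5)
                     = 8100000 / (3 * 1 * 2 * 5) = 270000 kHz (a 2.7 GHz DP link rate)

The divider and DCO values here are made up for the arithmetic and are not taken from the CNL DPLL programming tables.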
Reviewed-by: Mika Kahola Signed-off-by: Rodrigo Vivi Link: http://patchwork.freedesktop.org/patch/msgid/1499374321-31152-1-git-send-email-rodrigo.vivi@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 2 + drivers/gpu/drm/i915/intel_ddi.c | 111 +++++++++++++++++++++++++++++++ 2 files changed, 113 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 21ab12f4e72a..c712d01f92ab 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -8343,6 +8343,7 @@ enum { #define DPLL_CFGCR0_LINK_RATE_3240 (6 << 25) #define DPLL_CFGCR0_LINK_RATE_4050 (7 << 25) #define DPLL_CFGCR0_DCO_FRACTION_MASK (0x7fff << 10) +#define DPLL_CFGCR0_DCO_FRAC_SHIFT (10) #define DPLL_CFGCR0_DCO_FRACTION(x) ((x) << 10) #define DPLL_CFGCR0_DCO_INTEGER_MASK (0x3ff) #define CNL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _CNL_DPLL0_CFGCR0, _CNL_DPLL1_CFGCR0) @@ -8350,6 +8351,7 @@ enum { #define _CNL_DPLL0_CFGCR1 0x6C004 #define _CNL_DPLL1_CFGCR1 0x6C084 #define DPLL_CFGCR1_QDIV_RATIO_MASK (0xff << 10) +#define DPLL_CFGCR1_QDIV_RATIO_SHIFT (10) #define DPLL_CFGCR1_QDIV_RATIO(x) ((x) << 10) #define DPLL_CFGCR1_QDIV_MODE(x) ((x) << 9) #define DPLL_CFGCR1_KDIV_MASK (7 << 6) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 80e96f1f49d2..241decf2a7d9 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1103,6 +1103,62 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv, return dco_freq / (p0 * p1 * p2 * 5); } +static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, + uint32_t pll_id) +{ + uint32_t cfgcr0, cfgcr1; + uint32_t p0, p1, p2, dco_freq, ref_clock; + + cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id)); + cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id)); + + p0 = cfgcr1 & DPLL_CFGCR1_PDIV_MASK; + p2 = cfgcr1 & DPLL_CFGCR1_KDIV_MASK; + + if (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) + p1 = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >> + DPLL_CFGCR1_QDIV_RATIO_SHIFT; + else + p1 = 1; + + + switch (p0) { + case DPLL_CFGCR1_PDIV_2: + p0 = 2; + break; + case DPLL_CFGCR1_PDIV_3: + p0 = 3; + break; + case DPLL_CFGCR1_PDIV_5: + p0 = 5; + break; + case DPLL_CFGCR1_PDIV_7: + p0 = 7; + break; + } + + switch (p2) { + case DPLL_CFGCR1_KDIV_1: + p2 = 1; + break; + case DPLL_CFGCR1_KDIV_2: + p2 = 2; + break; + case DPLL_CFGCR1_KDIV_4: + p2 = 4; + break; + } + + ref_clock = dev_priv->cdclk.hw.ref; + + dco_freq = (cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock; + + dco_freq += (((cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >> + DPLL_CFGCR0_DCO_FRAC_SHIFT) * ref_clock) / 0x8000; + + return dco_freq / (p0 * p1 * p2 * 5); +} + static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) { int dotclock; @@ -1124,6 +1180,59 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) pipe_config->base.adjusted_mode.crtc_clock = dotclock; } +static void cnl_ddi_clock_get(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + int link_clock = 0; + uint32_t cfgcr0, pll_id; + + pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll); + + cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id)); + + if (cfgcr0 & DPLL_CFGCR0_HDMI_MODE) { + link_clock = cnl_calc_wrpll_link(dev_priv, pll_id); + } else { + link_clock = cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK; + + switch (link_clock) { + case DPLL_CFGCR0_LINK_RATE_810: + link_clock = 81000; + break; + case DPLL_CFGCR0_LINK_RATE_1080: + link_clock = 108000; + break; + case 
DPLL_CFGCR0_LINK_RATE_1350: + link_clock = 135000; + break; + case DPLL_CFGCR0_LINK_RATE_1620: + link_clock = 162000; + break; + case DPLL_CFGCR0_LINK_RATE_2160: + link_clock = 216000; + break; + case DPLL_CFGCR0_LINK_RATE_2700: + link_clock = 270000; + break; + case DPLL_CFGCR0_LINK_RATE_3240: + link_clock = 324000; + break; + case DPLL_CFGCR0_LINK_RATE_4050: + link_clock = 405000; + break; + default: + WARN(1, "Unsupported link rate\n"); + break; + } + link_clock *= 2; + } + + pipe_config->port_clock = link_clock; + + ddi_dotclock_get(pipe_config); +} + static void skl_ddi_clock_get(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { @@ -1267,6 +1376,8 @@ void intel_ddi_clock_get(struct intel_encoder *encoder, skl_ddi_clock_get(encoder, pipe_config); else if (IS_GEN9_LP(dev_priv)) bxt_ddi_clock_get(encoder, pipe_config); + else if (IS_CANNONLAKE(dev_priv)) + cnl_ddi_clock_get(encoder, pipe_config); } void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state) From 77d1f615c78a73a04254fa2bff07ee9fa27145d9 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Mon, 26 Jun 2017 10:33:49 +0200 Subject: [PATCH 0213/1795] drm/i915: Make DP-MST connector info work Commit 9a148a96fc3a ("drm/i915/debugfs: add dp mst info") adds support for DP-MST to intel_connector_info, but forgot to remove the early return for DP-MST. Remove it, and print out MST connectors directly. Fixes: 9a148a96fc3a ("drm/i915/debugfs: add dp mst info") Cc: # v4.11+ Cc: Dhinakaran Pandiyan Cc: Libin Yang Signed-off-by: Maarten Lankhorst Link: http://patchwork.freedesktop.org/patch/msgid/20170626083349.24389-1-maarten.lankhorst@linux.intel.com Reviewed-by: Dhinakaran Pandiyan --- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index ca2e34b1c798..620c9218d1c1 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -3108,7 +3108,7 @@ static void intel_connector_info(struct seq_file *m, connector->display_info.cea_rev); } - if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST) + if (!intel_encoder) return; switch (connector->connector_type) { From 6d6a89708188823d676c9b84e7e2534b822465e3 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Thu, 6 Jul 2017 13:45:08 -0700 Subject: [PATCH 0214/1795] drm/i915/cnl: Add max allowed Cannonlake DC. This is a follow-up after enabling DC states with commit: "drm/i915/DMC/CNL: Load DMC on CNL". 
Cc: Anusha Srivatsa Cc: Imre Deak Signed-off-by: Rodrigo Vivi Reviewed-by: Imre Deak Link: http://patchwork.freedesktop.org/patch/msgid/1499373908-26761-1-git-send-email-rodrigo.vivi@intel.com --- drivers/gpu/drm/i915/intel_runtime_pm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 5eb9c5ec9c85..f630d632a976 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -2492,7 +2492,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv, int requested_dc; int max_dc; - if (IS_GEN9_BC(dev_priv)) { + if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) { max_dc = 2; mask = 0; } else if (IS_GEN9_LP(dev_priv)) { From 371c2279aa705c32730e738b3f173fb2e62b33e7 Mon Sep 17 00:00:00 2001 From: Alexandru Moise <00moses.alexander00@gmail.com> Date: Sat, 8 Jul 2017 23:43:52 +0200 Subject: [PATCH 0215/1795] drm: inhibit drm drivers register to uninitialized drm core If the DRM core fails to init for whatever reason, ensure that no driver ever calls drm_dev_register(). This is best done at drm_dev_init() as it covers drivers that call drm_dev_alloc() as well as drivers that prefer to embed struct drm_device into their own device struct and call drm_dev_init() themselves. In my case I had so many dynamic device majors used that the major number for DRM (226) was stolen, causing DRM core init to fail after failing to register a chrdev, and ultimately calling debugfs_remove() on drm_debugfs_root in drm_core_exit(). After drm core failed to init, VGEM was still calling drm_dev_register(), ultimately leading to drm_debugfs_init(), with drm_debugfs_root passed as the root for the new debugfs dir at debugfs_create_dir(). This led to a kernel panic once we were either derefencing root->d_inode while it was NULL or calling root->d_inode->i_op->lookup() while it was NULL in debugfs at inode_lock() or lookup_*(). Signed-off-by: Alexandru Moise <00moses.alexander00@gmail.com> Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170708214352.GA27205@gmail.com --- drivers/gpu/drm/drm_drv.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 37b8ad3e30d8..2ed2d919beae 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -63,6 +63,15 @@ module_param_named(debug, drm_debug, int, 0600); static DEFINE_SPINLOCK(drm_minor_lock); static struct idr drm_minors_idr; +/* + * If the drm core fails to init for whatever reason, + * we should prevent any drivers from registering with it. + * It's best to check this at drm_dev_init(), as some drivers + * prefer to embed struct drm_device into their own device + * structure and call drm_dev_init() themselves. 
+ */ +static bool drm_core_init_complete = false; + static struct dentry *drm_debugfs_root; #define DRM_PRINTK_FMT "[" DRM_NAME ":%s]%s %pV" @@ -484,6 +493,11 @@ int drm_dev_init(struct drm_device *dev, { int ret; + if (!drm_core_init_complete) { + DRM_ERROR("DRM core is not initialized\n"); + return -ENODEV; + } + kref_init(&dev->ref); dev->dev = parent; dev->driver = driver; @@ -966,6 +980,8 @@ static int __init drm_core_init(void) if (ret < 0) goto error; + drm_core_init_complete = true; + DRM_DEBUG("Initialized\n"); return 0; From 61f3e7704897188d85e5698980316f2929a7c753 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Mon, 10 Jul 2017 13:58:52 -0700 Subject: [PATCH 0216/1795] drm/i915/cnl: Add missing type case. Paulo had noticed that inside cnl_ddi_vswing_program the case was handling voltage but with no indication of type, so a missing type could also take us down that path. So my first attempt was to add a message to make it clear what triggered that path. However, DK had a better idea: handle the missing type sooner, before it can take us down that path. So this v2 here uses his approach. v2: Handle missing type sooner. Cc: Dhinakaran Pandiyan Cc: Paulo Zanoni Signed-off-by: Rodrigo Vivi Reviewed-by: Dhinakaran Pandiyan Link: http://patchwork.freedesktop.org/patch/msgid/20170710205852.28352-1-rodrigo.vivi@intel.com --- drivers/gpu/drm/i915/intel_ddi.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 241decf2a7d9..efb13582dc73 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1979,9 +1979,12 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level) if ((intel_dp) && (type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP)) { width = intel_dp->lane_count; rate = intel_dp->link_rate; - } else { + } else if (type == INTEL_OUTPUT_HDMI) { width = 4; /* Rate is always < than 6GHz for HDMI */ + } else { + MISSING_CASE(type); + return; } /* From e6194923237f3952b955c343b65b211f36bce01c Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Thu, 13 Jul 2017 09:13:30 +0200 Subject: [PATCH 0217/1795] esp: Fix memleaks on error paths. We leak the temporarily allocated resources in error paths, so fix this by freeing them.
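The fix below is the usual goto-unwind idiom; in compressed form, with generic names standing in for the esp4/esp6 specifics (do_transform(), step_one(), step_two() and TMP_SIZE are placeholders):

static int do_transform(void)
{
        void *tmp;
        int err;

        tmp = kmalloc(TMP_SIZE, GFP_ATOMIC);
        if (!tmp)
                return -ENOMEM;

        err = step_one(tmp);
        if (err < 0)
                goto error_free;

        err = step_two(tmp);
        /* success also falls through, so tmp is always freed */

error_free:
        kfree(tmp);
        return err;
}

The leak being fixed comes from error branches jumping to a label placed after the kfree() (or returning directly), which skips freeing tmp on exactly the paths that fail.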
Fixes: fca11ebde3f ("esp4: Reorganize esp_output") Fixes: 383d0350f2c ("esp6: Reorganize esp_output") Fixes: 3f29770723f ("ipsec: check return value of skb_to_sgvec always") Signed-off-by: Steffen Klassert --- net/ipv4/esp4.c | 13 ++++++++----- net/ipv6/esp6.c | 9 +++++---- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 0cbee0a666ff..dbb31a942dfa 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -381,7 +381,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * (unsigned char *)esph - skb->data, assoclen + ivlen + esp->clen + alen); if (unlikely(err < 0)) - goto error; + goto error_free; if (!esp->inplace) { int allocsize; @@ -392,7 +392,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * spin_lock_bh(&x->lock); if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) { spin_unlock_bh(&x->lock); - goto error; + goto error_free; } skb_shinfo(skb)->nr_frags = 1; @@ -409,7 +409,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * (unsigned char *)esph - skb->data, assoclen + ivlen + esp->clen + alen); if (unlikely(err < 0)) - goto error; + goto error_free; } if ((x->props.flags & XFRM_STATE_ESN)) @@ -442,8 +442,9 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * if (sg != dsg) esp_ssg_unref(x, tmp); - kfree(tmp); +error_free: + kfree(tmp); error: return err; } @@ -695,8 +696,10 @@ skip_cow: sg_init_table(sg, nfrags); err = skb_to_sgvec(skb, sg, 0, skb->len); - if (unlikely(err < 0)) + if (unlikely(err < 0)) { + kfree(tmp); goto out; + } skb->ip_summed = CHECKSUM_NONE; diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 9ed35473dcb5..392def1fcf21 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -345,7 +345,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info (unsigned char *)esph - skb->data, assoclen + ivlen + esp->clen + alen); if (unlikely(err < 0)) - goto error; + goto error_free; if (!esp->inplace) { int allocsize; @@ -356,7 +356,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info spin_lock_bh(&x->lock); if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) { spin_unlock_bh(&x->lock); - goto error; + goto error_free; } skb_shinfo(skb)->nr_frags = 1; @@ -373,7 +373,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info (unsigned char *)esph - skb->data, assoclen + ivlen + esp->clen + alen); if (unlikely(err < 0)) - goto error; + goto error_free; } if ((x->props.flags & XFRM_STATE_ESN)) @@ -406,8 +406,9 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info if (sg != dsg) esp_ssg_unref(x, tmp); - kfree(tmp); +error_free: + kfree(tmp); error: return err; } From 765831dc27ab141b3a0be1ab55b922b012427902 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 12 Jul 2017 10:13:29 +0200 Subject: [PATCH 0218/1795] drm/simple-kms-helper: Fix the check for the mismatch between plane and CRTC enabled. You can enable the CRTC and without adding the plane to the state and it will succeed. This should be prevented in the crtc check instead of the plane check, because the plane check may never run for atomic enable, but the crtc check always will. This is based on a similar check in vmwgfx. 
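For reference, crtc_state->plane_mask has one bit per plane attached to the CRTC in that atomic state, indexed by drm_plane_index(), which is what allows the mismatch to be rejected with a single mask test; schematically (local naming is illustrative, the helper itself receives the CRTC state as "state"):

        bool has_primary = crtc_state->plane_mask &
                           BIT(drm_plane_index(crtc->primary));

        /* reject "CRTC enabled without a primary plane" and vice versa */
        if (has_primary != crtc_state->enable)
                return -EINVAL;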
Signed-off-by: Maarten Lankhorst Cc: Daniel Vetter Cc: Jani Nikula Cc: Sean Paul Cc: David Airlie Link: http://patchwork.freedesktop.org/patch/msgid/20170712081344.25495-2-maarten.lankhorst@linux.intel.com Reviewed-by: Daniel Vetter --- drivers/gpu/drm/drm_simple_kms_helper.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c index 98250854af75..39c203ad59db 100644 --- a/drivers/gpu/drm/drm_simple_kms_helper.c +++ b/drivers/gpu/drm/drm_simple_kms_helper.c @@ -37,6 +37,13 @@ static const struct drm_encoder_funcs drm_simple_kms_encoder_funcs = { static int drm_simple_kms_crtc_check(struct drm_crtc *crtc, struct drm_crtc_state *state) { + bool has_primary = state->plane_mask & + BIT(drm_plane_index(crtc->primary)); + + /* We always want to have an active plane with an active CRTC */ + if (has_primary != state->enable) + return -EINVAL; + return drm_atomic_add_affected_planes(state->state, crtc); } @@ -90,9 +97,6 @@ static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane, pipe = container_of(plane, struct drm_simple_display_pipe, plane); crtc_state = drm_atomic_get_new_crtc_state(plane_state->state, &pipe->crtc); - if (crtc_state->enable != !!plane_state->crtc) - return -EINVAL; /* plane must match crtc enable state */ - if (!crtc_state->enable) return 0; /* nothing to check when disabling or disabled */ From 0c3eb12324277eac3334ef04c9780b9e734f6f27 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 12 Jul 2017 10:13:30 +0200 Subject: [PATCH 0219/1795] drm/atomic: Use the correct iterator macro in atomic_remove_fb for_each_obj_in_state will be removed, so use the new state here. Signed-off-by: Maarten Lankhorst Cc: Daniel Vetter Cc: Jani Nikula Cc: Sean Paul Cc: David Airlie Link: http://patchwork.freedesktop.org/patch/msgid/20170712081344.25495-3-maarten.lankhorst@linux.intel.com Reviewed-by: Daniel Vetter --- drivers/gpu/drm/drm_framebuffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index fc8ef42203ec..607ef3a97c42 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -817,7 +817,7 @@ retry: plane->old_fb = plane->fb; } - for_each_connector_in_state(state, conn, conn_state, i) { + for_each_new_connector_in_state(state, conn, conn_state, i) { ret = drm_atomic_set_crtc_for_connector(conn_state, NULL); if (ret) From dfb8bb3bd4bbb6c525ee1baa202eae01aa305109 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 12 Jul 2017 10:13:31 +0200 Subject: [PATCH 0220/1795] drm/atomic: Use the new helpers in drm_atomic_helper_disable_all() for_each_obj_in_state will be removed, so don't use it here. 
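Since the next several patches are all mechanical conversions of this kind, the shape of the new iterators is worth showing once; a minimal example, with old_ps/new_ps as arbitrary local names:

static void example_iterate(struct drm_atomic_state *state)
{
        struct drm_plane *plane;
        struct drm_plane_state *old_ps, *new_ps;
        int i;

        /* explicitly ask for the old state, the new state, or both */
        for_each_oldnew_plane_in_state(state, plane, old_ps, new_ps, i) {
                if (old_ps->fb != new_ps->fb) {
                        /* this plane changes framebuffer in the commit */
                }
        }
}

There are matching for_each_old_/for_each_new_ variants for planes, CRTCs and connectors. The point of the conversions is that the state always comes from the drm_atomic_state being committed rather than from plane->state or connector->state, which may already have been swapped by the time the commit tail runs.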
Signed-off-by: Maarten Lankhorst Cc: Daniel Vetter Cc: Jani Nikula Cc: Sean Paul Cc: David Airlie Link: http://patchwork.freedesktop.org/patch/msgid/20170712081344.25495-4-maarten.lankhorst@linux.intel.com Reviewed-by: Daniel Vetter --- drivers/gpu/drm/drm_atomic_helper.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 667ec97d4efb..391cd887f922 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -2715,13 +2715,13 @@ int drm_atomic_helper_disable_all(struct drm_device *dev, goto free; } - for_each_connector_in_state(state, conn, conn_state, i) { + for_each_new_connector_in_state(state, conn, conn_state, i) { ret = drm_atomic_set_crtc_for_connector(conn_state, NULL); if (ret < 0) goto free; } - for_each_plane_in_state(state, plane, plane_state, i) { + for_each_new_plane_in_state(state, plane, plane_state, i) { ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); if (ret < 0) goto free; From 2d705c0b367622d382d80c38240b3069c19f4eeb Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 12 Jul 2017 10:13:32 +0200 Subject: [PATCH 0221/1795] drm/atomic: Use new iterator macros in drm_atomic_helper_wait_for_flip_done for_each_obj_in_state is about to be removed, so use the correct new iterator macro. Signed-off-by: Maarten Lankhorst Cc: Daniel Vetter Cc: Jani Nikula Cc: Sean Paul Cc: David Airlie Link: http://patchwork.freedesktop.org/patch/msgid/20170712081344.25495-5-maarten.lankhorst@linux.intel.com [mlankhorst: Based on danvet's feedback, only apply first hunk and rename crtc_state variable to unused] Reviewed-by: Daniel Vetter --- drivers/gpu/drm/drm_atomic_helper.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 391cd887f922..0fed20692df4 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1266,11 +1266,11 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks); void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev, struct drm_atomic_state *old_state) { - struct drm_crtc_state *crtc_state; + struct drm_crtc_state *unused; struct drm_crtc *crtc; int i; - for_each_crtc_in_state(old_state, crtc, crtc_state, i) { + for_each_crtc_in_state(old_state, crtc, unused, i) { struct drm_crtc_commit *commit = old_state->crtcs[i].commit; int ret; From bdc362f631e2ac140e2b5ddfe5ba9705a5b1a6a0 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 12 Jul 2017 10:13:33 +0200 Subject: [PATCH 0222/1795] drm/vmwgfx: Make check_modeset() use the new atomic iterator macros. I don't think the checking of resources in this function is very atomic-like, but it should definitely not use a macro that's about to be removed. 
Signed-off-by: Maarten Lankhorst Cc: VMware Graphics Cc: Sinclair Yeh Cc: Thomas Hellstrom Link: http://patchwork.freedesktop.org/patch/msgid/20170712081344.25495-6-maarten.lankhorst@linux.intel.com Reviewed-by: Sinclair Yeh Reviewed-by: Daniel Vetter [mlankhorst: Make function static (danvet)] --- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 1cd67b10a0d9..620180df1303 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1527,7 +1527,7 @@ err_out: * RETURNS * Zero for success or -errno */ -int +static int vmw_kms_atomic_check_modeset(struct drm_device *dev, struct drm_atomic_state *state) { @@ -1536,8 +1536,7 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev, struct vmw_private *dev_priv = vmw_priv(dev); int i; - - for_each_crtc_in_state(state, crtc, crtc_state, i) { + for_each_new_crtc_in_state(state, crtc, crtc_state, i) { unsigned long requested_bb_mem = 0; if (dev_priv->active_display_unit == vmw_du_screen_target) { From fe5f6b1fa02fbee34d95578db41f8e7117469b41 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 12 Jul 2017 10:13:34 +0200 Subject: [PATCH 0223/1795] drm/i915: Use correct iterator macro for_each_connector_in_state will be removed, so use the right state here. Signed-off-by: Maarten Lankhorst Cc: Daniel Vetter Cc: Jani Nikula Cc: intel-gfx@lists.freedesktop.org Link: http://patchwork.freedesktop.org/patch/msgid/20170712081344.25495-7-maarten.lankhorst@linux.intel.com Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_hdmi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index ec0779a52d53..916340f03882 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1321,7 +1321,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state) if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI) return false; - for_each_connector_in_state(state, connector, connector_state, i) { + for_each_new_connector_in_state(state, connector, connector_state, i) { const struct drm_display_info *info = &connector->display_info; if (connector_state->crtc != crtc_state->base.crtc) From d57da16f57f44d3cdf26a9b233287057866d4979 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 12 Jul 2017 10:13:36 +0200 Subject: [PATCH 0224/1795] drm/atmel-hlcdec: Use for_each_new_connector_in_state for_each_obj_in_state is about to be removed, so use the new iterator macros. 
Signed-off-by: Maarten Lankhorst Cc: Boris Brezillon Link: http://patchwork.freedesktop.org/patch/msgid/20170712081344.25495-9-maarten.lankhorst@linux.intel.com Reviewed-by: Daniel Vetter Acked-by: Boris Brezillon --- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index 441769c5bcd4..4fbbeab5c5d4 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -237,7 +237,7 @@ static int atmel_hlcdc_crtc_select_output_mode(struct drm_crtc_state *state) crtc = drm_crtc_to_atmel_hlcdc_crtc(state->crtc); - for_each_connector_in_state(state->state, connector, cstate, i) { + for_each_new_connector_in_state(state->state, connector, cstate, i) { struct drm_display_info *info = &connector->display_info; unsigned int supported_fmts = 0; int j; From e741f2b182e6d6203dfbf294affdfb9eb1009ddf Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 12 Jul 2017 10:13:37 +0200 Subject: [PATCH 0225/1795] drm/rockchip: Use for_each_oldnew_plane_in_state in vop_crtc_atomic_flush for_each_obj_in_state is about to be removed, so use the new atomic iterator macros. Signed-off-by: Maarten Lankhorst Cc: Mark Yao Cc: Heiko Stuebner Cc: linux-arm-kernel@lists.infradead.org Cc: linux-rockchip@lists.infradead.org Link: http://patchwork.freedesktop.org/patch/msgid/20170712081344.25495-10-maarten.lankhorst@linux.intel.com Reviewed-by: Daniel Vetter Acked-by: Mark Yao --- drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index ee876a9631f0..f90088b1a247 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -1029,7 +1029,7 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { struct drm_atomic_state *old_state = old_crtc_state->state; - struct drm_plane_state *old_plane_state; + struct drm_plane_state *old_plane_state, *new_plane_state; struct vop *vop = to_vop(crtc); struct drm_plane *plane; int i; @@ -1060,11 +1060,12 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc, } spin_unlock_irq(&crtc->dev->event_lock); - for_each_plane_in_state(old_state, plane, old_plane_state, i) { + for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, + new_plane_state, i) { if (!old_plane_state->fb) continue; - if (old_plane_state->fb == plane->state->fb) + if (old_plane_state->fb == new_plane_state->fb) continue; drm_framebuffer_reference(old_plane_state->fb); From a8e3fb5508fe559c2318c2b6966867110e4de98c Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 12 Jul 2017 10:13:39 +0200 Subject: [PATCH 0226/1795] drm/mali: Use new atomic iterator macros for_each_obj_in_state is about to be removed, so use the new iterator macros. 
Signed-off-by: Maarten Lankhorst Cc: Liviu Dudau Cc: Brian Starkey Cc: Mali DP Maintainers Link: http://patchwork.freedesktop.org/patch/msgid/20170712081344.25495-12-maarten.lankhorst@linux.intel.com Reviewed-by: Daniel Vetter Acked-by: Liviu Dudau --- drivers/gpu/drm/arm/malidp_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 01b13d219917..a6a05a768dd1 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -225,7 +225,7 @@ static void malidp_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_commit_modeset_disables(drm, state); - for_each_crtc_in_state(state, crtc, old_crtc_state, i) { + for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) { malidp_atomic_commit_update_gamma(crtc, old_crtc_state); malidp_atomic_commit_update_coloradj(crtc, old_crtc_state); malidp_atomic_commit_se_config(crtc, old_crtc_state); From 30ea752146e147c5a1f0367aa5303929f7bfd697 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 12 Jul 2017 10:13:40 +0200 Subject: [PATCH 0227/1795] drm/imx: Use atomic iterator macros for_each_obj_in_state is about to be removed, so use the new atomic iterator macros. Signed-off-by: Maarten Lankhorst Cc: Philipp Zabel Link: http://patchwork.freedesktop.org/patch/msgid/20170712081344.25495-13-maarten.lankhorst@linux.intel.com Reviewed-by: Daniel Vetter Reviewed-by: Philipp Zabel Tested-by: Philipp Zabel --- drivers/gpu/drm/imx/imx-drm-core.c | 8 ++++---- drivers/gpu/drm/imx/ipuv3-plane.c | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 95e2181963d9..f5c621219113 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -115,7 +115,7 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; struct drm_plane *plane; - struct drm_plane_state *old_plane_state; + struct drm_plane_state *old_plane_state, *new_plane_state; bool plane_disabling = false; int i; @@ -127,15 +127,15 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_commit_modeset_enables(dev, state); - for_each_plane_in_state(state, plane, old_plane_state, i) { - if (drm_atomic_plane_disabling(old_plane_state, plane->state)) + for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { + if (drm_atomic_plane_disabling(old_plane_state, new_plane_state)) plane_disabling = true; } if (plane_disabling) { drm_atomic_helper_wait_for_vblanks(dev, state); - for_each_plane_in_state(state, plane, old_plane_state, i) + for_each_old_plane_in_state(state, plane, old_plane_state, i) ipu_plane_disable_deferred(plane); } diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 49546222c6d3..ff53c8dec633 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -675,7 +675,7 @@ int ipu_planes_assign_pre(struct drm_device *dev, int available_pres = ipu_prg_max_active_channels(); int i; - for_each_plane_in_state(state, plane, plane_state, i) { + for_each_new_plane_in_state(state, plane, plane_state, i) { struct ipu_plane_state *ipu_state = to_ipu_plane_state(plane_state); struct ipu_plane *ipu_plane = to_ipu_plane(plane); From e27986853a168b31c9801359a7a70a5226b558c3 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 12 Jul 2017 10:13:41 +0200 Subject: [PATCH 
0228/1795] drm/mediatek: Convert to new iterator macros for_each_obj_in_state is about to be removed, so use the new atomic iterator macros. Signed-off-by: Maarten Lankhorst Cc: CK Hu Cc: Philipp Zabel Cc: Matthias Brugger Cc: linux-arm-kernel@lists.infradead.org Cc: linux-mediatek@lists.infradead.org Link: http://patchwork.freedesktop.org/patch/msgid/20170712081344.25495-14-maarten.lankhorst@linux.intel.com Reviewed-by: Daniel Vetter Acked-by: Philipp Zabel --- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 56f802d0a51c..be0741638f94 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -48,11 +48,11 @@ static void mtk_atomic_schedule(struct mtk_drm_private *private, static void mtk_atomic_wait_for_fences(struct drm_atomic_state *state) { struct drm_plane *plane; - struct drm_plane_state *plane_state; + struct drm_plane_state *new_plane_state; int i; - for_each_plane_in_state(state, plane, plane_state, i) - mtk_fb_wait(plane->state->fb); + for_each_new_plane_in_state(state, plane, new_plane_state, i) + mtk_fb_wait(new_plane_state->fb); } static void mtk_atomic_complete(struct mtk_drm_private *private, From 07ab976d1971a91a7ac25d4782dc8985a2314b87 Mon Sep 17 00:00:00 2001 From: "Kumar, Mahesh" Date: Wed, 5 Jul 2017 20:01:44 +0530 Subject: [PATCH 0229/1795] drm/i915: take-out common clamping code of fixed16 wrappers This patch creates a new function for clamping u64 to fixed16. And make use of this function in other fixed16 wrappers. Signed-off-by: Mahesh Kumar Reviewed-by: Maarten Lankhorst Signed-off-by: Maarten Lankhorst Link: http://patchwork.freedesktop.org/patch/msgid/20170705143154.32132-2-mahesh1.kumar@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5e70f5711fc8..1b525051bf5f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -160,6 +160,14 @@ static inline uint_fixed_16_16_t max_fixed_16_16(uint_fixed_16_16_t max1, return max; } +static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val) +{ + uint_fixed_16_16_t fp; + WARN_ON(val >> 32); + fp.val = clamp_t(uint32_t, val, 0, ~0); + return fp; +} + static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val, uint_fixed_16_16_t d) { @@ -170,26 +178,21 @@ static inline uint32_t mul_round_up_u32_fixed16(uint32_t val, uint_fixed_16_16_t mul) { uint64_t intermediate_val; - uint32_t result; intermediate_val = (uint64_t) val * mul.val; intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16); WARN_ON(intermediate_val >> 32); - result = clamp_t(uint32_t, intermediate_val, 0, ~0); - return result; + return clamp_t(uint32_t, intermediate_val, 0, ~0); } static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val, uint_fixed_16_16_t mul) { uint64_t intermediate_val; - uint_fixed_16_16_t fp; intermediate_val = (uint64_t) val.val * mul.val; intermediate_val = intermediate_val >> 16; - WARN_ON(intermediate_val >> 32); - fp.val = clamp_t(uint32_t, intermediate_val, 0, ~0); - return fp; + return clamp_u64_to_fixed16(intermediate_val); } static inline uint_fixed_16_16_t fixed_16_16_div(uint32_t val, uint32_t d) @@ -203,15 +206,11 @@ static inline uint_fixed_16_16_t fixed_16_16_div(uint32_t val, uint32_t d) static inline 
uint_fixed_16_16_t fixed_16_16_div_u64(uint32_t val, uint32_t d) { - uint_fixed_16_16_t res; uint64_t interm_val; interm_val = (uint64_t)val << 16; interm_val = DIV_ROUND_UP_ULL(interm_val, d); - WARN_ON(interm_val >> 32); - res.val = (uint32_t) interm_val; - - return res; + return clamp_u64_to_fixed16(interm_val); } static inline uint32_t div_round_up_u32_fixed16(uint32_t val, @@ -229,12 +228,9 @@ static inline uint_fixed_16_16_t mul_u32_fixed_16_16(uint32_t val, uint_fixed_16_16_t mul) { uint64_t intermediate_val; - uint_fixed_16_16_t fp; intermediate_val = (uint64_t) val * mul.val; - WARN_ON(intermediate_val >> 32); - fp.val = (uint32_t) intermediate_val; - return fp; + return clamp_u64_to_fixed16(intermediate_val); } static inline const char *yesno(bool v) From eed02a7b53131abb796ba8a8cf2886cee366a89f Mon Sep 17 00:00:00 2001 From: "Kumar, Mahesh" Date: Wed, 5 Jul 2017 20:01:45 +0530 Subject: [PATCH 0230/1795] drm/i915: Always perform internal fixed16 division in 64 bits This patch combines fixed_16_16_div & fixed_16_16_div_u64 wrappers. And new fixed_16_16_div wrapper always performs division operation in u64 internally, to avoid any data loss which was happening in earlier version of wrapper. earlier wrapper was converting u32 to fixed16 in 32 bit so we were losing 16-MSB data. Signed-off-by: Mahesh Kumar Reviewed-by: Maarten Lankhorst Signed-off-by: Maarten Lankhorst Link: http://patchwork.freedesktop.org/patch/msgid/20170705143154.32132-3-mahesh1.kumar@intel.com [mlankhorst: Fix typo in commit message.] --- drivers/gpu/drm/i915/i915_drv.h | 9 --------- drivers/gpu/drm/i915/intel_pm.c | 4 ++-- 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 1b525051bf5f..95d5328a26e9 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -196,15 +196,6 @@ static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val, } static inline uint_fixed_16_16_t fixed_16_16_div(uint32_t val, uint32_t d) -{ - uint_fixed_16_16_t fp, res; - - fp = u32_to_fixed_16_16(val); - res.val = DIV_ROUND_UP(fp.val, d); - return res; -} - -static inline uint_fixed_16_16_t fixed_16_16_div_u64(uint32_t val, uint32_t d) { uint64_t interm_val; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 6db833e6dcbd..05eabadaa23d 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4276,7 +4276,7 @@ static uint_fixed_16_16_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, return FP_16_16_MAX; wm_intermediate_val = latency * pixel_rate * cpp; - ret = fixed_16_16_div_u64(wm_intermediate_val, 1000 * 512); + ret = fixed_16_16_div(wm_intermediate_val, 1000 * 512); return ret; } @@ -4314,7 +4314,7 @@ intel_get_linetime_us(struct intel_crtc_state *cstate) return u32_to_fixed_16_16(0); crtc_htotal = cstate->base.adjusted_mode.crtc_htotal; - linetime_us = fixed_16_16_div_u64(crtc_htotal * 1000, pixel_rate); + linetime_us = fixed_16_16_div(crtc_htotal * 1000, pixel_rate); return linetime_us; } From eac2cb81fb87223198c2be93bfd49357d71be669 Mon Sep 17 00:00:00 2001 From: "Kumar, Mahesh" Date: Wed, 5 Jul 2017 20:01:46 +0530 Subject: [PATCH 0231/1795] drm/i915: cleanup fixed-point wrappers naming This patch make naming of fixed-point wrappers consistent operation__<1st operand>_<2nd operand> also shorten the name for fixed_16_16 to fixed16 s/u32_to_fixed_16_16/u32_to_fixed16 s/fixed_16_16_to_u32/fixed16_to_u32 s/fixed_16_16_to_u32_round_up/fixed16_to_u32_round_up 
s/min_fixed_16_16/min_fixed16 s/max_fixed_16_16/max_fixed16 s/mul_u32_fixed_16_16/mul_u32_fixed16 s/fixed_16_16_div/div_fixed16 Changes Since V1: - Split the patch in more logical patches (Maarten) Changes Since V2: - Rebase Signed-off-by: Mahesh Kumar Reviewed-by: Maarten Lankhorst Signed-off-by: Maarten Lankhorst Link: http://patchwork.freedesktop.org/patch/msgid/20170705143154.32132-4-mahesh1.kumar@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 14 ++++---- drivers/gpu/drm/i915/intel_pm.c | 63 ++++++++++++++++----------------- 2 files changed, 38 insertions(+), 39 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 95d5328a26e9..1fc25bd5c904 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -122,7 +122,7 @@ static inline bool is_fixed16_zero(uint_fixed_16_16_t val) return false; } -static inline uint_fixed_16_16_t u32_to_fixed_16_16(uint32_t val) +static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val) { uint_fixed_16_16_t fp; @@ -132,17 +132,17 @@ static inline uint_fixed_16_16_t u32_to_fixed_16_16(uint32_t val) return fp; } -static inline uint32_t fixed_16_16_to_u32_round_up(uint_fixed_16_16_t fp) +static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp) { return DIV_ROUND_UP(fp.val, 1 << 16); } -static inline uint32_t fixed_16_16_to_u32(uint_fixed_16_16_t fp) +static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp) { return fp.val >> 16; } -static inline uint_fixed_16_16_t min_fixed_16_16(uint_fixed_16_16_t min1, +static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1, uint_fixed_16_16_t min2) { uint_fixed_16_16_t min; @@ -151,7 +151,7 @@ static inline uint_fixed_16_16_t min_fixed_16_16(uint_fixed_16_16_t min1, return min; } -static inline uint_fixed_16_16_t max_fixed_16_16(uint_fixed_16_16_t max1, +static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1, uint_fixed_16_16_t max2) { uint_fixed_16_16_t max; @@ -195,7 +195,7 @@ static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val, return clamp_u64_to_fixed16(intermediate_val); } -static inline uint_fixed_16_16_t fixed_16_16_div(uint32_t val, uint32_t d) +static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d) { uint64_t interm_val; @@ -215,7 +215,7 @@ static inline uint32_t div_round_up_u32_fixed16(uint32_t val, return clamp_t(uint32_t, interm_val, 0, ~0); } -static inline uint_fixed_16_16_t mul_u32_fixed_16_16(uint32_t val, +static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val, uint_fixed_16_16_t mul) { uint64_t intermediate_val; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 05eabadaa23d..2603df15b4e1 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3837,7 +3837,7 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate, uint_fixed_16_16_t downscale_h, downscale_w; if (WARN_ON(!intel_wm_plane_visible(cstate, pstate))) - return u32_to_fixed_16_16(0); + return u32_to_fixed16(0); /* n.b., src is 16.16 fixed point, dst is whole integer */ if (plane->id == PLANE_CURSOR) { @@ -3861,10 +3861,10 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate, dst_h = drm_rect_height(&pstate->base.dst); } - fp_w_ratio = fixed_16_16_div(src_w, dst_w); - fp_h_ratio = fixed_16_16_div(src_h, dst_h); - downscale_w = max_fixed_16_16(fp_w_ratio, u32_to_fixed_16_16(1)); - downscale_h = max_fixed_16_16(fp_h_ratio, u32_to_fixed_16_16(1)); + fp_w_ratio = div_fixed16(src_w, dst_w); + fp_h_ratio = 
div_fixed16(src_h, dst_h); + downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1)); + downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1)); return mul_fixed16(downscale_w, downscale_h); } @@ -3872,7 +3872,7 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate, static uint_fixed_16_16_t skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state) { - uint_fixed_16_16_t pipe_downscale = u32_to_fixed_16_16(1); + uint_fixed_16_16_t pipe_downscale = u32_to_fixed16(1); if (!crtc_state->base.enable) return pipe_downscale; @@ -3891,10 +3891,10 @@ skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state) if (!dst_w || !dst_h) return pipe_downscale; - fp_w_ratio = fixed_16_16_div(src_w, dst_w); - fp_h_ratio = fixed_16_16_div(src_h, dst_h); - downscale_w = max_fixed_16_16(fp_w_ratio, u32_to_fixed_16_16(1)); - downscale_h = max_fixed_16_16(fp_h_ratio, u32_to_fixed_16_16(1)); + fp_w_ratio = div_fixed16(src_w, dst_w); + fp_h_ratio = div_fixed16(src_h, dst_h); + downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1)); + downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1)); pipe_downscale = mul_fixed16(downscale_w, downscale_h); } @@ -3913,14 +3913,14 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, int crtc_clock, dotclk; uint32_t pipe_max_pixel_rate; uint_fixed_16_16_t pipe_downscale; - uint_fixed_16_16_t max_downscale = u32_to_fixed_16_16(1); + uint_fixed_16_16_t max_downscale = u32_to_fixed16(1); if (!cstate->base.enable) return 0; drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { uint_fixed_16_16_t plane_downscale; - uint_fixed_16_16_t fp_9_div_8 = fixed_16_16_div(9, 8); + uint_fixed_16_16_t fp_9_div_8 = div_fixed16(9, 8); int bpp; if (!intel_wm_plane_visible(cstate, @@ -3938,7 +3938,7 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, plane_downscale = mul_fixed16(plane_downscale, fp_9_div_8); - max_downscale = max_fixed_16_16(plane_downscale, max_downscale); + max_downscale = max_fixed16(plane_downscale, max_downscale); } pipe_downscale = skl_pipe_downscale_amount(cstate); @@ -4276,7 +4276,7 @@ static uint_fixed_16_16_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, return FP_16_16_MAX; wm_intermediate_val = latency * pixel_rate * cpp; - ret = fixed_16_16_div(wm_intermediate_val, 1000 * 512); + ret = div_fixed16(wm_intermediate_val, 1000 * 512); return ret; } @@ -4294,7 +4294,7 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate, wm_intermediate_val = latency * pixel_rate; wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000); - ret = mul_u32_fixed_16_16(wm_intermediate_val, plane_blocks_per_line); + ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line); return ret; } @@ -4306,15 +4306,15 @@ intel_get_linetime_us(struct intel_crtc_state *cstate) uint_fixed_16_16_t linetime_us; if (!cstate->base.active) - return u32_to_fixed_16_16(0); + return u32_to_fixed16(0); pixel_rate = cstate->pixel_rate; if (WARN_ON(pixel_rate == 0)) - return u32_to_fixed_16_16(0); + return u32_to_fixed16(0); crtc_htotal = cstate->base.adjusted_mode.crtc_htotal; - linetime_us = fixed_16_16_div(crtc_htotal * 1000, pixel_rate); + linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate); return linetime_us; } @@ -4434,14 +4434,14 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, if (y_tiled) { interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512); - plane_blocks_per_line = fixed_16_16_div(interm_pbpl, + plane_blocks_per_line = 
div_fixed16(interm_pbpl, y_min_scanlines); } else if (x_tiled) { interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512); - plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl); + plane_blocks_per_line = u32_to_fixed16(interm_pbpl); } else { interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1; - plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl); + plane_blocks_per_line = u32_to_fixed16(interm_pbpl); } method1 = skl_wm_method1(plane_pixel_rate, cpp, latency); @@ -4450,35 +4450,35 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, latency, plane_blocks_per_line); - y_tile_minimum = mul_u32_fixed_16_16(y_min_scanlines, - plane_blocks_per_line); + y_tile_minimum = mul_u32_fixed16(y_min_scanlines, + plane_blocks_per_line); if (y_tiled) { - selected_result = max_fixed_16_16(method2, y_tile_minimum); + selected_result = max_fixed16(method2, y_tile_minimum); } else { uint32_t linetime_us; - linetime_us = fixed_16_16_to_u32_round_up( + linetime_us = fixed16_to_u32_round_up( intel_get_linetime_us(cstate)); if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) && (plane_bytes_per_line / 512 < 1)) selected_result = method2; else if ((ddb_allocation && ddb_allocation / - fixed_16_16_to_u32_round_up(plane_blocks_per_line)) >= 1) - selected_result = min_fixed_16_16(method1, method2); + fixed16_to_u32_round_up(plane_blocks_per_line)) >= 1) + selected_result = min_fixed16(method1, method2); else if (latency >= linetime_us) - selected_result = min_fixed_16_16(method1, method2); + selected_result = min_fixed16(method1, method2); else selected_result = method1; } - res_blocks = fixed_16_16_to_u32_round_up(selected_result) + 1; + res_blocks = fixed16_to_u32_round_up(selected_result) + 1; res_lines = div_round_up_fixed16(selected_result, plane_blocks_per_line); if (level >= 1 && level <= 7) { if (y_tiled) { - res_blocks += fixed_16_16_to_u32_round_up(y_tile_minimum); + res_blocks += fixed16_to_u32_round_up(y_tile_minimum); res_lines += y_min_scanlines; } else { res_blocks++; @@ -4563,8 +4563,7 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate) if (is_fixed16_zero(linetime_us)) return 0; - linetime_wm = fixed_16_16_to_u32_round_up(mul_u32_fixed_16_16(8, - linetime_us)); + linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us)); /* Display WA #1135: bxt. */ if (IS_BROXTON(dev_priv) && dev_priv->ipc_enabled) From 6ea593c029d8f7c847fd68392eeb5a284ee96bd7 Mon Sep 17 00:00:00 2001 From: "Kumar, Mahesh" Date: Wed, 5 Jul 2017 20:01:47 +0530 Subject: [PATCH 0232/1795] drm/i915: Addition wrapper for fixed16.16 operation This patch introduce addition wrapper for fixed point 16.16 operations. Which will be used by later patches to avoid direct member variables access of fixed_16_16_t structure. 
add_fixed16 : takes 2 fixed_16_16_t variable & returns fixed_16_16_t add_fixed16_u32 : takes fixed_16_16_t & u32 variable & returns fixed_16_16_t Signed-off-by: Mahesh Kumar Reviewed-by: Maarten Lankhorst Signed-off-by: Maarten Lankhorst Link: http://patchwork.freedesktop.org/patch/msgid/20170705143154.32132-5-mahesh1.kumar@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 1fc25bd5c904..b4716ce32ca2 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -224,6 +224,25 @@ static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val, return clamp_u64_to_fixed16(intermediate_val); } +static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1, + uint_fixed_16_16_t add2) +{ + uint64_t interm_sum; + + interm_sum = (uint64_t) add1.val + add2.val; + return clamp_u64_to_fixed16(interm_sum); +} + +static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1, + uint32_t add2) +{ + uint64_t interm_sum; + uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2); + + interm_sum = (uint64_t) add1.val + interm_add2.val; + return clamp_u64_to_fixed16(interm_sum); +} + static inline const char *yesno(bool v) { return v ? "yes" : "no"; From 129eaa957dd5a717edd70cfaf0626c143c03e54e Mon Sep 17 00:00:00 2001 From: "Kumar, Mahesh" Date: Wed, 5 Jul 2017 20:01:48 +0530 Subject: [PATCH 0233/1795] drm/i915/skl+: WM calculation don't require height height of plane was require to swap width/height in case of 90/270 rotation. Now src structure contains already swapped values, So we don't have to calculate height of the plane. Signed-off-by: Mahesh Kumar Reviewed-by: Maarten Lankhorst Signed-off-by: Maarten Lankhorst Link: http://patchwork.freedesktop.org/patch/msgid/20170705143154.32132-6-mahesh1.kumar@intel.com --- drivers/gpu/drm/i915/intel_pm.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 2603df15b4e1..81e77f073d8c 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4361,7 +4361,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, uint32_t plane_bytes_per_line; uint32_t res_blocks, res_lines; uint8_t cpp; - uint32_t width = 0, height = 0; + uint32_t width = 0; uint32_t plane_pixel_rate; uint_fixed_16_16_t y_tile_minimum; uint32_t y_min_scanlines; @@ -4390,7 +4390,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, if (plane->id == PLANE_CURSOR) { width = intel_pstate->base.crtc_w; - height = intel_pstate->base.crtc_h; } else { /* * Src coordinates are already rotated by 270 degrees for @@ -4398,7 +4397,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, * GTT mapping), hence no need to account for rotation here. */ width = drm_rect_width(&intel_pstate->base.src) >> 16; - height = drm_rect_height(&intel_pstate->base.src) >> 16; } cpp = fb->format->cpp[0]; From b064be0784530d2a98b589b40793e3d421fb93ba Mon Sep 17 00:00:00 2001 From: "Kumar, Mahesh" Date: Wed, 5 Jul 2017 20:01:49 +0530 Subject: [PATCH 0234/1795] drm/i915/skl+: unify cpp value in WM calculation use same cpp value in different phase of plane WM caluclation. 
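[Editorial aside: a small self-contained sketch, in plain C with simplified types, of the 16.16 fixed-point scheme these wrappers implement. It is not the kernel code itself and omits the WARN/clamp handling: the high 16 bits carry the integer part, the low 16 bits the fraction, and intermediate math is done in 64 bits so the shift cannot overflow.]

#include <stdint.h>

typedef struct { uint32_t val; } fixed16_t;	/* stand-in for uint_fixed_16_16_t */

static fixed16_t u32_to_fixed16_sketch(uint32_t v)
{
	fixed16_t fp = { .val = v << 16 };	/* 3 becomes 0x00030000 */
	return fp;
}

static fixed16_t div_fixed16_sketch(uint32_t val, uint32_t d)
{
	/* divide in 64 bits, rounding up; the kernel version also clamps to u32 */
	uint64_t interm = ((uint64_t)val << 16) + d - 1;
	fixed16_t fp = { .val = (uint32_t)(interm / d) };
	return fp;
}

/* div_fixed16_sketch(9, 8).val == 0x00012000, i.e. 1.125, the 9/8 factor
 * used by the watermark code above. */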
Signed-off-by: Mahesh Kumar Reviewed-by: Maarten Lankhorst Signed-off-by: Maarten Lankhorst Link: http://patchwork.freedesktop.org/patch/msgid/20170705143154.32132-7-mahesh1.kumar@intel.com --- drivers/gpu/drm/i915/intel_pm.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 81e77f073d8c..ee2a349cfe68 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4399,13 +4399,11 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, width = drm_rect_width(&intel_pstate->base.src) >> 16; } - cpp = fb->format->cpp[0]; + cpp = (fb->format->format == DRM_FORMAT_NV12) ? fb->format->cpp[1] : + fb->format->cpp[0]; plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate); if (drm_rotation_90_or_270(pstate->rotation)) { - int cpp = (fb->format->format == DRM_FORMAT_NV12) ? - fb->format->cpp[1] : - fb->format->cpp[0]; switch (cpp) { case 1: From 56a91c4932bd038f3d1f6555ddc349ca4e6933b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 12 Jul 2017 18:51:00 +0300 Subject: [PATCH 0235/1795] drm/dp/mst: Handle errors from drm_atomic_get_private_obj_state() correctly MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On failure drm_atomic_get_private_obj_state() returns and error pointer instead of NULL. Adjust the checks in the callers to match. Cc: stable@vger.kernel.org Cc: Dhinakaran Pandiyan Cc: Harry Wentland Cc: Maarten Lankhorst Fixes: edb1ed1ab7d3 ("drm/dp: Add DP MST helpers to atomically find and release vcpi slots") Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/20170712155102.26276-1-ville.syrjala@linux.intel.com Reviewed-by: Dhinakaran Pandiyan --- drivers/gpu/drm/drm_dp_mst_topology.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index bfd237c15e76..18cecd94acb6 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -2515,8 +2515,8 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, int req_slots; topology_state = drm_atomic_get_mst_topology_state(state, mgr); - if (topology_state == NULL) - return -ENOMEM; + if (IS_ERR(topology_state)) + return PTR_ERR(topology_state); port = drm_dp_get_validated_port_ref(mgr, port); if (port == NULL) @@ -2555,8 +2555,8 @@ int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state, struct drm_dp_mst_topology_state *topology_state; topology_state = drm_atomic_get_mst_topology_state(state, mgr); - if (topology_state == NULL) - return -ENOMEM; + if (IS_ERR(topology_state)) + return PTR_ERR(topology_state); /* We cannot rely on port->vcpi.num_slots to update * topology_state->avail_slots as the port may not exist if the parent From 178e32c224d2772d3862828dc6f81e4d8953b2f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 12 Jul 2017 18:51:01 +0300 Subject: [PATCH 0236/1795] drm/atomic: Remove pointless private object NULL state check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We will never add private objects with a NULL state into the atomic state, hence checking for that is pointless. 
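[Editorial aside, for context: a minimal sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() convention the caller fix above relies on. The wrapper function here is made up; the check mirrors what the patch adds to the MST callers.]

#include <linux/err.h>
#include <drm/drm_dp_mst_helper.h>

static int example_check_mst(struct drm_atomic_state *state,
			     struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_mst_topology_state *topology_state;

	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
	if (IS_ERR(topology_state))
		/* the errno (-ENOMEM, -EDEADLK, ...) is encoded in the pointer */
		return PTR_ERR(topology_state);

	/* a non-error pointer is a valid state here */
	return 0;
}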
Cc: Dhinakaran Pandiyan Reviewed-by: Daniel Vetter Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/20170712155102.26276-2-ville.syrjala@linux.intel.com --- drivers/gpu/drm/drm_atomic.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 09ca662fcd35..f0482247b31f 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1013,8 +1013,7 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state, void *obj, struct __drm_private_objs_state *arr; for (i = 0; i < state->num_private_objs; i++) - if (obj == state->private_objs[i].obj && - state->private_objs[i].obj_state) + if (obj == state->private_objs[i].obj) return state->private_objs[i].obj_state; num_objs = state->num_private_objs + 1; From a4370c777406c2810e37fafd166ccddecdb2a60c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 12 Jul 2017 18:51:02 +0300 Subject: [PATCH 0237/1795] drm/atomic: Make private objs proper objects MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make the atomic private object stuff less special by introducing proper base classes for the object and its state. Drivers can embed these in their own appropriate objects, after which these things will work exactly like the plane/crtc/connector states during atomic operations. v2: Reorder to not depend on drm_dynarray (Daniel) Cc: Dhinakaran Pandiyan Cc: Daniel Vetter Reviewed-by: Daniel Vetter #v1 Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/20170712155102.26276-3-ville.syrjala@linux.intel.com --- drivers/gpu/drm/drm_atomic.c | 78 +++++++++++++---- drivers/gpu/drm/drm_atomic_helper.c | 30 ++++++- drivers/gpu/drm/drm_dp_mst_topology.c | 63 +++++++------- include/drm/drm_atomic.h | 120 ++++++++++++++++---------- include/drm/drm_atomic_helper.h | 4 + include/drm/drm_dp_mst_helper.h | 10 +++ 6 files changed, 203 insertions(+), 102 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index f0482247b31f..b59fd33c5786 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -187,12 +187,15 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) } for (i = 0; i < state->num_private_objs; i++) { - void *obj_state = state->private_objs[i].obj_state; + struct drm_private_obj *obj = state->private_objs[i].ptr; - state->private_objs[i].funcs->destroy_state(obj_state); - state->private_objs[i].obj = NULL; - state->private_objs[i].obj_state = NULL; - state->private_objs[i].funcs = NULL; + if (!obj) + continue; + + obj->funcs->atomic_destroy_state(obj, + state->private_objs[i].state); + state->private_objs[i].ptr = NULL; + state->private_objs[i].state = NULL; } state->num_private_objs = 0; @@ -989,12 +992,45 @@ static void drm_atomic_plane_print_state(struct drm_printer *p, plane->funcs->atomic_print_state(p, state); } +/** + * drm_atomic_private_obj_init - initialize private object + * @obj: private object + * @state: initial private object state + * @funcs: pointer to the struct of function pointers that identify the object + * type + * + * Initialize the private object, which can be embedded into any + * driver private object that needs its own atomic state. 
+ */ +void +drm_atomic_private_obj_init(struct drm_private_obj *obj, + struct drm_private_state *state, + const struct drm_private_state_funcs *funcs) +{ + memset(obj, 0, sizeof(*obj)); + + obj->state = state; + obj->funcs = funcs; +} +EXPORT_SYMBOL(drm_atomic_private_obj_init); + +/** + * drm_atomic_private_obj_fini - finalize private object + * @obj: private object + * + * Finalize the private object. + */ +void +drm_atomic_private_obj_fini(struct drm_private_obj *obj) +{ + obj->funcs->atomic_destroy_state(obj, obj->state); +} +EXPORT_SYMBOL(drm_atomic_private_obj_fini); + /** * drm_atomic_get_private_obj_state - get private object state * @state: global atomic state * @obj: private object to get the state for - * @funcs: pointer to the struct of function pointers that identify the object - * type * * This function returns the private object state for the given private object, * allocating the state if needed. It does not grab any locks as the caller is @@ -1004,17 +1040,18 @@ static void drm_atomic_plane_print_state(struct drm_printer *p, * * Either the allocated state or the error code encoded into a pointer. */ -void * -drm_atomic_get_private_obj_state(struct drm_atomic_state *state, void *obj, - const struct drm_private_state_funcs *funcs) +struct drm_private_state * +drm_atomic_get_private_obj_state(struct drm_atomic_state *state, + struct drm_private_obj *obj) { int index, num_objs, i; size_t size; struct __drm_private_objs_state *arr; + struct drm_private_state *obj_state; for (i = 0; i < state->num_private_objs; i++) - if (obj == state->private_objs[i].obj) - return state->private_objs[i].obj_state; + if (obj == state->private_objs[i].ptr) + return state->private_objs[i].state; num_objs = state->num_private_objs + 1; size = sizeof(*state->private_objs) * num_objs; @@ -1026,18 +1063,21 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state, void *obj, index = state->num_private_objs; memset(&state->private_objs[index], 0, sizeof(*state->private_objs)); - state->private_objs[index].obj_state = funcs->duplicate_state(state, obj); - if (!state->private_objs[index].obj_state) + obj_state = obj->funcs->atomic_duplicate_state(obj); + if (!obj_state) return ERR_PTR(-ENOMEM); - state->private_objs[index].obj = obj; - state->private_objs[index].funcs = funcs; + state->private_objs[index].state = obj_state; + state->private_objs[index].old_state = obj->state; + state->private_objs[index].new_state = obj_state; + state->private_objs[index].ptr = obj; + state->num_private_objs = num_objs; - DRM_DEBUG_ATOMIC("Added new private object state %p to %p\n", - state->private_objs[index].obj_state, state); + DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n", + obj, obj_state, state); - return state->private_objs[index].obj_state; + return obj_state; } EXPORT_SYMBOL(drm_atomic_get_private_obj_state); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 0fed20692df4..fa64b31ae579 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -2267,8 +2267,8 @@ void drm_atomic_helper_swap_state(struct drm_atomic_state *state, struct drm_plane *plane; struct drm_plane_state *old_plane_state, *new_plane_state; struct drm_crtc_commit *commit; - void *obj, *obj_state; - const struct drm_private_state_funcs *funcs; + struct drm_private_obj *obj; + struct drm_private_state *old_obj_state, *new_obj_state; if (stall) { for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { @@ -2330,8 +2330,15 @@ void 
drm_atomic_helper_swap_state(struct drm_atomic_state *state, plane->state = new_plane_state; } - __for_each_private_obj(state, obj, obj_state, i, funcs) - funcs->swap_state(obj, &state->private_objs[i].obj_state); + for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) { + WARN_ON(obj->state != old_obj_state); + + old_obj_state->state = state; + new_obj_state->state = NULL; + + state->private_objs[i].state = old_obj_state; + obj->state = new_obj_state; + } } EXPORT_SYMBOL(drm_atomic_helper_swap_state); @@ -3828,3 +3835,18 @@ fail: return ret; } EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set); + +/** + * __drm_atomic_helper_private_duplicate_state - copy atomic private state + * @obj: CRTC object + * @state: new private object state + * + * Copies atomic state from a private objects's current state and resets inferred values. + * This is useful for drivers that subclass the private state. + */ +void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + memcpy(state, obj->state, sizeof(*state)); +} +EXPORT_SYMBOL(__drm_atomic_helper_private_obj_duplicate_state); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 18cecd94acb6..552e71d5aa5f 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -31,6 +31,8 @@ #include #include +#include +#include /** * DOC: dp mst helper @@ -2992,41 +2994,32 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) (*mgr->cbs->hotplug)(mgr); } -void *drm_dp_mst_duplicate_state(struct drm_atomic_state *state, void *obj) +static struct drm_private_state * +drm_dp_mst_duplicate_state(struct drm_private_obj *obj) { - struct drm_dp_mst_topology_mgr *mgr = obj; - struct drm_dp_mst_topology_state *new_mst_state; + struct drm_dp_mst_topology_state *state; - if (WARN_ON(!mgr->state)) + state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL); + if (!state) return NULL; - new_mst_state = kmemdup(mgr->state, sizeof(*new_mst_state), GFP_KERNEL); - if (new_mst_state) - new_mst_state->state = state; - return new_mst_state; + __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); + + return &state->base; } -void drm_dp_mst_swap_state(void *obj, void **obj_state_ptr) +static void drm_dp_mst_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) { - struct drm_dp_mst_topology_mgr *mgr = obj; - struct drm_dp_mst_topology_state **topology_state_ptr; + struct drm_dp_mst_topology_state *mst_state = + to_dp_mst_topology_state(state); - topology_state_ptr = (struct drm_dp_mst_topology_state **)obj_state_ptr; - - mgr->state->state = (*topology_state_ptr)->state; - swap(*topology_state_ptr, mgr->state); - mgr->state->state = NULL; -} - -void drm_dp_mst_destroy_state(void *obj_state) -{ - kfree(obj_state); + kfree(mst_state); } static const struct drm_private_state_funcs mst_state_funcs = { - .duplicate_state = drm_dp_mst_duplicate_state, - .swap_state = drm_dp_mst_swap_state, - .destroy_state = drm_dp_mst_destroy_state, + .atomic_duplicate_state = drm_dp_mst_duplicate_state, + .atomic_destroy_state = drm_dp_mst_destroy_state, }; /** @@ -3050,8 +3043,7 @@ struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_a struct drm_device *dev = mgr->dev; WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); - return drm_atomic_get_private_obj_state(state, mgr, - &mst_state_funcs); + return 
to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base)); } EXPORT_SYMBOL(drm_atomic_get_mst_topology_state); @@ -3071,6 +3063,8 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id) { + struct drm_dp_mst_topology_state *mst_state; + mutex_init(&mgr->lock); mutex_init(&mgr->qlock); mutex_init(&mgr->payload_lock); @@ -3099,14 +3093,18 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, if (test_calc_pbn_mode() < 0) DRM_ERROR("MST PBN self-test failed\n"); - mgr->state = kzalloc(sizeof(*mgr->state), GFP_KERNEL); - if (mgr->state == NULL) + mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL); + if (mst_state == NULL) return -ENOMEM; - mgr->state->mgr = mgr; + + mst_state->mgr = mgr; /* max. time slots - one slot for MTP header */ - mgr->state->avail_slots = 63; - mgr->funcs = &mst_state_funcs; + mst_state->avail_slots = 63; + + drm_atomic_private_obj_init(&mgr->base, + &mst_state->base, + &mst_state_funcs); return 0; } @@ -3128,8 +3126,7 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) mutex_unlock(&mgr->payload_lock); mgr->dev = NULL; mgr->aux = NULL; - kfree(mgr->state); - mgr->state = NULL; + drm_atomic_private_obj_fini(&mgr->base); mgr->funcs = NULL; } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy); diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index dcc8e0cdb7ff..7cd0f303f5a3 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -154,6 +154,9 @@ struct __drm_connnectors_state { struct drm_connector_state *state, *old_state, *new_state; }; +struct drm_private_obj; +struct drm_private_state; + /** * struct drm_private_state_funcs - atomic state functions for private objects * @@ -166,7 +169,7 @@ struct __drm_connnectors_state { */ struct drm_private_state_funcs { /** - * @duplicate_state: + * @atomic_duplicate_state: * * Duplicate the current state of the private object and return it. It * is an error to call this before obj->state has been initialized. @@ -176,29 +179,30 @@ struct drm_private_state_funcs { * Duplicated atomic state or NULL when obj->state is not * initialized or allocation failed. */ - void *(*duplicate_state)(struct drm_atomic_state *state, void *obj); + struct drm_private_state *(*atomic_duplicate_state)(struct drm_private_obj *obj); /** - * @swap_state: + * @atomic_destroy_state: * - * This function swaps the existing state of a private object @obj with - * it's newly created state, the pointer to which is passed as - * @obj_state_ptr. + * Frees the private object state created with @atomic_duplicate_state. */ - void (*swap_state)(void *obj, void **obj_state_ptr); + void (*atomic_destroy_state)(struct drm_private_obj *obj, + struct drm_private_state *state); +}; - /** - * @destroy_state: - * - * Frees the private object state created with @duplicate_state. 
- */ - void (*destroy_state)(void *obj_state); +struct drm_private_obj { + struct drm_private_state *state; + + const struct drm_private_state_funcs *funcs; +}; + +struct drm_private_state { + struct drm_atomic_state *state; }; struct __drm_private_objs_state { - void *obj; - void *obj_state; - const struct drm_private_state_funcs *funcs; + struct drm_private_obj *ptr; + struct drm_private_state *state, *old_state, *new_state; }; /** @@ -321,10 +325,14 @@ int drm_atomic_connector_set_property(struct drm_connector *connector, struct drm_connector_state *state, struct drm_property *property, uint64_t val); -void * __must_check +void drm_atomic_private_obj_init(struct drm_private_obj *obj, + struct drm_private_state *state, + const struct drm_private_state_funcs *funcs); +void drm_atomic_private_obj_fini(struct drm_private_obj *obj); + +struct drm_private_state * __must_check drm_atomic_get_private_obj_state(struct drm_atomic_state *state, - void *obj, - const struct drm_private_state_funcs *funcs); + struct drm_private_obj *obj); /** * drm_atomic_get_existing_crtc_state - get crtc state, if it exists @@ -811,43 +819,63 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p); for_each_if (plane) /** - * __for_each_private_obj - iterate over all private objects + * for_each_oldnew_private_obj_in_state - iterate over all private objects in an atomic update * @__state: &struct drm_atomic_state pointer - * @obj: private object iteration cursor - * @obj_state: private object state iteration cursor + * @obj: &struct drm_private_obj iteration cursor + * @old_obj_state: &struct drm_private_state iteration cursor for the old state + * @new_obj_state: &struct drm_private_state iteration cursor for the new state * @__i: int iteration cursor, for macro-internal use - * @__funcs: &struct drm_private_state_funcs iteration cursor * - * This macro iterates over the array containing private object data in atomic - * state + * This iterates over all private objects in an atomic update, tracking both + * old and new state. This is useful in places where the state delta needs + * to be considered, for example in atomic check functions. 
*/ -#define __for_each_private_obj(__state, obj, obj_state, __i, __funcs) \ - for ((__i) = 0; \ - (__i) < (__state)->num_private_objs && \ - ((obj) = (__state)->private_objs[__i].obj, \ - (__funcs) = (__state)->private_objs[__i].funcs, \ - (obj_state) = (__state)->private_objs[__i].obj_state, \ - 1); \ - (__i)++) \ +#define for_each_oldnew_private_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_private_objs && \ + ((obj) = (__state)->private_objs[__i].ptr, \ + (old_obj_state) = (__state)->private_objs[__i].old_state, \ + (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \ + (__i)++) \ + for_each_if (obj) /** - * for_each_private_obj - iterate over a specify type of private object + * for_each_old_private_obj_in_state - iterate over all private objects in an atomic update * @__state: &struct drm_atomic_state pointer - * @obj_funcs: &struct drm_private_state_funcs function table to filter - * private objects - * @obj: private object iteration cursor - * @obj_state: private object state iteration cursor + * @obj: &struct drm_private_obj iteration cursor + * @old_obj_state: &struct drm_private_state iteration cursor for the old state * @__i: int iteration cursor, for macro-internal use - * @__funcs: &struct drm_private_state_funcs iteration cursor * - * This macro iterates over the private objects state array while filtering the - * objects based on the vfunc table that is passed as @obj_funcs. New macros - * can be created by passing in the vfunc table associated with a specific - * private object. + * This iterates over all private objects in an atomic update, tracking only + * the old state. This is useful in disable functions, where we need the old + * state the hardware is still in. */ -#define for_each_private_obj(__state, obj_funcs, obj, obj_state, __i, __funcs) \ - __for_each_private_obj(__state, obj, obj_state, __i, __funcs) \ - for_each_if (__funcs == obj_funcs) +#define for_each_old_private_obj_in_state(__state, obj, old_obj_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_private_objs && \ + ((obj) = (__state)->private_objs[__i].ptr, \ + (old_obj_state) = (__state)->private_objs[__i].old_state, 1); \ + (__i)++) \ + for_each_if (obj) + +/** + * for_each_new_private_obj_in_state - iterate over all private objects in an atomic update + * @__state: &struct drm_atomic_state pointer + * @obj: &struct drm_private_obj iteration cursor + * @new_obj_state: &struct drm_private_state iteration cursor for the new state + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all private objects in an atomic update, tracking only + * the new state. This is useful in enable functions, where we need the new state the + * hardware should be in when the atomic commit operation has completed. 
+ */ +#define for_each_new_private_obj_in_state(__state, obj, new_obj_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_private_objs && \ + ((obj) = (__state)->private_objs[__i].ptr, \ + (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \ + (__i)++) \ + for_each_if (obj) /** * drm_atomic_crtc_needs_modeset - compute combined modeset need diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index dd196cc0afd7..7db3438ff735 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -33,6 +33,8 @@ #include struct drm_atomic_state; +struct drm_private_obj; +struct drm_private_state; int drm_atomic_helper_check_modeset(struct drm_device *dev, struct drm_atomic_state *state); @@ -185,6 +187,8 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, uint32_t size, struct drm_modeset_acquire_ctx *ctx); +void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj, + struct drm_private_state *state); /** * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 177ab6f86855..d55abb75f29a 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -404,12 +404,17 @@ struct drm_dp_payload { int vcpi; }; +#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base) + struct drm_dp_mst_topology_state { + struct drm_private_state base; int avail_slots; struct drm_atomic_state *state; struct drm_dp_mst_topology_mgr *mgr; }; +#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base) + /** * struct drm_dp_mst_topology_mgr - DisplayPort MST manager * @@ -418,6 +423,11 @@ struct drm_dp_mst_topology_state { * on the GPU. */ struct drm_dp_mst_topology_mgr { + /** + * @base: Base private object for atomic + */ + struct drm_private_obj base; + /** * @dev: device pointer for adding i2c devices etc. */ From e38e12895022d7148220fc35782b5bd6461f1736 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 12 Jul 2017 18:52:54 +0300 Subject: [PATCH 0238/1795] drm/dp/mst: Use memchr_inv() instead of memcmp() against a zeroed array MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We have memch_inv(), so no need to memcmp() against a zeroed temp array. 
Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/20170712155254.26455-1-ville.syrjala@linux.intel.com Reviewed-by: Daniel Vetter --- drivers/gpu/drm/drm_dp_mst_topology.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 552e71d5aa5f..f7e292bf2baf 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1337,15 +1337,17 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work) static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, u8 *guid) { - static u8 zero_guid[16]; + u64 salt; - if (!memcmp(guid, zero_guid, 16)) { - u64 salt = get_jiffies_64(); - memcpy(&guid[0], &salt, sizeof(u64)); - memcpy(&guid[8], &salt, sizeof(u64)); - return false; - } - return true; + if (memchr_inv(guid, 0, 16)) + return true; + + salt = get_jiffies_64(); + + memcpy(&guid[0], &salt, sizeof(u64)); + memcpy(&guid[8], &salt, sizeof(u64)); + + return false; } #if 0 From 88be58be886f1215cc73dc8c273c985eecd7385c Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 6 Jul 2017 15:00:19 +0200 Subject: [PATCH 0239/1795] drm/i915/fbdev: Always forward hotplug events With deferred fbdev setup we always need to forward hotplug events, even if fbdev isn't fully set up yet. Otherwise the deferred setup will neer happen. Originally this check was added in commit c45eb4fed12d278d3619f1904885bd0d7bcbf036 (tag: drm-intel-next-fixes-2016-08-05) Author: Chris Wilson Date: Wed Jul 13 18:34:45 2016 +0100 drm/i915/fbdev: Check for the framebuffer before use But the specific case of the hotplug function blowing up was fixed in commit 50c3dc970a09b3b60422a58934cc27a413288bab Author: Daniel Vetter Date: Fri Jun 27 17:19:22 2014 +0200 drm/fb-helper: Fix hpd vs. initial config races Cc: Maarten Lankhorst Cc: Mika Kuoppala Cc: Chris Wilson Reviewed-by: Maarten Lankhorst Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170706130023.28417-1-daniel.vetter@ffwll.ch --- drivers/gpu/drm/i915/intel_fbdev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 0c4cde6b2e6f..ee1a5b937590 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -813,7 +813,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev) { struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; - if (ifbdev && ifbdev->vma) + if (ifbdev) drm_fb_helper_hotplug_event(&ifbdev->helper); } From 346fb4e0b9660e2fe888f870608d287e1980f665 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 6 Jul 2017 15:00:20 +0200 Subject: [PATCH 0240/1795] drm/i915: Protect against deferred fbdev setup We could probably hit this already with our current async fbdev init, but it's much easier to hit this with the new deferred fbdev setup that I'm working on polishing. 
Cc: Maarten Lankhorst Reported-by: Maarten Lankhorst Reviewed-by: Maarten Lankhorst Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170706130023.28417-2-daniel.vetter@ffwll.ch --- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 620c9218d1c1..2ef75c1a6119 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1935,7 +1935,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) return ret; #ifdef CONFIG_DRM_FBDEV_EMULATION - if (dev_priv->fbdev) { + if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) { fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb); seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", From 5f057ffd6da26c939dff976a2acc76233bbe4467 Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Thu, 13 Jul 2017 18:25:25 +0200 Subject: [PATCH 0241/1795] drm: rename, adjust and export drm_atomic_replace_property_blob The function has little to do with atomic, it's just where it has so far been needed. So, rename it to drm_property_replace_blob, move it to drm_property.c and export it. Change the semantics to return whether the blob was replaced instead of using an extra argument for that. Signed-off-by: Peter Rosin Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170713162538.22788-2-peda@axentia.se --- drivers/gpu/drm/drm_atomic.c | 30 +----------------------------- drivers/gpu/drm/drm_property.c | 23 +++++++++++++++++++++++ include/drm/drm_property.h | 2 ++ 3 files changed, 26 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index b59fd33c5786..01192dd3ed79 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -411,34 +411,6 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, } EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc); -/** - * drm_atomic_replace_property_blob - replace a blob property - * @blob: a pointer to the member blob to be replaced - * @new_blob: the new blob to replace with - * @replaced: whether the blob has been replaced - * - * RETURNS: - * Zero on success, error code on failure - */ -static void -drm_atomic_replace_property_blob(struct drm_property_blob **blob, - struct drm_property_blob *new_blob, - bool *replaced) -{ - struct drm_property_blob *old_blob = *blob; - - if (old_blob == new_blob) - return; - - drm_property_blob_put(old_blob); - if (new_blob) - drm_property_blob_get(new_blob); - *blob = new_blob; - *replaced = true; - - return; -} - static int drm_atomic_replace_property_blob_from_id(struct drm_device *dev, struct drm_property_blob **blob, @@ -459,7 +431,7 @@ drm_atomic_replace_property_blob_from_id(struct drm_device *dev, } } - drm_atomic_replace_property_blob(blob, new_blob, replaced); + *replaced |= drm_property_replace_blob(blob, new_blob); drm_property_blob_put(new_blob); return 0; diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c index 3e88fa24eab3..bc5128203056 100644 --- a/drivers/gpu/drm/drm_property.c +++ b/drivers/gpu/drm/drm_property.c @@ -709,6 +709,29 @@ err_created: } EXPORT_SYMBOL(drm_property_replace_global_blob); +/** + * drm_property_replace_blob - replace a blob property + * @blob: a pointer to the member blob to be replaced + * @new_blob: the new blob to replace with + * + * Return: true if the blob was in 
fact replaced. + */ +bool drm_property_replace_blob(struct drm_property_blob **blob, + struct drm_property_blob *new_blob) +{ + struct drm_property_blob *old_blob = *blob; + + if (old_blob == new_blob) + return false; + + drm_property_blob_put(old_blob); + if (new_blob) + drm_property_blob_get(new_blob); + *blob = new_blob; + return true; +} +EXPORT_SYMBOL(drm_property_replace_blob); + int drm_mode_getblob_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h index 619868dc08d8..37355c623e6c 100644 --- a/include/drm/drm_property.h +++ b/include/drm/drm_property.h @@ -273,6 +273,8 @@ int drm_property_replace_global_blob(struct drm_device *dev, const void *data, struct drm_mode_object *obj_holds_id, struct drm_property *prop_holds_id); +bool drm_property_replace_blob(struct drm_property_blob **blob, + struct drm_property_blob *new_blob); struct drm_property_blob *drm_property_blob_get(struct drm_property_blob *blob); void drm_property_blob_put(struct drm_property_blob *blob); From e2b9dd304aed9e1415d4e001b81a10e2a6aeef7b Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Thu, 13 Jul 2017 18:25:26 +0200 Subject: [PATCH 0242/1795] drm/atomic-helper: update lut props directly in ..._legacy_gamma_set Do not waste cycles looking up the property id when we have the actual property already. Signed-off-by: Peter Rosin Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170713162538.22788-3-peda@axentia.se --- drivers/gpu/drm/drm_atomic_helper.c | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index fa64b31ae579..2f675112e225 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -3776,12 +3776,12 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, struct drm_modeset_acquire_ctx *ctx) { struct drm_device *dev = crtc->dev; - struct drm_mode_config *config = &dev->mode_config; struct drm_atomic_state *state; struct drm_crtc_state *crtc_state; struct drm_property_blob *blob = NULL; struct drm_color_lut *blob_data; int i, ret = 0; + bool replaced; state = drm_atomic_state_alloc(crtc->dev); if (!state) @@ -3812,20 +3812,10 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, } /* Reset DEGAMMA_LUT and CTM properties. */ - ret = drm_atomic_crtc_set_property(crtc, crtc_state, - config->degamma_lut_property, 0); - if (ret) - goto fail; - - ret = drm_atomic_crtc_set_property(crtc, crtc_state, - config->ctm_property, 0); - if (ret) - goto fail; - - ret = drm_atomic_crtc_set_property(crtc, crtc_state, - config->gamma_lut_property, blob->base.id); - if (ret) - goto fail; + replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL); + replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL); + replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob); + crtc_state->color_mgmt_changed |= replaced; ret = drm_atomic_commit(state); From 964c60063bff6ae7631eb4d9c0bac3913749ca9f Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Thu, 13 Jul 2017 18:25:27 +0200 Subject: [PATCH 0243/1795] drm/fb-helper: separate the fb_setcmap helper into atomic and legacy paths The legacy path implements setcmap in terms of crtc .gamma_set. The atomic path implements setcmap by directly updating the crtc gamma_lut property. 
This has a couple of benefits: - it makes the redundant fb helpers .load_lut, .gamma_set and .gamma_get completely obsolete. They are now unused and subject for removal. - atomic drivers that support clut modes get fbdev support for those from the drm core. This includes atmel-hlcdc, but perhaps others as well? Signed-off-by: Peter Rosin Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20170713162538.22788-4-peda@axentia.se --- drivers/gpu/drm/drm_fb_helper.c | 253 +++++++++++++++++++++----------- 1 file changed, 171 insertions(+), 82 deletions(-) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 721511da4de6..42090fe00ef9 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1195,27 +1195,6 @@ void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper, } EXPORT_SYMBOL(drm_fb_helper_set_suspend_unlocked); -static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, - u16 blue, u16 regno, struct fb_info *info) -{ - struct drm_fb_helper *fb_helper = info->par; - struct drm_framebuffer *fb = fb_helper->fb; - - /* - * The driver really shouldn't advertise pseudo/directcolor - * visuals if it can't deal with the palette. - */ - if (WARN_ON(!fb_helper->funcs->gamma_set || - !fb_helper->funcs->gamma_get)) - return -EINVAL; - - WARN_ON(fb->format->cpp[0] != 1); - - fb_helper->funcs->gamma_set(crtc, red, green, blue, regno); - - return 0; -} - static int setcmap_pseudo_palette(struct fb_cmap *cmap, struct fb_info *info) { u32 *palette = (u32 *)info->pseudo_palette; @@ -1248,56 +1227,21 @@ static int setcmap_pseudo_palette(struct fb_cmap *cmap, struct fb_info *info) return 0; } -/** - * drm_fb_helper_setcmap - implementation for &fb_ops.fb_setcmap - * @cmap: cmap to set - * @info: fbdev registered by the helper - */ -int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) +static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; - struct drm_device *dev = fb_helper->dev; - const struct drm_crtc_helper_funcs *crtc_funcs; - u16 *red, *green, *blue, *transp; struct drm_crtc *crtc; u16 *r, *g, *b; - int i, j, rc = 0; - int start; - - if (oops_in_progress) - return -EBUSY; - - mutex_lock(&fb_helper->lock); - if (!drm_fb_helper_is_bound(fb_helper)) { - mutex_unlock(&fb_helper->lock); - return -EBUSY; - } - - drm_modeset_lock_all(dev); - if (info->fix.visual == FB_VISUAL_TRUECOLOR) { - rc = setcmap_pseudo_palette(cmap, info); - goto out; - } + int i, ret = 0; + drm_modeset_lock_all(fb_helper->dev); for (i = 0; i < fb_helper->crtc_count; i++) { crtc = fb_helper->crtc_info[i].mode_set.crtc; - crtc_funcs = crtc->helper_private; + if (!crtc->funcs->gamma_set || !crtc->gamma_size) + return -EINVAL; - red = cmap->red; - green = cmap->green; - blue = cmap->blue; - transp = cmap->transp; - start = cmap->start; - - if (!crtc->gamma_size) { - rc = -EINVAL; - goto out; - } - - if (cmap->start + cmap->len > crtc->gamma_size) { - rc = -EINVAL; - goto out; - } + if (cmap->start + cmap->len > crtc->gamma_size) + return -EINVAL; r = crtc->gamma_store; g = r + crtc->gamma_size; @@ -1307,27 +1251,172 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) memcpy(g + cmap->start, cmap->green, cmap->len * sizeof(*g)); memcpy(b + cmap->start, cmap->blue, cmap->len * sizeof(*b)); - for (j = 0; j < cmap->len; j++) { - u16 hred, hgreen, hblue, htransp = 0xffff; - - hred = *red++; - hgreen = *green++; - hblue = *blue++; - - if 
(transp) - htransp = *transp++; - - rc = setcolreg(crtc, hred, hgreen, hblue, start++, info); - if (rc) - goto out; - } - if (crtc_funcs->load_lut) - crtc_funcs->load_lut(crtc); + ret = crtc->funcs->gamma_set(crtc, r, g, b, + crtc->gamma_size, NULL); + if (ret) + return ret; } - out: - drm_modeset_unlock_all(dev); + drm_modeset_unlock_all(fb_helper->dev); + + return ret; +} + +static struct drm_property_blob *setcmap_new_gamma_lut(struct drm_crtc *crtc, + struct fb_cmap *cmap) +{ + struct drm_device *dev = crtc->dev; + struct drm_property_blob *gamma_lut; + struct drm_color_lut *lut; + int size = crtc->gamma_size; + int i; + + if (!size || cmap->start + cmap->len > size) + return ERR_PTR(-EINVAL); + + gamma_lut = drm_property_create_blob(dev, sizeof(*lut) * size, NULL); + if (IS_ERR(gamma_lut)) + return gamma_lut; + + lut = (struct drm_color_lut *)gamma_lut->data; + if (cmap->start || cmap->len != size) { + u16 *r = crtc->gamma_store; + u16 *g = r + crtc->gamma_size; + u16 *b = g + crtc->gamma_size; + + for (i = 0; i < cmap->start; i++) { + lut[i].red = r[i]; + lut[i].green = g[i]; + lut[i].blue = b[i]; + } + for (i = cmap->start + cmap->len; i < size; i++) { + lut[i].red = r[i]; + lut[i].green = g[i]; + lut[i].blue = b[i]; + } + } + + for (i = 0; i < cmap->len; i++) { + lut[cmap->start + i].red = cmap->red[i]; + lut[cmap->start + i].green = cmap->green[i]; + lut[cmap->start + i].blue = cmap->blue[i]; + } + + return gamma_lut; +} + +static int setcmap_atomic(struct fb_cmap *cmap, struct fb_info *info) +{ + struct drm_fb_helper *fb_helper = info->par; + struct drm_device *dev = fb_helper->dev; + struct drm_property_blob *gamma_lut = NULL; + struct drm_modeset_acquire_ctx ctx; + struct drm_crtc_state *crtc_state; + struct drm_atomic_state *state; + struct drm_crtc *crtc; + u16 *r, *g, *b; + int i, ret = 0; + bool replaced; + + drm_modeset_acquire_init(&ctx, 0); + + state = drm_atomic_state_alloc(dev); + if (!state) { + ret = -ENOMEM; + goto out_ctx; + } + + state->acquire_ctx = &ctx; +retry: + for (i = 0; i < fb_helper->crtc_count; i++) { + crtc = fb_helper->crtc_info[i].mode_set.crtc; + + if (!gamma_lut) + gamma_lut = setcmap_new_gamma_lut(crtc, cmap); + if (IS_ERR(gamma_lut)) { + ret = PTR_ERR(gamma_lut); + gamma_lut = NULL; + goto out_state; + } + + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) { + ret = PTR_ERR(crtc_state); + goto out_state; + } + + replaced = drm_property_replace_blob(&crtc_state->degamma_lut, + NULL); + replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL); + replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, + gamma_lut); + crtc_state->color_mgmt_changed |= replaced; + } + + ret = drm_atomic_commit(state); + if (ret) + goto out_state; + + for (i = 0; i < fb_helper->crtc_count; i++) { + crtc = fb_helper->crtc_info[i].mode_set.crtc; + + r = crtc->gamma_store; + g = r + crtc->gamma_size; + b = g + crtc->gamma_size; + + memcpy(r + cmap->start, cmap->red, cmap->len * sizeof(*r)); + memcpy(g + cmap->start, cmap->green, cmap->len * sizeof(*g)); + memcpy(b + cmap->start, cmap->blue, cmap->len * sizeof(*b)); + } + +out_state: + if (ret == -EDEADLK) + goto backoff; + + drm_property_blob_put(gamma_lut); + drm_atomic_state_put(state); +out_ctx: + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + return ret; + +backoff: + drm_atomic_state_clear(state); + drm_modeset_backoff(&ctx); + goto retry; +} + +/** + * drm_fb_helper_setcmap - implementation for &fb_ops.fb_setcmap + * @cmap: cmap to set + * @info: fbdev 
registered by the helper + */ +int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) +{ + struct drm_fb_helper *fb_helper = info->par; + int ret; + + if (oops_in_progress) + return -EBUSY; + + mutex_lock(&fb_helper->lock); + + if (!drm_fb_helper_is_bound(fb_helper)) { + ret = -EBUSY; + goto out; + } + + if (info->fix.visual == FB_VISUAL_TRUECOLOR) + ret = setcmap_pseudo_palette(cmap, info); + else if (drm_drv_uses_atomic_modeset(fb_helper->dev)) + ret = setcmap_atomic(cmap, info); + else + ret = setcmap_legacy(cmap, info); + +out: mutex_unlock(&fb_helper->lock); - return rc; + + return ret; } EXPORT_SYMBOL(drm_fb_helper_setcmap); From 7611750784664db46d0db95631e322aeb263dde7 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 21 Jun 2017 12:31:41 -0400 Subject: [PATCH 0244/1795] drm/amdgpu: use kernel is_power_of_2 rather than local version MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the kernel provided version. Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 2fe1e0a20c17..f3811f6197e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1031,19 +1031,6 @@ static unsigned int amdgpu_vga_set_decode(void *cookie, bool state) return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; } -/** - * amdgpu_check_pot_argument - check that argument is a power of two - * - * @arg: value to check - * - * Validates that a certain argument is a power of two (all asics). - * Returns true if argument is valid. - */ -static bool amdgpu_check_pot_argument(int arg) -{ - return (arg & (arg - 1)) == 0; -} - static void amdgpu_check_block_size(struct amdgpu_device *adev) { /* defines number of bits in page table versus page directory, @@ -1077,7 +1064,7 @@ static void amdgpu_check_vm_size(struct amdgpu_device *adev) if (amdgpu_vm_size == -1) return; - if (!amdgpu_check_pot_argument(amdgpu_vm_size)) { + if (!is_power_of_2(amdgpu_vm_size)) { dev_warn(adev->dev, "VM size (%d) must be a power of 2\n", amdgpu_vm_size); goto def_value; @@ -1118,7 +1105,7 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n", amdgpu_sched_jobs); amdgpu_sched_jobs = 4; - } else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)){ + } else if (!is_power_of_2(amdgpu_sched_jobs)){ dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n", amdgpu_sched_jobs); amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs); @@ -1138,7 +1125,7 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) amdgpu_check_block_size(adev); if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 || - !amdgpu_check_pot_argument(amdgpu_vram_page_split))) { + !is_power_of_2(amdgpu_vram_page_split))) { dev_warn(adev->dev, "invalid VRAM page split (%d)\n", amdgpu_vram_page_split); amdgpu_vram_page_split = 1024; From fd66560b803e32c873164105a0864fdfc1163633 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Thu, 22 Jun 2017 13:09:43 +0800 Subject: [PATCH 0245/1795] drm/amdgpu: enable 4 level page table on raven (v3) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v1: enable 4 level-page table on raven v2: add back legacy 2 level page table on raven v3: set num_level in initial switch 
statement Signed-off-by: Hawking Zhang Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 32 ++++++++++++++++++--------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 175ba5f9691c..c6394ea69f96 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -537,10 +537,20 @@ static int gmc_v9_0_sw_init(void *handle) spin_lock_init(&adev->mc.invalidate_lock); - if (adev->flags & AMD_IS_APU) { + switch (adev->asic_type) { + case CHIP_RAVEN: adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; - amdgpu_vm_adjust_size(adev, 64); - } else { + if (adev->rev_id == 0x0 || adev->rev_id == 0x1) { + adev->vm_manager.vm_size = 1U << 18; + adev->vm_manager.block_size = 9; + adev->vm_manager.num_level = 3; + } else { + /* vm_size is 64GB for legacy 2-level page support*/ + amdgpu_vm_adjust_size(adev, 64); + adev->vm_manager.num_level = 1; + } + break; + case CHIP_VEGA10: /* XXX Don't know how to get VRAM type yet. */ adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM; /* @@ -550,11 +560,16 @@ static int gmc_v9_0_sw_init(void *handle) */ adev->vm_manager.vm_size = 1U << 18; adev->vm_manager.block_size = 9; - DRM_INFO("vm size is %llu GB, block size is %u-bit\n", - adev->vm_manager.vm_size, - adev->vm_manager.block_size); + adev->vm_manager.num_level = 3; + break; + default: + break; } + DRM_INFO("vm size is %llu GB, block size is %u-bit\n", + adev->vm_manager.vm_size, + adev->vm_manager.block_size); + /* This interrupt is VMC page fault.*/ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0, &adev->mc.vm_fault); @@ -619,11 +634,6 @@ static int gmc_v9_0_sw_init(void *handle) adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS; adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS; - /* TODO: fix num_level for APU when updating vm size and block size */ - if (adev->flags & AMD_IS_APU) - adev->vm_manager.num_level = 1; - else - adev->vm_manager.num_level = 3; amdgpu_vm_manager_init(adev); return 0; From dd0792c1be56843b12008ccc625dc7bad8efd7ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 27 Jun 2017 14:48:15 -0400 Subject: [PATCH 0246/1795] drm/amdgpu: simplify VM shadow handling v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that we don't join PTE updates any more we don't need to call the update function twice for this. 
v2: rebased Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 31 +++++++++----------------- 1 file changed, 11 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 5795f81369f0..cda9e5d8b831 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -77,8 +77,6 @@ struct amdgpu_pte_update_params { void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags); - /* indicate update pt or its shadow */ - bool shadow; /* The next two are used during VM update by CPU * DMA addresses to use for mapping * Kernel pointer of PD/PT BO that needs to be updated @@ -1299,16 +1297,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, return -EINVAL; } - if (params->shadow) { - if (WARN_ONCE(use_cpu_update, - "CPU VM update doesn't suuport shadow pages")) - return 0; - - if (!pt->shadow) - return 0; - pt = pt->shadow; - } - if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; else @@ -1318,11 +1306,20 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, r = amdgpu_bo_kmap(pt, (void *)&pe_start); if (r) return r; + + WARN_ONCE(pt->shadow, + "CPU VM update doesn't support shadow pages"); + } else { + if (pt->shadow) { + pe_start = amdgpu_bo_gpu_offset(pt->shadow); + pe_start += (addr & mask) * 8; + params->func(params, pe_start, dst, nptes, + AMDGPU_GPU_PAGE_SIZE, flags); + } pe_start = amdgpu_bo_gpu_offset(pt); + } pe_start += (addr & mask) * 8; - params->func(params, pe_start, dst, nptes, AMDGPU_GPU_PAGE_SIZE, flags); @@ -1459,7 +1456,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, params.func = amdgpu_vm_cpu_set_ptes; params.pages_addr = pages_addr; - params.shadow = false; return amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags); } @@ -1542,11 +1538,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) goto error_free; - params.shadow = true; - r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags); - if (r) - goto error_free; - params.shadow = false; r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags); if (r) goto error_free; From 84b5d3d10d6c7b8ff53e756217405a535691ff7d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 19 Jun 2017 17:00:38 -0400 Subject: [PATCH 0247/1795] drm/amdgpu: disable vga render in dce hw_init This got dropped accidentally with the fb location changes, but for some reason, this doesn't seem to cause an issue on all cards which is why I never saw it despite extensive testing. I suspect it may only be an issue on systems with a legacy sbios that enables vga.
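For reference, the dce_vX_0_set_vga_render_state(adev, false) calls added below boil down to clearing the VGA_VSTATUS_CNTL field of VGA_RENDER_CONTROL, the same register sequence visible in the stop_mc_access paths removed later in this series. A minimal illustrative sketch only, not the actual driver helper:

    static void dce_disable_vga_render_sketch(struct amdgpu_device *adev)
    {
            u32 tmp = RREG32(mmVGA_RENDER_CONTROL);

            /* stop the VGA engine from driving the display controllers */
            tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
            WREG32(mmVGA_RENDER_CONTROL, tmp);
    }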
Tested-by: Andres Rodriguez Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 2 ++ drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 2 ++ drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 2 ++ drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 2 ++ 4 files changed, 8 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 9f78c03a2e31..9bda363e02be 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -3025,6 +3025,8 @@ static int dce_v10_0_hw_init(void *handle) dce_v10_0_init_golden_registers(adev); + /* disable vga render */ + dce_v10_0_set_vga_render_state(adev, false); /* init dig PHYs, disp eng pll */ amdgpu_atombios_encoder_init_dig(adev); amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 4bcf01dc567a..c02c7fb75c63 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -3086,6 +3086,8 @@ static int dce_v11_0_hw_init(void *handle) dce_v11_0_init_golden_registers(adev); + /* disable vga render */ + dce_v11_0_set_vga_render_state(adev, false); /* init dig PHYs, disp eng pll */ amdgpu_atombios_crtc_powergate_init(adev); amdgpu_atombios_encoder_init_dig(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index fd134a4629d7..0a8ad0fc13ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -2873,6 +2873,8 @@ static int dce_v6_0_hw_init(void *handle) int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* disable vga render */ + dce_v6_0_set_vga_render_state(adev, false); /* init dig PHYs, disp eng pll */ amdgpu_atombios_encoder_init_dig(adev); amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index a9e869554627..7e06c36b6ee9 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2870,6 +2870,8 @@ static int dce_v8_0_hw_init(void *handle) int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* disable vga render */ + dce_v8_0_set_vga_render_state(adev, false); /* init dig PHYs, disp eng pll */ amdgpu_atombios_encoder_init_dig(adev); amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); From 9081c4cf7d4ea40cf7e77e8efe3c2bedb924b830 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 1 Nov 2016 13:08:33 -0400 Subject: [PATCH 0248/1795] drm/amdgpu/gmc8: use the vram location programmed by the vbios MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This makes mc programming much simpler in future patches. Since evergreen, the vbios has been programming the fb location to the proper vram size. The only reason to reprogram it would be to change the location. 
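The hunks that follow read the base back from the hardware instead of assuming 0. On these parts MC_VM_FB_LOCATION packs the framebuffer aperture as two 16-bit fields in 16 MiB (1 << 24) units, base in bits 15:0 and top in bits 31:16, which is why the value is masked with 0xFFFF and shifted left by 24. A minimal decode sketch under that assumption (helper name invented for illustration):

    static void mc_fb_location_decode_sketch(u32 fb_location, u64 *base, u64 *top)
    {
            /* both fields hold byte addresses >> 24, i.e. 16 MiB granularity */
            *base = (u64)(fb_location & 0xFFFF) << 24;
            *top = (u64)((fb_location >> 16) & 0xFFFF) << 24;
    }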
Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index cc9f88057cd5..1c8c0f536aed 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -404,13 +404,16 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev) static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) { + u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; + base <<= 24; + if (mc->mc_vram_size > 0xFFC0000000ULL) { /* leave room for at least 1024M GTT */ dev_warn(adev->dev, "limiting VRAM\n"); mc->real_vram_size = 0xFFC0000000ULL; mc->mc_vram_size = 0xFFC0000000ULL; } - amdgpu_vram_location(adev, &adev->mc, 0); + amdgpu_vram_location(adev, &adev->mc, base); adev->mc.gtt_base_align = 0; amdgpu_gtt_location(adev, mc); } From e0205a7156a62bfcaa3f3f2ed556faf3c9b38aad Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 1 Nov 2016 13:14:45 -0400 Subject: [PATCH 0249/1795] drm/amdgpu/gmc7: use the vram location programmed by the vbios MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This makes mc programming much simpler in future patches. Since evergreen, the vbios has been programming the fb location to the proper vram size. The only reason to reprogram it would be to change the location. Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 7e9ea53edf8b..0fc7d31176a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -242,13 +242,16 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev) static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) { + u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; + base <<= 24; + if (mc->mc_vram_size > 0xFFC0000000ULL) { /* leave room for at least 1024M GTT */ dev_warn(adev->dev, "limiting VRAM\n"); mc->real_vram_size = 0xFFC0000000ULL; mc->mc_vram_size = 0xFFC0000000ULL; } - amdgpu_vram_location(adev, &adev->mc, 0); + amdgpu_vram_location(adev, &adev->mc, base); adev->mc.gtt_base_align = 0; amdgpu_gtt_location(adev, mc); } From ba3a5b83dd9b7490fcab098c875709c03cf439ef Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 1 Nov 2016 13:15:29 -0400 Subject: [PATCH 0250/1795] drm/amdgpu/gmc6: use the vram location programmed by the vbios MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This makes mc programming much simpler in future patches. Since evergreen, the vbios has been programming the fb location to the proper vram size. The only reason to reprogram it would be to change the location. 
Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index d0214d942bfc..224b6935c885 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -228,12 +228,15 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev) static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) { + u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; + base <<= 24; + if (mc->mc_vram_size > 0xFFC0000000ULL) { dev_warn(adev->dev, "limiting VRAM\n"); mc->real_vram_size = 0xFFC0000000ULL; mc->mc_vram_size = 0xFFC0000000ULL; } - amdgpu_vram_location(adev, &adev->mc, 0); + amdgpu_vram_location(adev, &adev->mc, base); adev->mc.gtt_base_align = 0; amdgpu_gtt_location(adev, mc); } From cad81e34a8d268146fda82d2379eafb0ce9ea775 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 1 Nov 2016 13:33:28 -0400 Subject: [PATCH 0251/1795] drm/amdgpu/gmc8: drop fb location programming MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No need to do this as the vbios does this for us. As such we no longer need to stop the mc during init. Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 1c8c0f536aed..72ab2d04f048 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -428,7 +428,6 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev, */ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) { - struct amdgpu_mode_mc_save save; u32 tmp; int i, j; @@ -442,10 +441,6 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) } WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); - if (adev->mode_info.num_crtc) - amdgpu_display_set_vga_render_state(adev, false); - - gmc_v8_0_mc_stop(adev, &save); if (gmc_v8_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } @@ -456,20 +451,12 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) adev->mc.vram_end >> 12); WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, adev->vram_scratch.gpu_addr >> 12); - tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16; - tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF); - WREG32(mmMC_VM_FB_LOCATION, tmp); - /* XXX double check these! */ - WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8)); - WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30)); - WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF); WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); if (gmc_v8_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } - gmc_v8_0_mc_resume(adev, &save); WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK); From 219611db30573bab37ff46f4e36571eba0bbd6a5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 1 Nov 2016 13:39:10 -0400 Subject: [PATCH 0252/1795] drm/amdgpu/gmc7: drop fb location programming MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No need to do this as the vbios does this for us. As such we no longer need to stop the mc during init. 
Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 0fc7d31176a5..31ad68a68c77 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -266,7 +266,6 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev, */ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) { - struct amdgpu_mode_mc_save save; u32 tmp; int i, j; @@ -280,10 +279,6 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) } WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); - if (adev->mode_info.num_crtc) - amdgpu_display_set_vga_render_state(adev, false); - - gmc_v7_0_mc_stop(adev, &save); if (gmc_v7_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } @@ -294,20 +289,12 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) adev->mc.vram_end >> 12); WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, adev->vram_scratch.gpu_addr >> 12); - tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16; - tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF); - WREG32(mmMC_VM_FB_LOCATION, tmp); - /* XXX double check these! */ - WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8)); - WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30)); - WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF); WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); if (gmc_v7_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } - gmc_v7_0_mc_resume(adev, &save); WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK); From 71086a3e8470eee3e54a454d276e6f214be28fdc Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 15 Nov 2016 17:37:41 -0500 Subject: [PATCH 0253/1795] drm/amdgpu/gmc6: drop fb location programming MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No need to do this as the vbios does this for us. As such we no longer need to stop the mc during init. Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 224b6935c885..a652fbaa7b8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -243,8 +243,6 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev, static void gmc_v6_0_mc_program(struct amdgpu_device *adev) { - struct amdgpu_mode_mc_save save; - u32 tmp; int i, j; /* Initialize HDP */ @@ -257,11 +255,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) } WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); - if (adev->mode_info.num_crtc) - amdgpu_display_set_vga_render_state(adev, false); - - gmc_v6_0_mc_stop(adev, &save); - if (gmc_v6_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } @@ -274,13 +267,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) adev->mc.vram_end >> 12); WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, adev->vram_scratch.gpu_addr >> 12); - tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16; - tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF); - WREG32(mmMC_VM_FB_LOCATION, tmp); - /* XXX double check these! 
*/ - WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8)); - WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30)); - WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF); WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); @@ -288,7 +274,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) if (gmc_v6_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } - gmc_v6_0_mc_resume(adev, &save); } static int gmc_v6_0_mc_init(struct amdgpu_device *adev) From b3fba8ad9d4be15cdf9aedb52b6a1262b213a1f4 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 22 Nov 2016 18:09:47 -0500 Subject: [PATCH 0254/1795] drm/amdgpu: drop set_vga_render_state from display funcs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Not used. Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 2 -- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 1 - drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 1 - drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 1 - drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 1 - drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 7 ------- 7 files changed, 14 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index ff7bf1a9f967..407b6c6736ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1898,7 +1898,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) -#define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r)) #define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc)) #define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc)) #define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 43a9d3aec6c4..35bd93cb0c19 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -264,8 +264,6 @@ struct amdgpu_mode_mc_save { }; struct amdgpu_display_funcs { - /* vga render */ - void (*set_vga_render_state)(struct amdgpu_device *adev, bool render); /* display watermarks */ void (*bandwidth_update)(struct amdgpu_device *adev); /* get frame count */ diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 9bda363e02be..fba084dc43a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -3739,7 +3739,6 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev, } static const struct amdgpu_display_funcs dce_v10_0_display_funcs = { - .set_vga_render_state = &dce_v10_0_set_vga_render_state, .bandwidth_update = &dce_v10_0_bandwidth_update, .vblank_get_counter = &dce_v10_0_vblank_get_counter, .vblank_wait = &dce_v10_0_vblank_wait, diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index c02c7fb75c63..4eb3b90903ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -3808,7 +3808,6 @@ static 
void dce_v11_0_encoder_add(struct amdgpu_device *adev, } static const struct amdgpu_display_funcs dce_v11_0_display_funcs = { - .set_vga_render_state = &dce_v11_0_set_vga_render_state, .bandwidth_update = &dce_v11_0_bandwidth_update, .vblank_get_counter = &dce_v11_0_vblank_get_counter, .vblank_wait = &dce_v11_0_vblank_wait, diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 0a8ad0fc13ea..fde68b959e91 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -3527,7 +3527,6 @@ static void dce_v6_0_encoder_add(struct amdgpu_device *adev, } static const struct amdgpu_display_funcs dce_v6_0_display_funcs = { - .set_vga_render_state = &dce_v6_0_set_vga_render_state, .bandwidth_update = &dce_v6_0_bandwidth_update, .vblank_get_counter = &dce_v6_0_vblank_get_counter, .vblank_wait = &dce_v6_0_vblank_wait, diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 7e06c36b6ee9..8e530a01ae83 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -3576,7 +3576,6 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev, } static const struct amdgpu_display_funcs dce_v8_0_display_funcs = { - .set_vga_render_state = &dce_v8_0_set_vga_render_state, .bandwidth_update = &dce_v8_0_bandwidth_update, .vblank_get_counter = &dce_v8_0_vblank_get_counter, .vblank_wait = &dce_v8_0_vblank_wait, diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 90bb08309a53..f257702978fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -145,12 +145,6 @@ static void dce_virtual_resume_mc_access(struct amdgpu_device *adev, return; } -static void dce_virtual_set_vga_render_state(struct amdgpu_device *adev, - bool render) -{ - return; -} - /** * dce_virtual_bandwidth_update - program display watermarks * @@ -677,7 +671,6 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev, } static const struct amdgpu_display_funcs dce_virtual_display_funcs = { - .set_vga_render_state = &dce_virtual_set_vga_render_state, .bandwidth_update = &dce_virtual_bandwidth_update, .vblank_get_counter = &dce_virtual_vblank_get_counter, .vblank_wait = &dce_virtual_vblank_wait, From e4f6b39e8bcd1f4c455acbc1aef2de9a4a32deeb Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 8 Dec 2016 14:53:27 -0500 Subject: [PATCH 0255/1795] drm/amdgpu: remove *_mc_access from display funcs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These are no longer needed now that we use the fb_location programmed by the vbios. 
Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 - drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 - drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 10 -- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 130 --------------------- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 75 ------------ drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 113 ------------------ drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 77 ------------ drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 91 +++++++-------- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 18 +-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 17 +-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 16 +-- 11 files changed, 51 insertions(+), 505 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 407b6c6736ec..20e0ed96efbb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -613,7 +613,6 @@ struct amdgpu_mc { struct amdgpu_irq_src vm_fault; uint32_t vram_type; uint32_t srbm_soft_reset; - struct amdgpu_mode_mc_save save; bool prt_warning; uint64_t stolen_size; /* apertures */ @@ -1910,8 +1909,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos)) #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c)) #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r)) -#define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s)) -#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s)) #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b)) #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index f3811f6197e0..ae4387fd2b65 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2851,12 +2851,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) r = amdgpu_suspend(adev); retry: - /* Disable fb access */ - if (adev->mode_info.num_crtc) { - struct amdgpu_mode_mc_save save; - amdgpu_display_stop_mc_access(adev, &save); - amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC); - } if (adev->is_atom_fw) amdgpu_atomfirmware_scratch_regs_save(adev); else diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 35bd93cb0c19..b8abd4e18d51 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -257,12 +257,6 @@ struct amdgpu_audio { int num_pins; }; -struct amdgpu_mode_mc_save { - u32 vga_render_control; - u32 vga_hdp_control; - bool crtc_enabled[AMDGPU_MAX_CRTCS]; -}; - struct amdgpu_display_funcs { /* display watermarks */ void (*bandwidth_update)(struct amdgpu_device *adev); @@ -298,10 +292,6 @@ struct amdgpu_display_funcs { uint16_t connector_object_id, struct amdgpu_hpd *hpd, struct amdgpu_router *router); - void (*stop_mc_access)(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save); - void (*resume_mc_access)(struct amdgpu_device 
*adev, - struct amdgpu_mode_mc_save *save); }; struct amdgpu_mode_info { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index fba084dc43a2..16915a92672b 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -484,134 +484,6 @@ static bool dce_v10_0_is_display_hung(struct amdgpu_device *adev) return true; } -static void dce_v10_0_stop_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - u32 crtc_enabled, tmp; - int i; - - save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL); - save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL); - - /* disable VGA render */ - tmp = RREG32(mmVGA_RENDER_CONTROL); - tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); - WREG32(mmVGA_RENDER_CONTROL, tmp); - - /* blank the display controllers */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), - CRTC_CONTROL, CRTC_MASTER_EN); - if (crtc_enabled) { -#if 0 - u32 frame_count; - int j; - - save->crtc_enabled[i] = true; - tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) { - amdgpu_display_vblank_wait(adev, i); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); - tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1); - WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - } - /* wait for the next frame */ - frame_count = amdgpu_display_vblank_get_counter(adev, i); - for (j = 0; j < adev->usec_timeout; j++) { - if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) - break; - udelay(1); - } - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) { - tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1); - WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1); - WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp); - } -#else - /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); - tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); - tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); - WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - save->crtc_enabled[i] = false; - /* ***** */ -#endif - } else { - save->crtc_enabled[i] = false; - } - } -} - -static void dce_v10_0_resume_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - u32 tmp, frame_count; - int i, j; - - /* update crtc base addresses */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], - upper_32_bits(adev->mc.vram_start)); - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], - upper_32_bits(adev->mc.vram_start)); - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], - (u32)adev->mc.vram_start); - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], - (u32)adev->mc.vram_start); - - if (save->crtc_enabled[i]) { - tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 0) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 0); - 
WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) { - tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0); - WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0); - WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp); - } - for (j = 0; j < adev->usec_timeout; j++) { - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0) - break; - udelay(1); - } - tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); - tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); - WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - /* wait for the next frame */ - frame_count = amdgpu_display_vblank_get_counter(adev, i); - for (j = 0; j < adev->usec_timeout; j++) { - if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) - break; - udelay(1); - } - } - } - - WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); - WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start)); - - /* Unlock vga access */ - WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control); - mdelay(1); - WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control); -} - static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev, bool render) { @@ -3751,8 +3623,6 @@ static const struct amdgpu_display_funcs dce_v10_0_display_funcs = { .page_flip_get_scanoutpos = &dce_v10_0_crtc_get_scanoutpos, .add_encoder = &dce_v10_0_encoder_add, .add_connector = &amdgpu_connector_add, - .stop_mc_access = &dce_v10_0_stop_mc_access, - .resume_mc_access = &dce_v10_0_resume_mc_access, }; static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 4eb3b90903ac..6a43f25c5d96 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -499,79 +499,6 @@ static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev) return true; } -static void dce_v11_0_stop_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - u32 crtc_enabled, tmp; - int i; - - save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL); - save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL); - - /* disable VGA render */ - tmp = RREG32(mmVGA_RENDER_CONTROL); - tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); - WREG32(mmVGA_RENDER_CONTROL, tmp); - - /* blank the display controllers */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), - CRTC_CONTROL, CRTC_MASTER_EN); - if (crtc_enabled) { -#if 1 - save->crtc_enabled[i] = true; - tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) { - /*it is correct only for RGB ; black is 0*/ - WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0); - tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1); - WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - } -#else - /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); - tmp 
= RREG32(mmCRTC_CONTROL + crtc_offsets[i]); - tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); - WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - save->crtc_enabled[i] = false; - /* ***** */ -#endif - } else { - save->crtc_enabled[i] = false; - } - } -} - -static void dce_v11_0_resume_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - u32 tmp; - int i; - - /* update crtc base addresses */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], - upper_32_bits(adev->mc.vram_start)); - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], - (u32)adev->mc.vram_start); - - if (save->crtc_enabled[i]) { - tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); - tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0); - WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - } - } - - WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); - WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start)); - - /* Unlock vga access */ - WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control); - mdelay(1); - WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control); -} - static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev, bool render) { @@ -3820,8 +3747,6 @@ static const struct amdgpu_display_funcs dce_v11_0_display_funcs = { .page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos, .add_encoder = &dce_v11_0_encoder_add, .add_connector = &amdgpu_connector_add, - .stop_mc_access = &dce_v11_0_stop_mc_access, - .resume_mc_access = &dce_v11_0_resume_mc_access, }; static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index fde68b959e91..48d5dd4974de 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -392,117 +392,6 @@ static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev) return mmDC_GPIO_HPD_A; } -static u32 evergreen_get_vblank_counter(struct amdgpu_device* adev, int crtc) -{ - if (crtc >= adev->mode_info.num_crtc) - return 0; - else - return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); -} - -static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - u32 crtc_enabled, tmp, frame_count; - int i, j; - - save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL); - save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL); - - /* disable VGA render */ - WREG32(mmVGA_RENDER_CONTROL, 0); - - /* blank the display controllers */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK; - if (crtc_enabled) { - save->crtc_enabled[i] = true; - tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); - - if (!(tmp & CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK)) { - dce_v6_0_vblank_wait(adev, i); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); - tmp |= CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK; - WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - } - /* wait for the next frame */ - frame_count = evergreen_get_vblank_counter(adev, i); - for (j = 0; j < adev->usec_timeout; j++) { - if (evergreen_get_vblank_counter(adev, i) != frame_count) - break; - udelay(1); - } - - /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ - WREG32(mmCRTC_UPDATE_LOCK + 
crtc_offsets[i], 1); - tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); - tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK; - WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - save->crtc_enabled[i] = false; - /* ***** */ - } else { - save->crtc_enabled[i] = false; - } - } -} - -static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - u32 tmp; - int i, j; - - /* update crtc base addresses */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], - upper_32_bits(adev->mc.vram_start)); - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], - upper_32_bits(adev->mc.vram_start)); - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], - (u32)adev->mc.vram_start); - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], - (u32)adev->mc.vram_start); - } - - WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); - WREG32(mmVGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start); - - /* unlock regs and wait for update */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - if (save->crtc_enabled[i]) { - tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]); - if ((tmp & 0x7) != 0) { - tmp &= ~0x7; - WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (tmp & GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK) { - tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK; - WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]); - if (tmp & 1) { - tmp &= ~1; - WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp); - } - for (j = 0; j < adev->usec_timeout; j++) { - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if ((tmp & GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) == 0) - break; - udelay(1); - } - } - } - - /* Unlock vga access */ - WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control); - mdelay(1); - WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control); - -} - static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev, bool render) { @@ -3539,8 +3428,6 @@ static const struct amdgpu_display_funcs dce_v6_0_display_funcs = { .page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos, .add_encoder = &dce_v6_0_encoder_add, .add_connector = &amdgpu_connector_add, - .stop_mc_access = &dce_v6_0_stop_mc_access, - .resume_mc_access = &dce_v6_0_resume_mc_access, }; static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 8e530a01ae83..647a48f03574 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -419,81 +419,6 @@ static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev) return true; } -static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - u32 crtc_enabled, tmp; - int i; - - save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL); - save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL); - - /* disable VGA render */ - tmp = RREG32(mmVGA_RENDER_CONTROL); - tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); - WREG32(mmVGA_RENDER_CONTROL, tmp); - - /* blank the display controllers */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), - CRTC_CONTROL, CRTC_MASTER_EN); - if (crtc_enabled) { -#if 1 - save->crtc_enabled[i] = true; - tmp = 
RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) { - /*it is correct only for RGB ; black is 0*/ - WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0); - tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1); - WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - } - mdelay(20); -#else - /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); - tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); - tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); - WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - save->crtc_enabled[i] = false; - /* ***** */ -#endif - } else { - save->crtc_enabled[i] = false; - } - } -} - -static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - u32 tmp; - int i; - - /* update crtc base addresses */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], - upper_32_bits(adev->mc.vram_start)); - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], - (u32)adev->mc.vram_start); - - if (save->crtc_enabled[i]) { - tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); - tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0); - WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - } - mdelay(20); - } - - WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); - WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start)); - - /* Unlock vga access */ - WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control); - mdelay(1); - WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control); -} - static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev, bool render) { @@ -3588,8 +3513,6 @@ static const struct amdgpu_display_funcs dce_v8_0_display_funcs = { .page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos, .add_encoder = &dce_v8_0_encoder_add, .add_connector = &amdgpu_connector_add, - .stop_mc_access = &dce_v8_0_stop_mc_access, - .resume_mc_access = &dce_v8_0_resume_mc_access, }; static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index f257702978fe..5ac2e17b76ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -95,56 +95,6 @@ static u32 dce_virtual_hpd_get_gpio_reg(struct amdgpu_device *adev) return 0; } -static void dce_virtual_stop_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - switch (adev->asic_type) { -#ifdef CONFIG_DRM_AMDGPU_SI - case CHIP_TAHITI: - case CHIP_PITCAIRN: - case CHIP_VERDE: - case CHIP_OLAND: - dce_v6_0_disable_dce(adev); - break; -#endif -#ifdef CONFIG_DRM_AMDGPU_CIK - case CHIP_BONAIRE: - case CHIP_HAWAII: - case CHIP_KAVERI: - case CHIP_KABINI: - case CHIP_MULLINS: - dce_v8_0_disable_dce(adev); - break; -#endif - case CHIP_FIJI: - case CHIP_TONGA: - dce_v10_0_disable_dce(adev); - break; - case CHIP_CARRIZO: - case CHIP_STONEY: - case CHIP_POLARIS10: - case CHIP_POLARIS11: - case CHIP_POLARIS12: - dce_v11_0_disable_dce(adev); - break; - case CHIP_TOPAZ: -#ifdef CONFIG_DRM_AMDGPU_SI - case CHIP_HAINAN: -#endif - /* no DCE */ - return; - default: - DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type); - } - - return; -} -static void dce_virtual_resume_mc_access(struct 
amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - return; -} - /** * dce_virtual_bandwidth_update - program display watermarks * @@ -516,6 +466,45 @@ static int dce_virtual_sw_fini(void *handle) static int dce_virtual_hw_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + switch (adev->asic_type) { +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + case CHIP_OLAND: + dce_v6_0_disable_dce(adev); + break; +#endif +#ifdef CONFIG_DRM_AMDGPU_CIK + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_KAVERI: + case CHIP_KABINI: + case CHIP_MULLINS: + dce_v8_0_disable_dce(adev); + break; +#endif + case CHIP_FIJI: + case CHIP_TONGA: + dce_v10_0_disable_dce(adev); + break; + case CHIP_CARRIZO: + case CHIP_STONEY: + case CHIP_POLARIS11: + case CHIP_POLARIS10: + dce_v11_0_disable_dce(adev); + break; + case CHIP_TOPAZ: +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_HAINAN: +#endif + /* no DCE */ + break; + default: + DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type); + } return 0; } @@ -683,8 +672,6 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = { .page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos, .add_encoder = NULL, .add_connector = NULL, - .stop_mc_access = &dce_virtual_stop_mc_access, - .resume_mc_access = &dce_virtual_resume_mc_access, }; static void dce_virtual_set_display_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index a652fbaa7b8c..c1a124a86775 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -66,14 +66,10 @@ static const u32 crtc_offsets[6] = SI_CRTC5_REGISTER_OFFSET }; -static void gmc_v6_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v6_0_mc_stop(struct amdgpu_device *adev) { u32 blackout; - if (adev->mode_info.num_crtc) - amdgpu_display_stop_mc_access(adev, save); - gmc_v6_0_wait_for_idle((void *)adev); blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); @@ -90,8 +86,7 @@ static void gmc_v6_0_mc_stop(struct amdgpu_device *adev, } -static void gmc_v6_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v6_0_mc_resume(struct amdgpu_device *adev) { u32 tmp; @@ -103,10 +98,6 @@ static void gmc_v6_0_mc_resume(struct amdgpu_device *adev, tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1); tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1); WREG32(mmBIF_FB_EN, tmp); - - if (adev->mode_info.num_crtc) - amdgpu_display_resume_mc_access(adev, save); - } static int gmc_v6_0_init_microcode(struct amdgpu_device *adev) @@ -975,7 +966,6 @@ static int gmc_v6_0_wait_for_idle(void *handle) static int gmc_v6_0_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_mode_mc_save save; u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); @@ -991,7 +981,7 @@ static int gmc_v6_0_soft_reset(void *handle) } if (srbm_soft_reset) { - gmc_v6_0_mc_stop(adev, &save); + gmc_v6_0_mc_stop(adev); if (gmc_v6_0_wait_for_idle(adev)) { dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); } @@ -1011,7 +1001,7 @@ static int gmc_v6_0_soft_reset(void *handle) udelay(50); - gmc_v6_0_mc_resume(adev, &save); + gmc_v6_0_mc_resume(adev); udelay(50); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 31ad68a68c77..575d72583746 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ 
b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -76,14 +76,10 @@ static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev) } } -static void gmc_v7_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v7_0_mc_stop(struct amdgpu_device *adev) { u32 blackout; - if (adev->mode_info.num_crtc) - amdgpu_display_stop_mc_access(adev, save); - gmc_v7_0_wait_for_idle((void *)adev); blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); @@ -99,8 +95,7 @@ static void gmc_v7_0_mc_stop(struct amdgpu_device *adev, udelay(100); } -static void gmc_v7_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v7_0_mc_resume(struct amdgpu_device *adev) { u32 tmp; @@ -112,9 +107,6 @@ static void gmc_v7_0_mc_resume(struct amdgpu_device *adev, tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1); tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1); WREG32(mmBIF_FB_EN, tmp); - - if (adev->mode_info.num_crtc) - amdgpu_display_resume_mc_access(adev, save); } /** @@ -1128,7 +1120,6 @@ static int gmc_v7_0_wait_for_idle(void *handle) static int gmc_v7_0_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_mode_mc_save save; u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); @@ -1144,7 +1135,7 @@ static int gmc_v7_0_soft_reset(void *handle) } if (srbm_soft_reset) { - gmc_v7_0_mc_stop(adev, &save); + gmc_v7_0_mc_stop(adev); if (gmc_v7_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); } @@ -1165,7 +1156,7 @@ static int gmc_v7_0_soft_reset(void *handle) /* Wait a little for things to settle down */ udelay(50); - gmc_v7_0_mc_resume(adev, &save); + gmc_v7_0_mc_resume(adev); udelay(50); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 72ab2d04f048..6379177b9fcb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -161,14 +161,10 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) } } -static void gmc_v8_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v8_0_mc_stop(struct amdgpu_device *adev) { u32 blackout; - if (adev->mode_info.num_crtc) - amdgpu_display_stop_mc_access(adev, save); - gmc_v8_0_wait_for_idle(adev); blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); @@ -184,8 +180,7 @@ static void gmc_v8_0_mc_stop(struct amdgpu_device *adev, udelay(100); } -static void gmc_v8_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v8_0_mc_resume(struct amdgpu_device *adev) { u32 tmp; @@ -197,9 +192,6 @@ static void gmc_v8_0_mc_resume(struct amdgpu_device *adev, tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1); tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1); WREG32(mmBIF_FB_EN, tmp); - - if (adev->mode_info.num_crtc) - amdgpu_display_resume_mc_access(adev, save); } /** @@ -1250,7 +1242,7 @@ static int gmc_v8_0_pre_soft_reset(void *handle) if (!adev->mc.srbm_soft_reset) return 0; - gmc_v8_0_mc_stop(adev, &adev->mc.save); + gmc_v8_0_mc_stop(adev); if (gmc_v8_0_wait_for_idle(adev)) { dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); } @@ -1296,7 +1288,7 @@ static int gmc_v8_0_post_soft_reset(void *handle) if (!adev->mc.srbm_soft_reset) return 0; - gmc_v8_0_mc_resume(adev, &adev->mc.save); + gmc_v8_0_mc_resume(adev); return 0; } From 08d3874636a20b8852f8eea1c6ad1054234c9ae6 Mon Sep 17 00:00:00 2001 From: Mario Kleiner Date: Wed, 21 Jun 2017 03:44:55 +0200 Subject: [PATCH 
0256/1795] drm/radeon: Allow vblank_disable_immediate. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With instantaneous high precision vblank timestamping that updates at leading edge of vblank, a cooked hw vblank counter which increments at leading edge of vblank, and reliable page flip execution and completion at leading edge of vblank, we should meet the requirements for fast/immediate vblank irq disable/enable. Testing on Linux-4.12-rc5 + drm-next on a Radeon HD 5770 (DCE 4) with timing measurement equipment indicates this works fine, so allow immediate vblank disable for power saving. For debugging in case of unexpected trouble, booting with kernel cmdline option drm.vblankoffdelay=0 (or echo 0 > /sys/module/drm/parameters/vblankoffdelay) would keep vblank irqs permanently on to approximate old behavior. Reviewed-and-Tested-by: Michel Dänzer Signed-off-by: Mario Kleiner Cc: Alex Deucher Cc: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_irq_kms.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 7aacb44df201..186076492f64 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -283,6 +283,10 @@ int radeon_irq_kms_init(struct radeon_device *rdev) int r = 0; spin_lock_init(&rdev->irq.lock); + + /* Disable vblank irqs aggressively for power-saving */ + rdev->ddev->vblank_disable_immediate = true; + r = drm_vblank_init(rdev->ddev, rdev->num_crtc); if (r) { return r; From 8e1b90cc44181405418071a13ead5892c3879239 Mon Sep 17 00:00:00 2001 From: Mario Kleiner Date: Wed, 21 Jun 2017 03:44:56 +0200 Subject: [PATCH 0257/1795] drm/amdgpu: Allow vblank_disable_immediate. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With instantaneous high precision vblank timestamping that updates at leading edge of vblank, a cooked hw vblank counter which increments at leading edge of vblank, and reliable page flip execution and completion at leading edge of vblank, we should meet the requirements for fast/immediate vblank irq disable/enable. Testing on Linux-4.12-rc5 + drm-next on a Radeon R9 380 Tonga Pro (DCE 10) with timing measurement equipment indicates this works fine, so allow immediate vblank disable for power saving. For debugging in case of unexpected trouble, booting with kernel cmdline option drm.vblankoffdelay=0 (or echo 0 > /sys/module/drm/parameters/vblankoffdelay) would keep vblank irqs permanently on to approximate old behavior. 
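Editorially, both the radeon change above and the amdgpu change below reduce to the same one-line opt-in before drm_vblank_init(). A minimal sketch of that pattern follows; the function name and its surroundings are illustrative only and not part of either patch.

#include <drm/drmP.h>

/* Illustrative only: a driver that provides an accurate hw vblank
 * counter and precise vblank timestamps can opt into immediate vblank
 * irq disable before registering its vblank support.
 */
static int example_vblank_setup(struct drm_device *ddev, unsigned int num_crtc)
{
	/* Let the DRM core turn the vblank irq off as soon as the last
	 * reference is dropped, instead of waiting for the
	 * drm.vblankoffdelay timeout.
	 */
	ddev->vblank_disable_immediate = true;

	return drm_vblank_init(ddev, num_crtc);
}
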
Reviewed-by: Michel Dänzer Signed-off-by: Mario Kleiner Cc: Alex Deucher Cc: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 62da6c5c6095..a28f8aad2035 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -220,6 +220,10 @@ int amdgpu_irq_init(struct amdgpu_device *adev) int r = 0; spin_lock_init(&adev->irq.lock); + + /* Disable vblank irqs aggressively for power-saving */ + adev->ddev->vblank_disable_immediate = true; + r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc); if (r) { return r; From 890419409a3aba2ca7185a824e47d8ded8df11a2 Mon Sep 17 00:00:00 2001 From: Gavin Wan Date: Fri, 23 Jun 2017 13:55:15 -0400 Subject: [PATCH 0258/1795] drm/amdgpu: Support passing amdgpu critical error to host via GPU Mailbox. This feature works for SRIOV enviroment. For non-SRIOV enviroment, the trans_error function does nothing. The error information includes error_code (16bit), error_flags(16bit) and error_data(64bit). Since there are not many errors, we keep the errors in an array and transfer all errors to Host before amdgpu initialization function (amdgpu_device_init) exit. Signed-off-by: Gavin Wan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 21 ++++- drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c | 85 ++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h | 62 ++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 1 + drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 46 ++++++----- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 4 +- drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 1 + drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h | 4 +- 9 files changed, 200 insertions(+), 26 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index faea6349228f..658bac0cdc5e 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -25,7 +25,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \ - amdgpu_queue_mgr.o + amdgpu_queue_mgr.o amdgpu_vf_error.o # add asic specific block amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index ae4387fd2b65..88e45c6d36ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -53,6 +53,7 @@ #include "bif/bif_4_1_d.h" #include #include +#include "amdgpu_vf_error.h" MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); @@ -2134,6 +2135,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_atombios_init(adev); if (r) { dev_err(adev->dev, "amdgpu_atombios_init failed\n"); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); goto failed; } @@ -2144,6 +2146,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (amdgpu_vpost_needed(adev)) { if (!adev->bios) { dev_err(adev->dev, "no vBIOS found\n"); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_NO_VBIOS, 0, 
0); r = -EINVAL; goto failed; } @@ -2151,6 +2154,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_atom_asic_init(adev->mode_info.atom_context); if (r) { dev_err(adev->dev, "gpu post error!\n"); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0); goto failed; } } else { @@ -2162,7 +2166,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_atombios_get_clock_info(adev); if (r) { dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); - return r; + amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); + goto failed; } /* init i2c buses */ amdgpu_atombios_i2c_init(adev); @@ -2172,6 +2177,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_fence_driver_init(adev); if (r) { dev_err(adev->dev, "amdgpu_fence_driver_init failed\n"); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0); goto failed; } @@ -2181,6 +2187,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_init(adev); if (r) { dev_err(adev->dev, "amdgpu_init failed\n"); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); amdgpu_fini(adev); goto failed; } @@ -2200,6 +2207,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_ib_pool_init(adev); if (r) { dev_err(adev->dev, "IB initialization failed (%d).\n", r); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r); goto failed; } @@ -2244,12 +2252,14 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_late_init(adev); if (r) { dev_err(adev->dev, "amdgpu_late_init failed\n"); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r); goto failed; } return 0; failed: + amdgpu_vf_error_trans_all(adev); if (runtime) vga_switcheroo_fini_domain_pm_ops(adev->dev); return r; @@ -2937,6 +2947,7 @@ out: } } else { dev_err(adev->dev, "asic resume failed (%d).\n", r); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r); for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { if (adev->rings[i] && adev->rings[i]->sched.thread) { kthread_unpark(adev->rings[i]->sched.thread); @@ -2947,12 +2958,16 @@ out: drm_helper_resume_force_mode(adev->ddev); ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched); - if (r) + if (r) { /* bad news, how to tell it to userspace ? */ dev_info(adev->dev, "GPU reset failed\n"); - else + amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); + } + else { dev_info(adev->dev, "GPU reset successed!\n"); + } + amdgpu_vf_error_trans_all(adev); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c new file mode 100644 index 000000000000..45ac91861965 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c @@ -0,0 +1,85 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" +#include "amdgpu_vf_error.h" +#include "mxgpu_ai.h" + +#define AMDGPU_VF_ERROR_ENTRY_SIZE 16 + +/* struct error_entry - amdgpu VF error information. */ +struct amdgpu_vf_error_buffer { + int read_count; + int write_count; + uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE]; + uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE]; + uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE]; +}; + +struct amdgpu_vf_error_buffer admgpu_vf_errors; + + +void amdgpu_vf_error_put(uint16_t sub_error_code, uint16_t error_flags, uint64_t error_data) +{ + int index; + uint16_t error_code = AMDGIM_ERROR_CODE(AMDGIM_ERROR_CATEGORY_VF, sub_error_code); + + index = admgpu_vf_errors.write_count % AMDGPU_VF_ERROR_ENTRY_SIZE; + admgpu_vf_errors.code [index] = error_code; + admgpu_vf_errors.flags [index] = error_flags; + admgpu_vf_errors.data [index] = error_data; + admgpu_vf_errors.write_count ++; +} + + +void amdgpu_vf_error_trans_all(struct amdgpu_device *adev) +{ + /* u32 pf2vf_flags = 0; */ + u32 data1, data2, data3; + int index; + + if ((NULL == adev) || (!amdgpu_sriov_vf(adev)) || (!adev->virt.ops) || (!adev->virt.ops->trans_msg)) { + return; + } +/* + TODO: Enable these code when pv2vf_info is merged + AMDGPU_FW_VRAM_PF2VF_READ (adev, feature_flags, &pf2vf_flags); + if (!(pf2vf_flags & AMDGIM_FEATURE_ERROR_LOG_COLLECT)) { + return; + } +*/ + /* The errors are overlay of array, correct read_count as full. */ + if (admgpu_vf_errors.write_count - admgpu_vf_errors.read_count > AMDGPU_VF_ERROR_ENTRY_SIZE) { + admgpu_vf_errors.read_count = admgpu_vf_errors.write_count - AMDGPU_VF_ERROR_ENTRY_SIZE; + } + + while (admgpu_vf_errors.read_count < admgpu_vf_errors.write_count) { + index =admgpu_vf_errors.read_count % AMDGPU_VF_ERROR_ENTRY_SIZE; + data1 = AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX (admgpu_vf_errors.code[index], admgpu_vf_errors.flags[index]); + data2 = admgpu_vf_errors.data[index] & 0xFFFFFFFF; + data3 = (admgpu_vf_errors.data[index] >> 32) & 0xFFFFFFFF; + + adev->virt.ops->trans_msg(adev, IDH_LOG_VF_ERROR, data1, data2, data3); + admgpu_vf_errors.read_count ++; + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h new file mode 100644 index 000000000000..2a3278ec76ba --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h @@ -0,0 +1,62 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __VF_ERROR_H__ +#define __VF_ERROR_H__ + +#define AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX(c,f) (((c & 0xFFFF) << 16) | (f & 0xFFFF)) +#define AMDGIM_ERROR_CODE(t,c) (((t&0xF)<<12)|(c&0xFFF)) + +/* Please keep enum same as AMD GIM driver */ +enum AMDGIM_ERROR_VF { + AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL = 0, + AMDGIM_ERROR_VF_NO_VBIOS, + AMDGIM_ERROR_VF_GPU_POST_ERROR, + AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, + AMDGIM_ERROR_VF_FENCE_INIT_FAIL, + + AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, + AMDGIM_ERROR_VF_IB_INIT_FAIL, + AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, + AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, + AMDGIM_ERROR_VF_GPU_RESET_FAIL, + + AMDGIM_ERROR_VF_TEST, + AMDGIM_ERROR_VF_MAX +}; + +enum AMDGIM_ERROR_CATEGORY { + AMDGIM_ERROR_CATEGORY_NON_USED = 0, + AMDGIM_ERROR_CATEGORY_GIM, + AMDGIM_ERROR_CATEGORY_PF, + AMDGIM_ERROR_CATEGORY_VF, + AMDGIM_ERROR_CATEGORY_VBIOS, + AMDGIM_ERROR_CATEGORY_MONITOR, + + AMDGIM_ERROR_CATEGORY_MAX +}; + +void amdgpu_vf_error_put(uint16_t sub_error_code, uint16_t error_flags, uint64_t error_data); +void amdgpu_vf_error_trans_all (struct amdgpu_device *adev); + +#endif /* __VF_ERROR_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 9e1062edb76e..e5b1baf387c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -43,6 +43,7 @@ struct amdgpu_virt_ops { int (*req_full_gpu)(struct amdgpu_device *adev, bool init); int (*rel_full_gpu)(struct amdgpu_device *adev, bool init); int (*reset_gpu)(struct amdgpu_device *adev); + void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3); }; /* GPU virtualization */ diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index bde3ca3c21c1..2812d88a8bdd 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -72,21 +72,6 @@ static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val) reg); } -static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev, - enum idh_request req) -{ - u32 reg; - - reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, - mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0)); - reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0, - MSGBUF_DATA, req); - WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0), - reg); - - xgpu_ai_mailbox_set_valid(adev, true); -} - static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev, enum idh_event event) { @@ -154,13 +139,25 @@ static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event) return r; } - -static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, - enum idh_request req) -{ +static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev, + enum idh_request req, u32 data1, u32 data2, u32 data3) { + u32 reg; int r; - xgpu_ai_mailbox_trans_msg(adev, req); + reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0)); + reg = REG_SET_FIELD(reg, 
BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0, + MSGBUF_DATA, req); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0), + reg); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1), + data1); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2), + data2); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3), + data3); + + xgpu_ai_mailbox_set_valid(adev, true); /* start to poll ack */ r = xgpu_ai_poll_ack(adev); @@ -168,6 +165,14 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, pr_err("Doesn't get ack from pf, continue\n"); xgpu_ai_mailbox_set_valid(adev, false); +} + +static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, + enum idh_request req) +{ + int r; + + xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0); /* start to check msg if request is idh_req_gpu_init_access */ if (req == IDH_REQ_GPU_INIT_ACCESS || @@ -342,4 +347,5 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = { .req_full_gpu = xgpu_ai_request_full_gpu_access, .rel_full_gpu = xgpu_ai_release_full_gpu_access, .reset_gpu = xgpu_ai_request_reset, + .trans_msg = xgpu_ai_mailbox_trans_msg, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h index 9aefc44d2c34..1e91b9a1c591 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h @@ -31,7 +31,9 @@ enum idh_request { IDH_REL_GPU_INIT_ACCESS, IDH_REQ_GPU_FINI_ACCESS, IDH_REL_GPU_FINI_ACCESS, - IDH_REQ_GPU_RESET_ACCESS + IDH_REQ_GPU_RESET_ACCESS, + + IDH_LOG_VF_ERROR = 200, }; enum idh_event { diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index 171a658135b5..c25a831f94ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -613,4 +613,5 @@ const struct amdgpu_virt_ops xgpu_vi_virt_ops = { .req_full_gpu = xgpu_vi_request_full_gpu_access, .rel_full_gpu = xgpu_vi_release_full_gpu_access, .reset_gpu = xgpu_vi_request_reset, + .trans_msg = NULL, /* Does not need to trans VF errors to host. */ }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h index 2db741131bc6..c791d73d2d54 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h @@ -32,7 +32,9 @@ enum idh_request { IDH_REL_GPU_INIT_ACCESS, IDH_REQ_GPU_FINI_ACCESS, IDH_REL_GPU_FINI_ACCESS, - IDH_REQ_GPU_RESET_ACCESS + IDH_REQ_GPU_RESET_ACCESS, + + IDH_LOG_VF_ERROR = 200, }; /* VI mailbox messages data */ From 011d4bbea969268c013eaf6e39721d3181833711 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 26 Jun 2017 11:37:49 +0200 Subject: [PATCH 0259/1795] drm/amdgpu: cleanup initializing gtt_size MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Stop spreading the code over all GMC generations. 
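The consolidated helper keeps a single copy of the default-size policy. A rough, compile-and-run userspace model of that arithmetic is sketched below; the 3072 MB default and the helper name default_gtt_size() are illustrative only, the driver itself uses AMDGPU_DEFAULT_GTT_SIZE_MB and the amdgpu_gart_size module parameter as shown in the diff.

#include <stdint.h>
#include <stdio.h>

/* Userspace model of amdgpu_gart_set_defaults(): unless the user
 * overrides it (gart_size_mb != -1), the GTT is the larger of a fixed
 * default and the VRAM size.
 */
static uint64_t default_gtt_size(int64_t gart_size_mb, uint64_t vram_bytes)
{
	const uint64_t default_gtt = 3072ULL << 20; /* example default */

	if (gart_size_mb == -1)
		return default_gtt > vram_bytes ? default_gtt : vram_bytes;

	return (uint64_t)gart_size_mb << 20;
}

int main(void)
{
	/* 2 GB card: GTT falls back to the fixed default. */
	printf("%llu MB\n",
	       (unsigned long long)(default_gtt_size(-1, 2048ULL << 20) >> 20));
	/* 8 GB card: GTT grows to match VRAM. */
	printf("%llu MB\n",
	       (unsigned long long)(default_gtt_size(-1, 8192ULL << 20) >> 20));
	return 0;
}
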
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 20 ++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 10 +--------- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 10 +--------- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 10 +--------- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 10 +--------- 6 files changed, 25 insertions(+), 36 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 20e0ed96efbb..df63823ad4db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -559,6 +559,7 @@ struct amdgpu_gart { const struct amdgpu_gart_funcs *gart_funcs; }; +void amdgpu_gart_set_defaults(struct amdgpu_device *adev); int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev); void amdgpu_gart_table_ram_free(struct amdgpu_device *adev); int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index a57abc1a25fb..982b1cc11dac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -55,6 +55,26 @@ /* * Common GART table functions. */ + +/** + * amdgpu_gart_set_defaults - set the default gtt_size + * + * @adev: amdgpu_device pointer + * + * Set the default gtt_size based on parameters and available VRAM. + */ +void amdgpu_gart_set_defaults(struct amdgpu_device *adev) +{ + /* unless the user had overridden it, set the gart + * size equal to the 1024 or vram, whichever is larger. + */ + if (amdgpu_gart_size == -1) + adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), + adev->mc.mc_vram_size); + else + adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; +} + /** * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table * diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index c1a124a86775..5f7750c6497e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -321,15 +321,7 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev) adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; adev->mc.visible_vram_size = adev->mc.aper_size; - /* unless the user had overridden it, set the gart - * size equal to the 1024 or vram, whichever is larger. - */ - if (amdgpu_gart_size == -1) - adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), - adev->mc.mc_vram_size); - else - adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; - + amdgpu_gart_set_defaults(adev); gmc_v6_0_vram_gtt_location(adev, &adev->mc); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 575d72583746..388b52febc8b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -373,15 +373,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) if (adev->mc.visible_vram_size > adev->mc.real_vram_size) adev->mc.visible_vram_size = adev->mc.real_vram_size; - /* unless the user had overridden it, set the gart - * size equal to the 1024 or vram, whichever is larger. 
- */ - if (amdgpu_gart_size == -1) - adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), - adev->mc.mc_vram_size); - else - adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; - + amdgpu_gart_set_defaults(adev); gmc_v7_0_vram_gtt_location(adev, &adev->mc); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 6379177b9fcb..d148d1c585b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -535,15 +535,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) if (adev->mc.visible_vram_size > adev->mc.real_vram_size) adev->mc.visible_vram_size = adev->mc.real_vram_size; - /* unless the user had overridden it, set the gart - * size equal to the 1024 or vram, whichever is larger. - */ - if (amdgpu_gart_size == -1) - adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), - adev->mc.mc_vram_size); - else - adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; - + amdgpu_gart_set_defaults(adev); gmc_v8_0_vram_gtt_location(adev, &adev->mc); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index c6394ea69f96..dbb43d99e02e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -494,15 +494,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) if (adev->mc.visible_vram_size > adev->mc.real_vram_size) adev->mc.visible_vram_size = adev->mc.real_vram_size; - /* unless the user had overridden it, set the gart - * size equal to the 1024 or vram, whichever is larger. - */ - if (amdgpu_gart_size == -1) - adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), - adev->mc.mc_vram_size); - else - adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; - + amdgpu_gart_set_defaults(adev); gmc_v9_0_vram_gtt_location(adev, &adev->mc); return 0; From b8e0e6e16e3ac130901111e2c503b3533f9719ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 26 Jun 2017 15:19:30 +0200 Subject: [PATCH 0260/1795] drm/amdgpu: fix amdgpu_debugfs_gem_bo_info MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Otherwise we trigger a bunch of WARN_ONs when this is called. 
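The essence of the fix, as a sketch of the hunk below: read the raw placement offset and only print a GPU address when the buffer actually has one, rather than going through amdgpu_bo_gpu_offset(), which can WARN for buffers that are not pinned or have no GPU address.

	uint64_t offset = ACCESS_ONCE(bo->tbo.mem.start);

	/* Unallocated/system placements carry the invalid marker. */
	if (offset != AMDGPU_BO_INVALID_OFFSET)
		seq_printf(m, " @ 0x%010Lx", offset);
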
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 621f739103a6..96c4493ccf8f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -784,6 +784,7 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data) unsigned domain; const char *placement; unsigned pin_count; + uint64_t offset; domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); switch (domain) { @@ -798,9 +799,12 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data) placement = " CPU"; break; } - seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx", - id, amdgpu_bo_size(bo), placement, - amdgpu_bo_gpu_offset(bo)); + seq_printf(m, "\t0x%08x: %12ld byte %s", + id, amdgpu_bo_size(bo), placement); + + offset = ACCESS_ONCE(bo->tbo.mem.start); + if (offset != AMDGPU_BO_INVALID_OFFSET) + seq_printf(m, " @ 0x%010Lx", offset); pin_count = ACCESS_ONCE(bo->pin_count); if (pin_count) From 98b09f52db46b11fdc650dd0906ef682c5c0be63 Mon Sep 17 00:00:00 2001 From: ozeng Date: Tue, 27 Jun 2017 14:45:18 -0500 Subject: [PATCH 0261/1795] drm/amdgpu: Changed CU reservation golden settings With previous golden settings, compute task can't use reserved LDS (32K) on CU0 and CU1. On 64K LDS system, if compute work group allocate more than 32K LDS, then it can't be dispatched to CU0 and CU1 because of the reservation. This enables compute task to use reserved LDS on CU0 and CU1. Signed-off-by: Oak Zeng Acked-by: Alex Deucher Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 28 +++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index aa5a50f5eac8..4bb12ee46315 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -193,8 +193,8 @@ static const u32 tonga_golden_common_all[] = mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003, mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, - mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, - mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF }; static const u32 tonga_mgcg_cgcg_init[] = @@ -303,8 +303,8 @@ static const u32 polaris11_golden_common_all[] = mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002, mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, - mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, - mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF, + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF, }; static const u32 golden_settings_polaris10_a11[] = @@ -336,8 +336,8 @@ static const u32 polaris10_golden_common_all[] = mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003, mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, - mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, - mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF, + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF, }; static 
const u32 fiji_golden_common_all[] = @@ -348,8 +348,8 @@ static const u32 fiji_golden_common_all[] = mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003, mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, - mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, - mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF, + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF, mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009, }; @@ -436,8 +436,8 @@ static const u32 iceland_golden_common_all[] = mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001, mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, - mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, - mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF }; static const u32 iceland_mgcg_cgcg_init[] = @@ -532,8 +532,8 @@ static const u32 cz_golden_common_all[] = mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001, mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, - mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, - mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF }; static const u32 cz_mgcg_cgcg_init[] = @@ -637,8 +637,8 @@ static const u32 stoney_golden_common_all[] = mmGB_ADDR_CONFIG, 0xffffffff, 0x12010001, mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, - mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, - mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF, + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF, }; static const u32 stoney_mgcg_cgcg_init[] = From ccfee95c6e6e9614a7c5f1191902de7927b6af2f Mon Sep 17 00:00:00 2001 From: Kent Russell Date: Wed, 28 Jun 2017 15:16:41 -0400 Subject: [PATCH 0262/1795] drm/amdgpu: Update default vram_page_split description MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This was updated to 512 and the description update got missed Signed-off-by: Kent Russell Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index b59f37c83fa6..f8face2a1e77 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -186,7 +186,7 @@ module_param_named(vm_debug, amdgpu_vm_debug, int, 0644); MODULE_PARM_DESC(vm_update_mode, "VM update using CPU (0 = never (default except for large BAR(LB)), 1 = Graphics only, 2 = Compute only (default for LB), 3 = Both"); module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444); -MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 1024, -1 = disable)"); +MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 512, -1 = disable)"); module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444); MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))"); From 
0ed279877ddf7ae8e5acd9fe4d464ceba98f37cd Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 29 Jun 2017 12:38:37 -0500 Subject: [PATCH 0263/1795] drm/radeon: add header comment for clarification to vce_v2_0_enable_mgcg() Add function header comment to make it clear that local variable sw_cg is used for debugging and it should not be removed. Addresses-Coverity-ID: 1198635 Cc: Alex Deucher Signed-off-by: Gustavo A. R. Silva Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/vce_v2_0.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/radeon/vce_v2_0.c b/drivers/gpu/drm/radeon/vce_v2_0.c index fce214482e72..b0a43b68776d 100644 --- a/drivers/gpu/drm/radeon/vce_v2_0.c +++ b/drivers/gpu/drm/radeon/vce_v2_0.c @@ -104,6 +104,10 @@ static void vce_v2_0_disable_cg(struct radeon_device *rdev) WREG32(VCE_CGTT_CLK_OVERRIDE, 7); } +/* + * Local variable sw_cg is used for debugging purposes, in case we + * ran into problems with dynamic clock gating. Don't remove it. + */ void vce_v2_0_enable_mgcg(struct radeon_device *rdev, bool enable) { bool sw_cg = false; From e8110b1c9bdd3d76610a9783724233a570964d3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 28 Jun 2017 13:43:48 +0200 Subject: [PATCH 0264/1795] drm/amdgpu: move ring helpers to amdgpu_ring.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Keep them where they belong. Signed-off-by: Christian König Reviewed-by: Alex Deucher Acked-by: Felix Kuehling --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 44 ------------------------ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 42 ++++++++++++++++++++++ 2 files changed, 42 insertions(+), 44 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index df63823ad4db..714235f507f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1792,50 +1792,6 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v); #define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8)) #define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16)) -/* - * RING helpers. - */ -static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v) -{ - if (ring->count_dw <= 0) - DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); - ring->ring[ring->wptr++ & ring->buf_mask] = v; - ring->wptr &= ring->ptr_mask; - ring->count_dw--; -} - -static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, void *src, int count_dw) -{ - unsigned occupied, chunk1, chunk2; - void *dst; - - if (unlikely(ring->count_dw < count_dw)) { - DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); - return; - } - - occupied = ring->wptr & ring->buf_mask; - dst = (void *)&ring->ring[occupied]; - chunk1 = ring->buf_mask + 1 - occupied; - chunk1 = (chunk1 >= count_dw) ? 
count_dw: chunk1; - chunk2 = count_dw - chunk1; - chunk1 <<= 2; - chunk2 <<= 2; - - if (chunk1) - memcpy(dst, src, chunk1); - - if (chunk2) { - src += chunk1; - dst = (void *)ring->ring; - memcpy(dst, src, chunk2); - } - - ring->wptr += count_dw; - ring->wptr &= ring->ptr_mask; - ring->count_dw -= count_dw; -} - static inline struct amdgpu_sdma_instance * amdgpu_get_sdma_instance(struct amdgpu_ring *ring) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index bc8dec992f73..04cbc3a4d4bf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -212,4 +212,46 @@ static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring) } +static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v) +{ + if (ring->count_dw <= 0) + DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); + ring->ring[ring->wptr++ & ring->buf_mask] = v; + ring->wptr &= ring->ptr_mask; + ring->count_dw--; +} + +static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, + void *src, int count_dw) +{ + unsigned occupied, chunk1, chunk2; + void *dst; + + if (unlikely(ring->count_dw < count_dw)) { + DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); + return; + } + + occupied = ring->wptr & ring->buf_mask; + dst = (void *)&ring->ring[occupied]; + chunk1 = ring->buf_mask + 1 - occupied; + chunk1 = (chunk1 >= count_dw) ? count_dw: chunk1; + chunk2 = count_dw - chunk1; + chunk1 <<= 2; + chunk2 <<= 2; + + if (chunk1) + memcpy(dst, src, chunk1); + + if (chunk2) { + src += chunk1; + dst = (void *)ring->ring; + memcpy(dst, src, chunk2); + } + + ring->wptr += count_dw; + ring->wptr &= ring->ptr_mask; + ring->count_dw -= count_dw; +} + #endif From 369421cbfabf0650b73571d7d1c026fc3830e74d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 28 Jun 2017 13:50:07 +0200 Subject: [PATCH 0265/1795] drm/amdgpu: fix amdgpu_ring_write_multiple MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Overwriting still used ring content has a low probability to cause problems, not writing at all has 100% probability to cause problems. Signed-off-by: Christian König Reviewed-by: Alex Deucher Acked-by: Felix Kuehling --- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 04cbc3a4d4bf..322d25299a00 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -227,10 +227,8 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, unsigned occupied, chunk1, chunk2; void *dst; - if (unlikely(ring->count_dw < count_dw)) { + if (unlikely(ring->count_dw < count_dw)) DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); - return; - } occupied = ring->wptr & ring->buf_mask; dst = (void *)&ring->ring[occupied]; From df264f9e08081c8c79523fd9e9f5241ed23ee7e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 28 Jun 2017 15:41:17 +0200 Subject: [PATCH 0266/1795] drm/amdgpu: allow flushing VMID0 before IB execution as well MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows us to queue IBs which needs an up to date system domain as well. 
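Taken together, the two hunks below mean that any job created through amdgpu_job_alloc_with_ib() already carries the system (VMID0) page-directory address, so the flush done in amdgpu_ib_schedule() also covers IBs that only touch the system domain. A hedged caller-side sketch, where size is a placeholder for the real allocation size:

	struct amdgpu_job *job;
	int r;

	r = amdgpu_job_alloc_with_ib(adev, size, &job);
	if (r)
		return r;
	/* job->vm_pd_addr now points at adev->gart.table_addr, so the
	 * VM flush before IB execution also refreshes VMID0.
	 */
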
Signed-off-by: Christian König Reviewed-by: Alex Deucher Acked-by: Felix Kuehling --- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index f774b3f497d2..1b30d2ab9c51 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -172,7 +172,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, if (ring->funcs->insert_start) ring->funcs->insert_start(ring); - if (vm) { + if (job) { r = amdgpu_vm_flush(ring, job); if (r) { amdgpu_ring_undo(ring); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 3d641e10e6b6..4510627ae83e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -81,6 +81,8 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]); if (r) kfree(*job); + else + (*job)->vm_pd_addr = adev->gart.table_addr; return r; } From fc9c8f5459f2dfa7f83bee3f388faaf570ef96ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 29 Jun 2017 11:46:15 +0200 Subject: [PATCH 0267/1795] drm/amdgpu: add vm_needs_flush parameter to amdgpu_copy_buffer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows us to flush the system VM here. Signed-off-by: Christian König Reviewed-by: Alex Deucher Acked-by: Felix Kuehling --- drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 12 ++++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 9 ++++----- 5 files changed, 15 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c index 1beae5b930d0..2fb299afc12b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c @@ -40,7 +40,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size, for (i = 0; i < n; i++) { struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence, - false); + false, false); if (r) goto exit_do_move; r = dma_fence_wait(fence, false); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 8ee69652be8c..c34cf2c1ae4e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -535,7 +535,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev, r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr, amdgpu_bo_size(bo), resv, fence, - direct); + direct, false); if (!r) amdgpu_bo_fence(bo, *fence, true); @@ -588,7 +588,7 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev, r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr, amdgpu_bo_size(bo), resv, fence, - direct); + direct, false); if (!r) amdgpu_bo_fence(bo, *fence, true); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index 15510dadde01..d02e611a2dae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -111,7 +111,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) 
amdgpu_bo_kunmap(gtt_obj[i]); r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr, - size, NULL, &fence, false); + size, NULL, &fence, false, false); if (r) { DRM_ERROR("Failed GTT->VRAM copy %d\n", i); @@ -156,7 +156,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) amdgpu_bo_kunmap(vram_obj); r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr, - size, NULL, &fence, false); + size, NULL, &fence, false, false); if (r) { DRM_ERROR("Failed VRAM->GTT copy %d\n", i); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index c9b131b13ef7..8c5f75d29f32 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -318,7 +318,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, r = amdgpu_copy_buffer(ring, old_start, new_start, cur_pages * PAGE_SIZE, - bo->resv, &next, false); + bo->resv, &next, false, false); if (r) goto error; @@ -1256,12 +1256,11 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) return ttm_bo_mmap(filp, vma, &adev->mman.bdev); } -int amdgpu_copy_buffer(struct amdgpu_ring *ring, - uint64_t src_offset, - uint64_t dst_offset, - uint32_t byte_count, +int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, + uint64_t dst_offset, uint32_t byte_count, struct reservation_object *resv, - struct dma_fence **fence, bool direct_submit) + struct dma_fence **fence, bool direct_submit, + bool vm_needs_flush) { struct amdgpu_device *adev = ring->adev; struct amdgpu_job *job; @@ -1283,6 +1282,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, if (r) return r; + job->vm_needs_flush = vm_needs_flush; if (resv) { r = amdgpu_sync_resv(adev, &job->sync, resv, AMDGPU_FENCE_OWNER_UNDEFINED); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 6bdede8ff12b..cd5bbfa2773f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -61,12 +61,11 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, const struct ttm_place *place, struct ttm_mem_reg *mem); -int amdgpu_copy_buffer(struct amdgpu_ring *ring, - uint64_t src_offset, - uint64_t dst_offset, - uint32_t byte_count, +int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, + uint64_t dst_offset, uint32_t byte_count, struct reservation_object *resv, - struct dma_fence **fence, bool direct_submit); + struct dma_fence **fence, bool direct_submit, + bool vm_needs_flush); int amdgpu_fill_buffer(struct amdgpu_bo *bo, uint32_t src_data, struct reservation_object *resv, From 92c60d9cf6636fdf740ebc98af0d68426f07b19b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 29 Jun 2017 10:44:39 +0200 Subject: [PATCH 0268/1795] drm/amdgpu: bind BOs to TTM only once MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No need to do this on every round. 
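A sketch of the resulting flow in amdgpu_move_blit(), condensed from the diff below: the GTT bind happens once per buffer before the node walk, and amdgpu_mm_node_addr() is reduced to pure address arithmetic.

	if (old_mem->mem_type == TTM_PL_TT) {
		r = amdgpu_ttm_bind(bo, old_mem);	/* bind once */
		if (r)
			return r;
	}
	/* The per-node loop now only computes addresses, no rebinding. */
	old_start = amdgpu_mm_node_addr(bo, old_mem->mm_node, old_mem);
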
Signed-off-by: Christian König Reviewed-by: Alex Deucher Acked-by: Felix Kuehling --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 74 +++++++++++-------------- 1 file changed, 31 insertions(+), 43 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 8c5f75d29f32..e97dfe888d55 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -252,29 +252,15 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo, new_mem->mm_node = NULL; } -static int amdgpu_mm_node_addr(struct ttm_buffer_object *bo, - struct drm_mm_node *mm_node, - struct ttm_mem_reg *mem, - uint64_t *addr) +static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo, + struct drm_mm_node *mm_node, + struct ttm_mem_reg *mem) { - int r; + uint64_t addr; - switch (mem->mem_type) { - case TTM_PL_TT: - r = amdgpu_ttm_bind(bo, mem); - if (r) - return r; - - case TTM_PL_VRAM: - *addr = mm_node->start << PAGE_SHIFT; - *addr += bo->bdev->man[mem->mem_type].gpu_offset; - break; - default: - DRM_ERROR("Unknown placement %d\n", mem->mem_type); - return -EINVAL; - } - - return 0; + addr = mm_node->start << PAGE_SHIFT; + addr += bo->bdev->man[mem->mem_type].gpu_offset; + return addr; } static int amdgpu_move_blit(struct ttm_buffer_object *bo, @@ -298,18 +284,25 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, return -EINVAL; } - old_mm = old_mem->mm_node; - r = amdgpu_mm_node_addr(bo, old_mm, old_mem, &old_start); - if (r) - return r; - old_size = old_mm->size; + if (old_mem->mem_type == TTM_PL_TT) { + r = amdgpu_ttm_bind(bo, old_mem); + if (r) + return r; + } + old_mm = old_mem->mm_node; + old_size = old_mm->size; + old_start = amdgpu_mm_node_addr(bo, old_mm, old_mem); + + if (new_mem->mem_type == TTM_PL_TT) { + r = amdgpu_ttm_bind(bo, new_mem); + if (r) + return r; + } new_mm = new_mem->mm_node; - r = amdgpu_mm_node_addr(bo, new_mm, new_mem, &new_start); - if (r) - return r; new_size = new_mm->size; + new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem); num_pages = new_mem->num_pages; while (num_pages) { @@ -331,10 +324,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, old_size -= cur_pages; if (!old_size) { - r = amdgpu_mm_node_addr(bo, ++old_mm, old_mem, - &old_start); - if (r) - goto error; + old_start = amdgpu_mm_node_addr(bo, ++old_mm, old_mem); old_size = old_mm->size; } else { old_start += cur_pages * PAGE_SIZE; @@ -342,11 +332,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, new_size -= cur_pages; if (!new_size) { - r = amdgpu_mm_node_addr(bo, ++new_mm, new_mem, - &new_start); - if (r) - goto error; - + new_start = amdgpu_mm_node_addr(bo, ++new_mm, new_mem); new_size = new_mm->size; } else { new_start += cur_pages * PAGE_SIZE; @@ -1347,6 +1333,12 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, return -EINVAL; } + if (bo->tbo.mem.mem_type == TTM_PL_TT) { + r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem); + if (r) + return r; + } + num_pages = bo->tbo.num_pages; mm_node = bo->tbo.mem.mm_node; num_loops = 0; @@ -1382,11 +1374,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, uint32_t byte_count = mm_node->size << PAGE_SHIFT; uint64_t dst_addr; - r = amdgpu_mm_node_addr(&bo->tbo, mm_node, - &bo->tbo.mem, &dst_addr); - if (r) - return r; - + dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem); while (byte_count) { uint32_t cur_size_in_bytes = min(byte_count, max_bytes); From 98a7f88ce9a9ed26cea939558f33e3d483cfb4f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= 
Date: Fri, 30 Jun 2017 10:41:07 +0200 Subject: [PATCH 0269/1795] drm/amdgpu: bind BOs with GTT space allocated directly v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This avoids binding them later on. v2: fix typo in function name Signed-off-by: Christian König Reviewed-by: Alex Deucher Acked-by: Felix Kuehling --- drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 16 ++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 49 +++++++++++++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 1 + 3 files changed, 46 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index f7d22c44034d..1ef625550442 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -80,6 +80,20 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man) return 0; } +/** + * amdgpu_gtt_mgr_is_allocated - Check if mem has address space + * + * @mem: the mem object to check + * + * Check if a mem object has already address space allocated. + */ +bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem) +{ + struct drm_mm_node *node = mem->mm_node; + + return (node->start != AMDGPU_BO_INVALID_OFFSET); +} + /** * amdgpu_gtt_mgr_alloc - allocate new ranges * @@ -101,7 +115,7 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, unsigned long fpfn, lpfn; int r; - if (node->start != AMDGPU_BO_INVALID_OFFSET) + if (amdgpu_gtt_mgr_is_allocated(mem)) return 0; if (place) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index e97dfe888d55..7064d31f0be5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -681,6 +681,31 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) sg_free_table(ttm->sg); } +static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) +{ + struct amdgpu_ttm_tt *gtt = (void *)ttm; + uint64_t flags; + int r; + + spin_lock(>t->adev->gtt_list_lock); + flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, mem); + gtt->offset = (u64)mem->start << PAGE_SHIFT; + r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages, + ttm->pages, gtt->ttm.dma_address, flags); + + if (r) { + DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", + ttm->num_pages, gtt->offset); + goto error_gart_bind; + } + + list_add_tail(>t->list, >t->adev->gtt_list); +error_gart_bind: + spin_unlock(>t->adev->gtt_list_lock); + return r; + +} + static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { @@ -704,7 +729,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, bo_mem->mem_type == AMDGPU_PL_OA) return -EINVAL; - return 0; + if (amdgpu_gtt_mgr_is_allocated(bo_mem)) + r = amdgpu_ttm_do_bind(ttm, bo_mem); + + return r; } bool amdgpu_ttm_is_bound(struct ttm_tt *ttm) @@ -717,8 +745,6 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm) int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem) { struct ttm_tt *ttm = bo->ttm; - struct amdgpu_ttm_tt *gtt = (void *)bo->ttm; - uint64_t flags; int r; if (!ttm || amdgpu_ttm_is_bound(ttm)) @@ -731,22 +757,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem) return r; } - spin_lock(>t->adev->gtt_list_lock); - flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); - gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; - r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages, - ttm->pages, gtt->ttm.dma_address, flags); - - if (r) { 
- DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", - ttm->num_pages, gtt->offset); - goto error_gart_bind; - } - - list_add_tail(>t->list, >t->adev->gtt_list); -error_gart_bind: - spin_unlock(>t->adev->gtt_list_lock); - return r; + return amdgpu_ttm_do_bind(ttm, bo_mem); } int amdgpu_ttm_recover_gart(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index cd5bbfa2773f..776a20ae40c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -56,6 +56,7 @@ struct amdgpu_mman { extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func; extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func; +bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem); int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, struct ttm_buffer_object *tbo, const struct ttm_place *place, From 7504938f8e733053c8cf17dd7ac1f28053b8126a Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Thu, 8 Jun 2017 09:32:38 +0800 Subject: [PATCH 0270/1795] drm/amdgpu: add check when no firmware need to load Signed-off-by: Huang Rui Reviewed-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 4f50eeb65855..17a935df8e1d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -377,6 +377,11 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) struct amdgpu_firmware_info *ucode = NULL; const struct common_firmware_header *header = NULL; + if (!adev->firmware.fw_size) { + dev_warn(adev->dev, "No ip firmware need to load\n"); + return 0; + } + err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, amdgpu_sriov_vf(adev) ? 
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, @@ -459,6 +464,9 @@ int amdgpu_ucode_fini_bo(struct amdgpu_device *adev) int i; struct amdgpu_firmware_info *ucode = NULL; + if (!adev->firmware.fw_size) + return 0; + for (i = 0; i < adev->firmware.max_ucodes; i++) { ucode = &adev->firmware.ucode[i]; if (ucode->fw) { From a69c7e0138fcb74d3abd5d3760358ba8aa945bda Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Thu, 29 Jun 2017 18:27:38 +0800 Subject: [PATCH 0271/1795] drm/amdgpu: update pctl1 ram index/data for mmhub on raven Signed-off-by: Hawking Zhang Reviewed-by: Alex Deucher Reviewed-by: Junwei Zhang --- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 89 +++++++++++++------------ 1 file changed, 48 insertions(+), 41 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 9804318f3488..01918dc5dc55 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -245,28 +245,28 @@ static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev) } struct pctl_data { - uint32_t index; - uint32_t data; + uint32_t index; + uint32_t data; }; const struct pctl_data pctl0_data[] = { - {0x0, 0x7a640}, - {0x9, 0x2a64a}, - {0xd, 0x2a680}, - {0x11, 0x6a684}, - {0x19, 0xea68e}, - {0x29, 0xa69e}, - {0x2b, 0x34a6c0}, - {0x61, 0x83a707}, - {0xe6, 0x8a7a4}, - {0xf0, 0x1a7b8}, - {0xf3, 0xfa7cc}, - {0x104, 0x17a7dd}, - {0x11d, 0xa7dc}, - {0x11f, 0x12a7f5}, - {0x133, 0xa808}, - {0x135, 0x12a810}, - {0x149, 0x7a82c} + {0x0, 0x7a640}, + {0x9, 0x2a64a}, + {0xd, 0x2a680}, + {0x11, 0x6a684}, + {0x19, 0xea68e}, + {0x29, 0xa69e}, + {0x2b, 0x34a6c0}, + {0x61, 0x83a707}, + {0xe6, 0x8a7a4}, + {0xf0, 0x1a7b8}, + {0xf3, 0xfa7cc}, + {0x104, 0x17a7dd}, + {0x11d, 0xa7dc}, + {0x11f, 0x12a7f5}, + {0x133, 0xa808}, + {0x135, 0x12a810}, + {0x149, 0x7a82c} }; #define PCTL0_DATA_LEN (sizeof(pctl0_data)/sizeof(pctl0_data[0])) @@ -275,31 +275,38 @@ const struct pctl_data pctl0_data[] = { #define PCTL0_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa833 const struct pctl_data pctl1_data[] = { - {0x0, 0x39a000}, - {0x3b, 0x44a040}, - {0x81, 0x2a08d}, - {0x85, 0x6ba094}, - {0xf2, 0x18a100}, - {0x10c, 0x4a132}, - {0x112, 0xca141}, - {0x120, 0x2fa158}, - {0x151, 0x17a1d0}, - {0x16a, 0x1a1e9}, - {0x16d, 0x13a1ec}, - {0x182, 0x7a201}, - {0x18b, 0x3a20a}, - {0x190, 0x7a580}, - {0x199, 0xa590}, - {0x19b, 0x4a594}, - {0x1a1, 0x1a59c}, - {0x1a4, 0x7a82c}, - {0x1ad, 0xfa7cc}, - {0x1be, 0x17a7dd}, - {0x1d7, 0x12a810} + {0x0, 0x39a000}, + {0x3b, 0x44a040}, + {0x81, 0x2a08d}, + {0x85, 0x6ba094}, + {0xf2, 0x18a100}, + {0x10c, 0x4a132}, + {0x112, 0xca141}, + {0x120, 0x2fa158}, + {0x151, 0x17a1d0}, + {0x16a, 0x1a1e9}, + {0x16d, 0x13a1ec}, + {0x182, 0x7a201}, + {0x18b, 0x3a20a}, + {0x190, 0x7a580}, + {0x199, 0xa590}, + {0x19b, 0x4a594}, + {0x1a1, 0x1a59c}, + {0x1a4, 0x7a82c}, + {0x1ad, 0xfa7cc}, + {0x1be, 0x17a7dd}, + {0x1d7, 0x12a810}, + {0x1eb, 0x4000a7e1}, + {0x1ec, 0x5000a7f5}, + {0x1ed, 0x4000a7e2}, + {0x1ee, 0x5000a7dc}, + {0x1ef, 0x4000a7e3}, + {0x1f0, 0x5000a7f6}, + {0x1f1, 0x5000a7e4} }; #define PCTL1_DATA_LEN (sizeof(pctl1_data)/sizeof(pctl1_data[0])) -#define PCTL1_RENG_EXEC_END_PTR 0x1ea +#define PCTL1_RENG_EXEC_END_PTR 0x1f1 #define PCTL1_STCTRL_REG_SAVE_RANGE0_BASE 0xa000 #define PCTL1_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa20d #define PCTL1_STCTRL_REG_SAVE_RANGE1_BASE 0xa580 From eabd76cef9002697979794826e625b9868e697b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolai=20H=C3=A4hnle?= Date: Tue, 13 Jun 2017 22:12:38 +0200 
Subject: [PATCH 0272/1795] drm/amd/sched: print sched job id in amd_sched_job trace MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This makes it easier to correlate amd_sched_job with other trace points that don't log the job pointer. v2: don't print the sched_job pointer (Andres) Signed-off-by: Nicolai Hähnle Reviewed-by: Andres Rodriguez Reviewed-by: Christian König --- drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h index dbd4fd3a810b..8bd38102b58e 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h @@ -16,16 +16,16 @@ TRACE_EVENT(amd_sched_job, TP_ARGS(sched_job), TP_STRUCT__entry( __field(struct amd_sched_entity *, entity) - __field(struct amd_sched_job *, sched_job) __field(struct dma_fence *, fence) __field(const char *, name) + __field(uint64_t, id) __field(u32, job_count) __field(int, hw_job_count) ), TP_fast_assign( __entry->entity = sched_job->s_entity; - __entry->sched_job = sched_job; + __entry->id = sched_job->id; __entry->fence = &sched_job->s_fence->finished; __entry->name = sched_job->sched->name; __entry->job_count = kfifo_len( @@ -33,8 +33,9 @@ TRACE_EVENT(amd_sched_job, __entry->hw_job_count = atomic_read( &sched_job->sched->hw_rq_count); ), - TP_printk("entity=%p, sched job=%p, fence=%p, ring=%s, job count:%u, hw job count:%d", - __entry->entity, __entry->sched_job, __entry->fence, __entry->name, + TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d", + __entry->entity, __entry->id, + __entry->fence, __entry->name, __entry->job_count, __entry->hw_job_count) ); From 5327dd8acf05a48b01804af9e4fad90054c6cdb7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 3 Jul 2017 15:21:42 +0200 Subject: [PATCH 0273/1795] drm/amdgpu: remove stale TODO comment MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit That is already fixed. Signed-off-by: Christian König Reviewed-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index c34cf2c1ae4e..a85e75327456 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -951,7 +951,6 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) size = bo->mem.num_pages << PAGE_SHIFT; offset = bo->mem.start << PAGE_SHIFT; - /* TODO: figure out how to map scattered VRAM to the CPU */ if ((offset + size) <= adev->mc.visible_vram_size) return 0; From 663ebbf63180ec6fb67d732ff3a8335c9d266c00 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 28 Jun 2017 11:06:52 +0200 Subject: [PATCH 0274/1795] drm/amdgpu: trace VM flags as 64bits MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Otherwise the upper bits are lost.
Signed-off-by: Christian König Reviewed-by: Alex Xie Reviewed-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 8601904e670a..509f7a63d40c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -224,7 +224,7 @@ TRACE_EVENT(amdgpu_vm_bo_map, __field(long, start) __field(long, last) __field(u64, offset) - __field(u32, flags) + __field(u64, flags) ), TP_fast_assign( @@ -234,7 +234,7 @@ TRACE_EVENT(amdgpu_vm_bo_map, __entry->offset = mapping->offset; __entry->flags = mapping->flags; ), - TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x", + TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%llx", __entry->bo, __entry->start, __entry->last, __entry->offset, __entry->flags) ); @@ -248,7 +248,7 @@ TRACE_EVENT(amdgpu_vm_bo_unmap, __field(long, start) __field(long, last) __field(u64, offset) - __field(u32, flags) + __field(u64, flags) ), TP_fast_assign( @@ -258,7 +258,7 @@ TRACE_EVENT(amdgpu_vm_bo_unmap, __entry->offset = mapping->offset; __entry->flags = mapping->flags; ), - TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x", + TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%llx", __entry->bo, __entry->start, __entry->last, __entry->offset, __entry->flags) ); @@ -269,7 +269,7 @@ DECLARE_EVENT_CLASS(amdgpu_vm_mapping, TP_STRUCT__entry( __field(u64, soffset) __field(u64, eoffset) - __field(u32, flags) + __field(u64, flags) ), TP_fast_assign( @@ -277,7 +277,7 @@ DECLARE_EVENT_CLASS(amdgpu_vm_mapping, __entry->eoffset = mapping->last + 1; __entry->flags = mapping->flags; ), - TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x", + TP_printk("soffs=%010llx, eoffs=%010llx, flags=%llx", __entry->soffset, __entry->eoffset, __entry->flags) ); @@ -293,14 +293,14 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping, TRACE_EVENT(amdgpu_vm_set_ptes, TP_PROTO(uint64_t pe, uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags), + uint32_t incr, uint64_t flags), TP_ARGS(pe, addr, count, incr, flags), TP_STRUCT__entry( __field(u64, pe) __field(u64, addr) __field(u32, count) __field(u32, incr) - __field(u32, flags) + __field(u64, flags) ), TP_fast_assign( @@ -310,7 +310,7 @@ TRACE_EVENT(amdgpu_vm_set_ptes, __entry->incr = incr; __entry->flags = flags; ), - TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u", + TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u", __entry->pe, __entry->addr, __entry->incr, __entry->flags, __entry->count) ); From 46886dbfbda2187adbe9028f4d0a190ce96200f1 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 2 Jul 2017 14:36:47 +0530 Subject: [PATCH 0275/1795] drm: radeon: radeon_ttm: constify ttm_place structures. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ttm_place structures are not supposed to change at runtime. All functions working with ttm_place take it as const. So mark the non-const structs as const.
File size before: text data bss dec hex filename 9235 344 136 9715 25f3 drivers/gpu/drm/radeon/radeon_ttm.o File size After adding 'const': text data bss dec hex filename 9267 312 136 9715 25f3 drivers/gpu/drm/radeon/radeon_ttm.o Reviewed-by: Christian König Signed-off-by: Arvind Yadav Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_ttm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index faa021396da3..2804b4a15896 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -178,7 +178,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, static void radeon_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *placement) { - static struct ttm_place placements = { + static const struct ttm_place placements = { .fpfn = 0, .lpfn = 0, .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM From c4fc445a9ded54cfbe4fca6299c0fcb7dcd0b47b Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sat, 1 Jul 2017 15:17:01 +0530 Subject: [PATCH 0276/1795] drm: radeon: constify drm_prop_enum_list structures. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drm_prop_enum_lists are not supposed to change at runtime. All functions working with drm_prop_enum_list take it as const. So mark the non-const structs as const. File size before: text data bss dec hex filename 18276 384 0 18660 48e4 drivers/gpu/drm/radeon/radeon_display.o File size After adding 'const': text data bss dec hex filename 18660 0 0 18660 48e4 drivers/gpu/drm/radeon/radeon_display.o Reviewed-by: Christian König Signed-off-by: Arvind Yadav Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_display.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 17d3dafc8319..f339c1c10fa1 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1388,12 +1388,12 @@ static const struct drm_mode_config_funcs radeon_mode_funcs = { .output_poll_changed = radeon_output_poll_changed }; -static struct drm_prop_enum_list radeon_tmds_pll_enum_list[] = +static const struct drm_prop_enum_list radeon_tmds_pll_enum_list[] = { { 0, "driver" }, { 1, "bios" }, }; -static struct drm_prop_enum_list radeon_tv_std_enum_list[] = +static const struct drm_prop_enum_list radeon_tv_std_enum_list[] = { { TV_STD_NTSC, "ntsc" }, { TV_STD_PAL, "pal" }, { TV_STD_PAL_M, "pal-m" }, @@ -1404,25 +1404,25 @@ static struct drm_prop_enum_list radeon_tv_std_enum_list[] = { TV_STD_SECAM, "secam" }, }; -static struct drm_prop_enum_list radeon_underscan_enum_list[] = +static const struct drm_prop_enum_list radeon_underscan_enum_list[] = { { UNDERSCAN_OFF, "off" }, { UNDERSCAN_ON, "on" }, { UNDERSCAN_AUTO, "auto" }, }; -static struct drm_prop_enum_list radeon_audio_enum_list[] = +static const struct drm_prop_enum_list radeon_audio_enum_list[] = { { RADEON_AUDIO_DISABLE, "off" }, { RADEON_AUDIO_ENABLE, "on" }, { RADEON_AUDIO_AUTO, "auto" }, }; /* XXX support different dither options? spatial, temporal, both, etc.
*/ -static struct drm_prop_enum_list radeon_dither_enum_list[] = +static const struct drm_prop_enum_list radeon_dither_enum_list[] = { { RADEON_FMT_DITHER_DISABLE, "off" }, { RADEON_FMT_DITHER_ENABLE, "on" }, }; -static struct drm_prop_enum_list radeon_output_csc_enum_list[] = +static const struct drm_prop_enum_list radeon_output_csc_enum_list[] = { { RADEON_OUTPUT_CSC_BYPASS, "bypass" }, { RADEON_OUTPUT_CSC_TVRGB, "tvrgb" }, { RADEON_OUTPUT_CSC_YCBCR601, "ycbcr601" }, From 1aaa56029f47e16f9d03e25a313677f16212ad72 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 2 Jul 2017 14:43:58 +0530 Subject: [PATCH 0277/1795] drm: amd: amdgpu: constify ttm_place structures. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ttm_place structures are not supposed to change at runtime. All functions working with ttm_place take it as const. So mark the non-const structs as const. Reviewed-by: Christian König Signed-off-by: Arvind Yadav Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 7064d31f0be5..ace178b393dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -186,7 +186,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct amdgpu_bo *abo; - static struct ttm_place placements = { + static const struct ttm_place placements = { .fpfn = 0, .lpfn = 0, .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM From 3164cba317c235f950c861928b290dd93b30d821 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 30 Jun 2017 17:00:01 -0400 Subject: [PATCH 0278/1795] drm/amdgpu/atombios: use bios_scratch_reg_offset for atombios MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Align with the atomfirmware code.
Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 22 +++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 1e8e1123ddf4..8e7a7b9baa1d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1686,7 +1686,7 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock) { uint32_t bios_6_scratch; - bios_6_scratch = RREG32(mmBIOS_SCRATCH_6); + bios_6_scratch = RREG32(adev->bios_scratch_reg_offset + 6); if (lock) { bios_6_scratch |= ATOM_S6_CRITICAL_STATE; @@ -1696,15 +1696,17 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock) bios_6_scratch |= ATOM_S6_ACC_MODE; } - WREG32(mmBIOS_SCRATCH_6, bios_6_scratch); + WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch); } void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev) { uint32_t bios_2_scratch, bios_6_scratch; - bios_2_scratch = RREG32(mmBIOS_SCRATCH_2); - bios_6_scratch = RREG32(mmBIOS_SCRATCH_6); + adev->bios_scratch_reg_offset = mmBIOS_SCRATCH_0; + + bios_2_scratch = RREG32(adev->bios_scratch_reg_offset + 2); + bios_6_scratch = RREG32(adev->bios_scratch_reg_offset + 6); /* let the bios control the backlight */ bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE; @@ -1715,8 +1717,8 @@ void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev) /* clear the vbios dpms state */ bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE; - WREG32(mmBIOS_SCRATCH_2, bios_2_scratch); - WREG32(mmBIOS_SCRATCH_6, bios_6_scratch); + WREG32(adev->bios_scratch_reg_offset + 2, bios_2_scratch); + WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch); } void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev) @@ -1724,7 +1726,7 @@ void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev) int i; for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++) - adev->bios_scratch[i] = RREG32(mmBIOS_SCRATCH_0 + i); + adev->bios_scratch[i] = RREG32(adev->bios_scratch_reg_offset + i); } void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev) @@ -1738,20 +1740,20 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev) adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK; for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++) - WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]); + WREG32(adev->bios_scratch_reg_offset + i, adev->bios_scratch[i]); } void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev, bool hung) { - u32 tmp = RREG32(mmBIOS_SCRATCH_3); + u32 tmp = RREG32(adev->bios_scratch_reg_offset + 3); if (hung) tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG; else tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG; - WREG32(mmBIOS_SCRATCH_3, tmp); + WREG32(adev->bios_scratch_reg_offset + 3, tmp); } /* Atom needs data in little endian format From d05da0e24bb584baf634489810561fc3d2a83bf3 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 30 Jun 2017 17:08:45 -0400 Subject: [PATCH 0279/1795] drm/amdgpu: unify some atombios/atomfirmware scratch reg functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that we use a pointer to the scratch reg start offset, most of the functions were duplicated. 
Acked-by: Christian König Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 35 ------------------- .../gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h | 4 --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 20 +++-------- drivers/gpu/drm/amd/amdgpu/soc15.c | 6 ++-- 4 files changed, 7 insertions(+), 58 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index 4bdda56fccee..9ddfe34d12af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -66,41 +66,6 @@ void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev) } } -void amdgpu_atomfirmware_scratch_regs_save(struct amdgpu_device *adev) -{ - int i; - - for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++) - adev->bios_scratch[i] = RREG32(adev->bios_scratch_reg_offset + i); -} - -void amdgpu_atomfirmware_scratch_regs_restore(struct amdgpu_device *adev) -{ - int i; - - /* - * VBIOS will check ASIC_INIT_COMPLETE bit to decide if - * execute ASIC_Init posting via driver - */ - adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK; - - for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++) - WREG32(adev->bios_scratch_reg_offset + i, adev->bios_scratch[i]); -} - -void amdgpu_atomfirmware_scratch_regs_engine_hung(struct amdgpu_device *adev, - bool hung) -{ - u32 tmp = RREG32(adev->bios_scratch_reg_offset + 3); - - if (hung) - tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG; - else - tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG; - - WREG32(adev->bios_scratch_reg_offset + 3, tmp); -} - int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev) { struct atom_context *ctx = adev->mode_info.atom_context; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h index a2c3ebe22c71..907e48f6b301 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h @@ -26,10 +26,6 @@ bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev); void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev); -void amdgpu_atomfirmware_scratch_regs_save(struct amdgpu_device *adev); -void amdgpu_atomfirmware_scratch_regs_restore(struct amdgpu_device *adev); -void amdgpu_atomfirmware_scratch_regs_engine_hung(struct amdgpu_device *adev, - bool hung); int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 88e45c6d36ea..7963c54e5d03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2393,10 +2393,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) */ amdgpu_bo_evict_vram(adev); - if (adev->is_atom_fw) - amdgpu_atomfirmware_scratch_regs_save(adev); - else - amdgpu_atombios_scratch_regs_save(adev); + amdgpu_atombios_scratch_regs_save(adev); pci_save_state(dev->pdev); if (suspend) { /* Shut down the device */ @@ -2445,10 +2442,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) if (r) goto unlock; } - if (adev->is_atom_fw) - amdgpu_atomfirmware_scratch_regs_restore(adev); - else - amdgpu_atombios_scratch_regs_restore(adev); + amdgpu_atombios_scratch_regs_restore(adev); /* post card */ if (amdgpu_need_post(adev)) { @@ -2861,15 +2855,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) r = amdgpu_suspend(adev); retry: - if (adev->is_atom_fw) - 
amdgpu_atomfirmware_scratch_regs_save(adev); - else - amdgpu_atombios_scratch_regs_save(adev); + amdgpu_atombios_scratch_regs_save(adev); r = amdgpu_asic_reset(adev); - if (adev->is_atom_fw) - amdgpu_atomfirmware_scratch_regs_restore(adev); - else - amdgpu_atombios_scratch_regs_restore(adev); + amdgpu_atombios_scratch_regs_restore(adev); /* post card */ amdgpu_atom_asic_init(adev->mode_info.atom_context); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index a7341d88a320..79f5e3e73e0b 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -25,7 +25,7 @@ #include #include #include "amdgpu.h" -#include "amdgpu_atomfirmware.h" +#include "amdgpu_atombios.h" #include "amdgpu_ih.h" #include "amdgpu_uvd.h" #include "amdgpu_vce.h" @@ -392,11 +392,11 @@ static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev) static int soc15_asic_reset(struct amdgpu_device *adev) { - amdgpu_atomfirmware_scratch_regs_engine_hung(adev, true); + amdgpu_atombios_scratch_regs_engine_hung(adev, true); soc15_gpu_pci_config_reset(adev); - amdgpu_atomfirmware_scratch_regs_engine_hung(adev, false); + amdgpu_atombios_scratch_regs_engine_hung(adev, false); return 0; } From 4426826c02dc367ec3c245ef5c5ca4dcdb45b4c8 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 30 Jun 2017 17:21:42 -0400 Subject: [PATCH 0280/1795] drm/amdgpu/atombios: add function for whether we need asic_init MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Check the atom scratch registers to see if asic_init is complete or not. Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 10 ++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h | 1 + 2 files changed, 11 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 8e7a7b9baa1d..ce443586a0c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1756,6 +1756,16 @@ void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev, WREG32(adev->bios_scratch_reg_offset + 3, tmp); } +bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev) +{ + u32 tmp = RREG32(adev->bios_scratch_reg_offset + 7); + + if (tmp & ATOM_S7_ASIC_INIT_COMPLETE_MASK) + return false; + else + return true; +} + /* Atom needs data in little endian format * so swap as appropriate when copying data to * or from atom. 
Note that atom operates on diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h index 38d0fe32e5cd..b0d5d1d7fdba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h @@ -200,6 +200,7 @@ void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev); void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev); void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev, bool hung); +bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev); void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type, From 47ed4e1c93a6c73f313022369c12ddd693890715 Mon Sep 17 00:00:00 2001 From: Ken Wang Date: Tue, 4 Jul 2017 13:11:52 +0800 Subject: [PATCH 0281/1795] drm/amdgpu: add workaround for S3 issues on some vega10 boards Certain MC registers need a delay after writing them to properly update in the init sequence. Signed-off-by: Ken Wang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 15 +++++++++++++++ 2 files changed, 17 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 714235f507f6..bbc7b19d2c1b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1687,6 +1687,8 @@ struct amdgpu_device { bool has_hw_reset; u8 reset_magic[AMDGPU_RESET_MAGIC_NUM]; + /* record last mm index being written through WREG32*/ + unsigned long last_mm_index; }; static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 7963c54e5d03..8ce522b490c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -129,6 +129,10 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, { trace_amdgpu_mm_wreg(adev->pdev->device, reg, v); + if (adev->asic_type >= CHIP_VEGA10 && reg == 0) { + adev->last_mm_index = v; + } + if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) { BUG_ON(in_interrupt()); return amdgpu_virt_kiq_wreg(adev, reg, v); @@ -144,6 +148,10 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); } + + if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) { + udelay(500); + } } u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg) @@ -158,6 +166,9 @@ u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg) void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v) { + if (adev->asic_type >= CHIP_VEGA10 && reg == 0) { + adev->last_mm_index = v; + } if ((reg * 4) < adev->rio_mem_size) iowrite32(v, adev->rio_mem + (reg * 4)); @@ -165,6 +176,10 @@ void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v) iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4)); iowrite32(v, adev->rio_mem + (mmMM_DATA * 4)); } + + if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) { + udelay(500); + } } /** From a80c929442d05bbf7f12d9b828ae59cbebbbe094 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 23 Jun 2017 15:06:37 +0800 Subject: [PATCH 0282/1795] drm/amdgpu: drop SMU_DRIVER_IF_VERSION check for 
some vega10 variants Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/smumgr/vega10_smumgr.c | 30 +++++++++++++++---- 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c index 269678443862..408514c965a0 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c @@ -356,6 +356,9 @@ int vega10_set_tools_address(struct pp_smumgr *smumgr) static int vega10_verify_smc_interface(struct pp_smumgr *smumgr) { uint32_t smc_driver_if_version; + struct cgs_system_info sys_info = {0}; + uint32_t dev_id; + uint32_t rev_id; PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(smumgr, PPSMC_MSG_GetDriverIfVersion), @@ -363,12 +366,27 @@ static int vega10_verify_smc_interface(struct pp_smumgr *smumgr) return -EINVAL); vega10_read_arg_from_smc(smumgr, &smc_driver_if_version); - if (smc_driver_if_version != SMU9_DRIVER_IF_VERSION) { - pr_err("Your firmware(0x%x) doesn't match \ - SMU9_DRIVER_IF_VERSION(0x%x). \ - Please update your firmware!\n", - smc_driver_if_version, SMU9_DRIVER_IF_VERSION); - return -EINVAL; + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV; + cgs_query_system_info(smumgr->device, &sys_info); + dev_id = (uint32_t)sys_info.value; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV; + cgs_query_system_info(smumgr->device, &sys_info); + rev_id = (uint32_t)sys_info.value; + + if (!((dev_id == 0x687f) && + ((rev_id == 0xc0) || + (rev_id == 0xc1) || + (rev_id == 0xc3)))) { + if (smc_driver_if_version != SMU9_DRIVER_IF_VERSION) { + pr_err("Your firmware(0x%x) doesn't match \ + SMU9_DRIVER_IF_VERSION(0x%x). 
\ + Please update your firmware!\n", + smc_driver_if_version, SMU9_DRIVER_IF_VERSION); + return -EINVAL; + } } return 0; From 747f6c921d0a4ec0cba76d8ef57729a959e0fd30 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 23 Jun 2017 15:08:15 +0800 Subject: [PATCH 0283/1795] drm/amdgpu: add ACG SMU firmware for other vega10 variants Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 8 +++++++- drivers/gpu/drm/amd/amdgpu/soc15.c | 2 -- drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 3 ++- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index c0a806280257..e525de2ecb2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -719,7 +719,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, strcpy(fw_name, "amdgpu/polaris12_smc.bin"); break; case CHIP_VEGA10: - strcpy(fw_name, "amdgpu/vega10_smc.bin"); + if ((adev->pdev->device == 0x687f) && + ((adev->pdev->revision == 0xc0) || + (adev->pdev->revision == 0xc1) || + (adev->pdev->revision == 0xc3))) + strcpy(fw_name, "amdgpu/vega10_acg_smc.bin"); + else + strcpy(fw_name, "amdgpu/vega10_smc.bin"); break; default: DRM_ERROR("SMC firmware not supported\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 79f5e3e73e0b..4003cb517451 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -62,8 +62,6 @@ #include "dce_virtual.h" #include "mxgpu_ai.h" -MODULE_FIRMWARE("amdgpu/vega10_smc.bin"); - #define mmFabricConfigAccessControl 0x0410 #define mmFabricConfigAccessControl_BASE_IDX 0 #define mmFabricConfigAccessControl_DEFAULT 0x00000000 diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index bcc61ffd13cb..0ac19cf3f987 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -43,7 +43,8 @@ MODULE_FIRMWARE("amdgpu/polaris11_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin"); MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris12_smc.bin"); - +MODULE_FIRMWARE("amdgpu/vega10_smc.bin"); +MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin"); int smum_early_init(struct pp_instance *handle) { From b7437509525a2de463594c77dc66bf675f8f99fd Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Sat, 24 Jun 2017 16:45:58 +0800 Subject: [PATCH 0284/1795] drm/amd/powerplay: add avfs profiling_info_v4_2 support on Vega10. 
Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/atomfirmware.h | 63 +++++ .../drm/amd/powerplay/hwmgr/ppatomfwctrl.c | 235 +++++++++++++----- .../drm/amd/powerplay/hwmgr/ppatomfwctrl.h | 8 + 3 files changed, 238 insertions(+), 68 deletions(-) diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 0021a1c63356..837296db9628 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -1233,6 +1233,69 @@ struct atom_asic_profiling_info_v4_1 uint32_t phyclk2gfxclk_c; }; +struct atom_asic_profiling_info_v4_2 { + struct atom_common_table_header table_header; + uint32_t maxvddc; + uint32_t minvddc; + uint32_t avfs_meannsigma_acontant0; + uint32_t avfs_meannsigma_acontant1; + uint32_t avfs_meannsigma_acontant2; + uint16_t avfs_meannsigma_dc_tol_sigma; + uint16_t avfs_meannsigma_platform_mean; + uint16_t avfs_meannsigma_platform_sigma; + uint32_t gb_vdroop_table_cksoff_a0; + uint32_t gb_vdroop_table_cksoff_a1; + uint32_t gb_vdroop_table_cksoff_a2; + uint32_t gb_vdroop_table_ckson_a0; + uint32_t gb_vdroop_table_ckson_a1; + uint32_t gb_vdroop_table_ckson_a2; + uint32_t avfsgb_fuse_table_cksoff_m1; + uint32_t avfsgb_fuse_table_cksoff_m2; + uint32_t avfsgb_fuse_table_cksoff_b; + uint32_t avfsgb_fuse_table_ckson_m1; + uint32_t avfsgb_fuse_table_ckson_m2; + uint32_t avfsgb_fuse_table_ckson_b; + uint16_t max_voltage_0_25mv; + uint8_t enable_gb_vdroop_table_cksoff; + uint8_t enable_gb_vdroop_table_ckson; + uint8_t enable_gb_fuse_table_cksoff; + uint8_t enable_gb_fuse_table_ckson; + uint16_t psm_age_comfactor; + uint8_t enable_apply_avfs_cksoff_voltage; + uint8_t reserved; + uint32_t dispclk2gfxclk_a; + uint32_t dispclk2gfxclk_b; + uint32_t dispclk2gfxclk_c; + uint32_t pixclk2gfxclk_a; + uint32_t pixclk2gfxclk_b; + uint32_t pixclk2gfxclk_c; + uint32_t dcefclk2gfxclk_a; + uint32_t dcefclk2gfxclk_b; + uint32_t dcefclk2gfxclk_c; + uint32_t phyclk2gfxclk_a; + uint32_t phyclk2gfxclk_b; + uint32_t phyclk2gfxclk_c; + uint32_t acg_gb_vdroop_table_a0; + uint32_t acg_gb_vdroop_table_a1; + uint32_t acg_gb_vdroop_table_a2; + uint32_t acg_avfsgb_fuse_table_m1; + uint32_t acg_avfsgb_fuse_table_m2; + uint32_t acg_avfsgb_fuse_table_b; + uint8_t enable_acg_gb_vdroop_table; + uint8_t enable_acg_gb_fuse_table; + uint32_t acg_dispclk2gfxclk_a; + uint32_t acg_dispclk2gfxclk_b; + uint32_t acg_dispclk2gfxclk_c; + uint32_t acg_pixclk2gfxclk_a; + uint32_t acg_pixclk2gfxclk_b; + uint32_t acg_pixclk2gfxclk_c; + uint32_t acg_dcefclk2gfxclk_a; + uint32_t acg_dcefclk2gfxclk_b; + uint32_t acg_dcefclk2gfxclk_c; + uint32_t acg_phyclk2gfxclk_a; + uint32_t acg_phyclk2gfxclk_b; + uint32_t acg_phyclk2gfxclk_c; +}; /* *************************************************************************** diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c index 720d5006ff62..cd33eb179db2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c @@ -276,7 +276,10 @@ int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atomfwctrl_avfs_parameters *param) { uint16_t idx; + uint8_t format_revision, content_revision; + struct atom_asic_profiling_info_v4_1 *profile; + struct atom_asic_profiling_info_v4_2 *profile_v4_2; idx = GetIndexIntoMasterDataTable(asic_profiling_info); profile = (struct atom_asic_profiling_info_v4_1 *) @@ -286,76 +289,172 @@ int 
pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr, if (!profile) return -1; - param->ulMaxVddc = le32_to_cpu(profile->maxvddc); - param->ulMinVddc = le32_to_cpu(profile->minvddc); - param->ulMeanNsigmaAcontant0 = - le32_to_cpu(profile->avfs_meannsigma_acontant0); - param->ulMeanNsigmaAcontant1 = - le32_to_cpu(profile->avfs_meannsigma_acontant1); - param->ulMeanNsigmaAcontant2 = - le32_to_cpu(profile->avfs_meannsigma_acontant2); - param->usMeanNsigmaDcTolSigma = - le16_to_cpu(profile->avfs_meannsigma_dc_tol_sigma); - param->usMeanNsigmaPlatformMean = - le16_to_cpu(profile->avfs_meannsigma_platform_mean); - param->usMeanNsigmaPlatformSigma = - le16_to_cpu(profile->avfs_meannsigma_platform_sigma); - param->ulGbVdroopTableCksoffA0 = - le32_to_cpu(profile->gb_vdroop_table_cksoff_a0); - param->ulGbVdroopTableCksoffA1 = - le32_to_cpu(profile->gb_vdroop_table_cksoff_a1); - param->ulGbVdroopTableCksoffA2 = - le32_to_cpu(profile->gb_vdroop_table_cksoff_a2); - param->ulGbVdroopTableCksonA0 = - le32_to_cpu(profile->gb_vdroop_table_ckson_a0); - param->ulGbVdroopTableCksonA1 = - le32_to_cpu(profile->gb_vdroop_table_ckson_a1); - param->ulGbVdroopTableCksonA2 = - le32_to_cpu(profile->gb_vdroop_table_ckson_a2); - param->ulGbFuseTableCksoffM1 = - le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m1); - param->ulGbFuseTableCksoffM2 = - le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m2); - param->ulGbFuseTableCksoffB = - le32_to_cpu(profile->avfsgb_fuse_table_cksoff_b); - param->ulGbFuseTableCksonM1 = - le32_to_cpu(profile->avfsgb_fuse_table_ckson_m1); - param->ulGbFuseTableCksonM2 = - le32_to_cpu(profile->avfsgb_fuse_table_ckson_m2); - param->ulGbFuseTableCksonB = - le32_to_cpu(profile->avfsgb_fuse_table_ckson_b); + format_revision = ((struct atom_common_table_header *)profile)->format_revision; + content_revision = ((struct atom_common_table_header *)profile)->content_revision; - param->ucEnableGbVdroopTableCkson = - profile->enable_gb_vdroop_table_ckson; - param->ucEnableGbFuseTableCkson = - profile->enable_gb_fuse_table_ckson; - param->usPsmAgeComfactor = - le16_to_cpu(profile->psm_age_comfactor); + if (format_revision == 4 && content_revision == 1) { + param->ulMaxVddc = le32_to_cpu(profile->maxvddc); + param->ulMinVddc = le32_to_cpu(profile->minvddc); + param->ulMeanNsigmaAcontant0 = + le32_to_cpu(profile->avfs_meannsigma_acontant0); + param->ulMeanNsigmaAcontant1 = + le32_to_cpu(profile->avfs_meannsigma_acontant1); + param->ulMeanNsigmaAcontant2 = + le32_to_cpu(profile->avfs_meannsigma_acontant2); + param->usMeanNsigmaDcTolSigma = + le16_to_cpu(profile->avfs_meannsigma_dc_tol_sigma); + param->usMeanNsigmaPlatformMean = + le16_to_cpu(profile->avfs_meannsigma_platform_mean); + param->usMeanNsigmaPlatformSigma = + le16_to_cpu(profile->avfs_meannsigma_platform_sigma); + param->ulGbVdroopTableCksoffA0 = + le32_to_cpu(profile->gb_vdroop_table_cksoff_a0); + param->ulGbVdroopTableCksoffA1 = + le32_to_cpu(profile->gb_vdroop_table_cksoff_a1); + param->ulGbVdroopTableCksoffA2 = + le32_to_cpu(profile->gb_vdroop_table_cksoff_a2); + param->ulGbVdroopTableCksonA0 = + le32_to_cpu(profile->gb_vdroop_table_ckson_a0); + param->ulGbVdroopTableCksonA1 = + le32_to_cpu(profile->gb_vdroop_table_ckson_a1); + param->ulGbVdroopTableCksonA2 = + le32_to_cpu(profile->gb_vdroop_table_ckson_a2); + param->ulGbFuseTableCksoffM1 = + le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m1); + param->ulGbFuseTableCksoffM2 = + le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m2); + param->ulGbFuseTableCksoffB = + 
le32_to_cpu(profile->avfsgb_fuse_table_cksoff_b); + param->ulGbFuseTableCksonM1 = + le32_to_cpu(profile->avfsgb_fuse_table_ckson_m1); + param->ulGbFuseTableCksonM2 = + le32_to_cpu(profile->avfsgb_fuse_table_ckson_m2); + param->ulGbFuseTableCksonB = + le32_to_cpu(profile->avfsgb_fuse_table_ckson_b); - param->ulDispclk2GfxclkM1 = - le32_to_cpu(profile->dispclk2gfxclk_a); - param->ulDispclk2GfxclkM2 = - le32_to_cpu(profile->dispclk2gfxclk_b); - param->ulDispclk2GfxclkB = - le32_to_cpu(profile->dispclk2gfxclk_c); - param->ulDcefclk2GfxclkM1 = - le32_to_cpu(profile->dcefclk2gfxclk_a); - param->ulDcefclk2GfxclkM2 = - le32_to_cpu(profile->dcefclk2gfxclk_b); - param->ulDcefclk2GfxclkB = - le32_to_cpu(profile->dcefclk2gfxclk_c); - param->ulPixelclk2GfxclkM1 = - le32_to_cpu(profile->pixclk2gfxclk_a); - param->ulPixelclk2GfxclkM2 = - le32_to_cpu(profile->pixclk2gfxclk_b); - param->ulPixelclk2GfxclkB = - le32_to_cpu(profile->pixclk2gfxclk_c); - param->ulPhyclk2GfxclkM1 = - le32_to_cpu(profile->phyclk2gfxclk_a); - param->ulPhyclk2GfxclkM2 = - le32_to_cpu(profile->phyclk2gfxclk_b); - param->ulPhyclk2GfxclkB = - le32_to_cpu(profile->phyclk2gfxclk_c); + param->ucEnableGbVdroopTableCkson = + profile->enable_gb_vdroop_table_ckson; + param->ucEnableGbFuseTableCkson = + profile->enable_gb_fuse_table_ckson; + param->usPsmAgeComfactor = + le16_to_cpu(profile->psm_age_comfactor); + + param->ulDispclk2GfxclkM1 = + le32_to_cpu(profile->dispclk2gfxclk_a); + param->ulDispclk2GfxclkM2 = + le32_to_cpu(profile->dispclk2gfxclk_b); + param->ulDispclk2GfxclkB = + le32_to_cpu(profile->dispclk2gfxclk_c); + param->ulDcefclk2GfxclkM1 = + le32_to_cpu(profile->dcefclk2gfxclk_a); + param->ulDcefclk2GfxclkM2 = + le32_to_cpu(profile->dcefclk2gfxclk_b); + param->ulDcefclk2GfxclkB = + le32_to_cpu(profile->dcefclk2gfxclk_c); + param->ulPixelclk2GfxclkM1 = + le32_to_cpu(profile->pixclk2gfxclk_a); + param->ulPixelclk2GfxclkM2 = + le32_to_cpu(profile->pixclk2gfxclk_b); + param->ulPixelclk2GfxclkB = + le32_to_cpu(profile->pixclk2gfxclk_c); + param->ulPhyclk2GfxclkM1 = + le32_to_cpu(profile->phyclk2gfxclk_a); + param->ulPhyclk2GfxclkM2 = + le32_to_cpu(profile->phyclk2gfxclk_b); + param->ulPhyclk2GfxclkB = + le32_to_cpu(profile->phyclk2gfxclk_c); + param->ulAcgGbVdroopTableA0 = 0; + param->ulAcgGbVdroopTableA1 = 0; + param->ulAcgGbVdroopTableA2 = 0; + param->ulAcgGbFuseTableM1 = 0; + param->ulAcgGbFuseTableM2 = 0; + param->ulAcgGbFuseTableB = 0; + param->ucAcgEnableGbVdroopTable = 0; + param->ucAcgEnableGbFuseTable = 0; + } else if (format_revision == 4 && content_revision == 2) { + profile_v4_2 = (struct atom_asic_profiling_info_v4_2 *)profile; + param->ulMaxVddc = le32_to_cpu(profile_v4_2->maxvddc); + param->ulMinVddc = le32_to_cpu(profile_v4_2->minvddc); + param->ulMeanNsigmaAcontant0 = + le32_to_cpu(profile_v4_2->avfs_meannsigma_acontant0); + param->ulMeanNsigmaAcontant1 = + le32_to_cpu(profile_v4_2->avfs_meannsigma_acontant1); + param->ulMeanNsigmaAcontant2 = + le32_to_cpu(profile_v4_2->avfs_meannsigma_acontant2); + param->usMeanNsigmaDcTolSigma = + le16_to_cpu(profile_v4_2->avfs_meannsigma_dc_tol_sigma); + param->usMeanNsigmaPlatformMean = + le16_to_cpu(profile_v4_2->avfs_meannsigma_platform_mean); + param->usMeanNsigmaPlatformSigma = + le16_to_cpu(profile_v4_2->avfs_meannsigma_platform_sigma); + param->ulGbVdroopTableCksoffA0 = + le32_to_cpu(profile_v4_2->gb_vdroop_table_cksoff_a0); + param->ulGbVdroopTableCksoffA1 = + le32_to_cpu(profile_v4_2->gb_vdroop_table_cksoff_a1); + param->ulGbVdroopTableCksoffA2 = + 
le32_to_cpu(profile_v4_2->gb_vdroop_table_cksoff_a2); + param->ulGbVdroopTableCksonA0 = + le32_to_cpu(profile_v4_2->gb_vdroop_table_ckson_a0); + param->ulGbVdroopTableCksonA1 = + le32_to_cpu(profile_v4_2->gb_vdroop_table_ckson_a1); + param->ulGbVdroopTableCksonA2 = + le32_to_cpu(profile_v4_2->gb_vdroop_table_ckson_a2); + param->ulGbFuseTableCksoffM1 = + le32_to_cpu(profile_v4_2->avfsgb_fuse_table_cksoff_m1); + param->ulGbFuseTableCksoffM2 = + le32_to_cpu(profile_v4_2->avfsgb_fuse_table_cksoff_m2); + param->ulGbFuseTableCksoffB = + le32_to_cpu(profile_v4_2->avfsgb_fuse_table_cksoff_b); + param->ulGbFuseTableCksonM1 = + le32_to_cpu(profile_v4_2->avfsgb_fuse_table_ckson_m1); + param->ulGbFuseTableCksonM2 = + le32_to_cpu(profile_v4_2->avfsgb_fuse_table_ckson_m2); + param->ulGbFuseTableCksonB = + le32_to_cpu(profile_v4_2->avfsgb_fuse_table_ckson_b); + + param->ucEnableGbVdroopTableCkson = + profile_v4_2->enable_gb_vdroop_table_ckson; + param->ucEnableGbFuseTableCkson = + profile_v4_2->enable_gb_fuse_table_ckson; + param->usPsmAgeComfactor = + le16_to_cpu(profile_v4_2->psm_age_comfactor); + + param->ulDispclk2GfxclkM1 = + le32_to_cpu(profile_v4_2->dispclk2gfxclk_a); + param->ulDispclk2GfxclkM2 = + le32_to_cpu(profile_v4_2->dispclk2gfxclk_b); + param->ulDispclk2GfxclkB = + le32_to_cpu(profile_v4_2->dispclk2gfxclk_c); + param->ulDcefclk2GfxclkM1 = + le32_to_cpu(profile_v4_2->dcefclk2gfxclk_a); + param->ulDcefclk2GfxclkM2 = + le32_to_cpu(profile_v4_2->dcefclk2gfxclk_b); + param->ulDcefclk2GfxclkB = + le32_to_cpu(profile_v4_2->dcefclk2gfxclk_c); + param->ulPixelclk2GfxclkM1 = + le32_to_cpu(profile_v4_2->pixclk2gfxclk_a); + param->ulPixelclk2GfxclkM2 = + le32_to_cpu(profile_v4_2->pixclk2gfxclk_b); + param->ulPixelclk2GfxclkB = + le32_to_cpu(profile_v4_2->pixclk2gfxclk_c); + param->ulPhyclk2GfxclkM1 = + le32_to_cpu(profile->phyclk2gfxclk_a); + param->ulPhyclk2GfxclkM2 = + le32_to_cpu(profile_v4_2->phyclk2gfxclk_b); + param->ulPhyclk2GfxclkB = + le32_to_cpu(profile_v4_2->phyclk2gfxclk_c); + param->ulAcgGbVdroopTableA0 = le32_to_cpu(profile_v4_2->acg_gb_vdroop_table_a0); + param->ulAcgGbVdroopTableA1 = le32_to_cpu(profile_v4_2->acg_gb_vdroop_table_a1); + param->ulAcgGbVdroopTableA2 = le32_to_cpu(profile_v4_2->acg_gb_vdroop_table_a2); + param->ulAcgGbFuseTableM1 = le32_to_cpu(profile_v4_2->acg_avfsgb_fuse_table_m1); + param->ulAcgGbFuseTableM2 = le32_to_cpu(profile_v4_2->acg_avfsgb_fuse_table_m2); + param->ulAcgGbFuseTableB = le32_to_cpu(profile_v4_2->acg_avfsgb_fuse_table_b); + param->ucAcgEnableGbVdroopTable = le32_to_cpu(profile_v4_2->enable_acg_gb_vdroop_table); + param->ucAcgEnableGbFuseTable = le32_to_cpu(profile_v4_2->enable_acg_gb_fuse_table); + } else { + pr_info("Invalid VBIOS AVFS ProfilingInfo Revision!\n"); + return -EINVAL; + } return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h index 81908b5cfd5f..8e6b1f0ddebc 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h @@ -109,6 +109,14 @@ struct pp_atomfwctrl_avfs_parameters { uint32_t ulPhyclk2GfxclkM1; uint32_t ulPhyclk2GfxclkM2; uint32_t ulPhyclk2GfxclkB; + uint32_t ulAcgGbVdroopTableA0; + uint32_t ulAcgGbVdroopTableA1; + uint32_t ulAcgGbVdroopTableA2; + uint32_t ulAcgGbFuseTableM1; + uint32_t ulAcgGbFuseTableM2; + uint32_t ulAcgGbFuseTableB; + uint32_t ucAcgEnableGbVdroopTable; + uint32_t ucAcgEnableGbFuseTable; }; struct pp_atomfwctrl_gpio_parameters { From 3272cfcf73b9e0932a037ed711347ce9dc97c16e 
Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Sat, 24 Jun 2017 18:27:07 +0800 Subject: [PATCH 0285/1795] drm/amd/powerplay: export ACG related smu message for vega10 Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/smu9.h | 13 +++++++------ drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h | 4 ++++ 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9.h b/drivers/gpu/drm/amd/powerplay/inc/smu9.h index 9ef2490c7c2e..550ed675027a 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu9.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu9.h @@ -55,9 +55,9 @@ #define FEATURE_FW_CTF_BIT 23 #define FEATURE_LED_DISPLAY_BIT 24 #define FEATURE_FAN_CONTROL_BIT 25 -#define FEATURE_VOLTAGE_CONTROLLER_BIT 26 -#define FEATURE_SPARE_27_BIT 27 -#define FEATURE_SPARE_28_BIT 28 +#define FEATURE_FAST_PPT_BIT 26 +#define FEATURE_GFX_EDC_BIT 27 +#define FEATURE_ACG_BIT 28 #define FEATURE_SPARE_29_BIT 29 #define FEATURE_SPARE_30_BIT 30 #define FEATURE_SPARE_31_BIT 31 @@ -90,9 +90,10 @@ #define FFEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT ) #define FFEATURE_LED_DISPLAY_MASK (1 << FEATURE_LED_DISPLAY_BIT ) #define FFEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT ) -#define FFEATURE_VOLTAGE_CONTROLLER_MASK (1 << FEATURE_VOLTAGE_CONTROLLER_BIT ) -#define FFEATURE_SPARE_27_MASK (1 << FEATURE_SPARE_27_BIT ) -#define FFEATURE_SPARE_28_MASK (1 << FEATURE_SPARE_28_BIT ) + +#define FEATURE_FAST_PPT_MASK (1 << FAST_PPT_BIT ) +#define FEATURE_GFX_EDC_MASK (1 << FEATURE_GFX_EDC_BIT ) +#define FEATURE_ACG_MASK (1 << FEATURE_ACG_BIT ) #define FFEATURE_SPARE_29_MASK (1 << FEATURE_SPARE_29_BIT ) #define FFEATURE_SPARE_30_MASK (1 << FEATURE_SPARE_30_BIT ) #define FFEATURE_SPARE_31_MASK (1 << FEATURE_SPARE_31_BIT ) diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h index b4af9e85dfa5..cb070ebc7de1 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h @@ -124,6 +124,10 @@ typedef uint16_t PPSMC_Result; #define PPSMC_MSG_NumOfDisplays 0x56 #define PPSMC_MSG_ReadSerialNumTop32 0x58 #define PPSMC_MSG_ReadSerialNumBottom32 0x59 +#define PPSMC_MSG_RunAcgBtc 0x5C +#define PPSMC_MSG_RunAcgInClosedLoop 0x5D +#define PPSMC_MSG_RunAcgInOpenLoop 0x5E +#define PPSMC_MSG_InitializeAcg 0x5F #define PPSMC_MSG_GetCurrPkgPwr 0x61 #define PPSMC_Message_Count 0x62 From fc3a4fc6317a807002d4f3b6a8af50983feb98cd Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Sat, 24 Jun 2017 18:11:53 +0800 Subject: [PATCH 0286/1795] drm/amd/powerplay: add acg support in pptable for vega10 Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h index 532186b6f941..f6d6c61f796a 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h @@ -312,7 +312,10 @@ typedef struct { PllSetting_t GfxBoostState; - uint32_t Reserved[14]; + uint8_t AcgEnable[NUM_GFXCLK_DPM_LEVELS]; + GbVdroopTable_t AcgBtcGbVdroopTable; + QuadraticInt_t AcgAvfsGb; + uint32_t Reserved[4]; /* Padding - ignore */ uint32_t MmHubPadding[7]; /* SMU internal use */ From bdb8cd10b99d55d35b869fbf5cd6df9420b9bd85 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Sat, 24 Jun 2017 
18:13:26 +0800 Subject: [PATCH 0287/1795] drm/amd/powerplay: enable ACG feature on vega10. Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 78 ++++++++++++++++++- .../drm/amd/powerplay/hwmgr/vega10_hwmgr.h | 6 +- 2 files changed, 80 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index d6f097f44b6c..b14ea319bd95 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -321,8 +321,7 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) FEATURE_LED_DISPLAY_BIT; data->smu_features[GNLD_FAN_CONTROL].smu_feature_id = FEATURE_FAN_CONTROL_BIT; - data->smu_features[GNLD_VOLTAGE_CONTROLLER].smu_feature_id = - FEATURE_VOLTAGE_CONTROLLER_BIT; + data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT; if (!data->registry_data.prefetcher_dpm_key_disabled) data->smu_features[GNLD_DPM_PREFETCHER].supported = true; @@ -386,6 +385,12 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) if (data->registry_data.vr0hot_enabled) data->smu_features[GNLD_VR0HOT].supported = true; + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetSmuVersion); + vega10_read_arg_from_smc(hwmgr->smumgr, &(data->smu_version)); + /* ACG firmware has major version 5 */ + if ((data->smu_version & 0xff000000) == 0x5000000) + data->smu_features[GNLD_ACG].supported = true; + } #ifdef PPLIB_VEGA10_EVV_SUPPORT @@ -2228,6 +2233,21 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24; pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12; pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12; + + pp_table->AcgBtcGbVdroopTable.a0 = avfs_params.ulAcgGbVdroopTableA0; + pp_table->AcgBtcGbVdroopTable.a0_shift = 20; + pp_table->AcgBtcGbVdroopTable.a1 = avfs_params.ulAcgGbVdroopTableA1; + pp_table->AcgBtcGbVdroopTable.a1_shift = 20; + pp_table->AcgBtcGbVdroopTable.a2 = avfs_params.ulAcgGbVdroopTableA2; + pp_table->AcgBtcGbVdroopTable.a2_shift = 20; + + pp_table->AcgAvfsGb.m1 = avfs_params.ulAcgGbFuseTableM1; + pp_table->AcgAvfsGb.m2 = avfs_params.ulAcgGbFuseTableM2; + pp_table->AcgAvfsGb.b = avfs_params.ulAcgGbFuseTableB; + pp_table->AcgAvfsGb.m1_shift = 0; + pp_table->AcgAvfsGb.m2_shift = 0; + pp_table->AcgAvfsGb.b_shift = 0; + } else { data->smu_features[GNLD_AVFS].supported = false; } @@ -2236,6 +2256,55 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) return 0; } +static int vega10_acg_enable(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + uint32_t agc_btc_response; + + if (data->smu_features[GNLD_ACG].supported) { + if (0 == vega10_enable_smc_features(hwmgr->smumgr, true, + data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap)) + data->smu_features[GNLD_DPM_PREFETCHER].enabled = true; + + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_InitializeAcg); + + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgBtc); + vega10_read_arg_from_smc(hwmgr->smumgr, &agc_btc_response);; + + if (1 == agc_btc_response) { + if (1 == data->acg_loop_state) + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgInClosedLoop); + else if (2 == data->acg_loop_state) + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgInOpenLoop); + if (0 == vega10_enable_smc_features(hwmgr->smumgr, true, + data->smu_features[GNLD_ACG].smu_feature_bitmap)) 
+ data->smu_features[GNLD_ACG].enabled = true; + } else { + pr_info("[ACG_Enable] ACG BTC Returned Failed Status!\n"); + data->smu_features[GNLD_ACG].enabled = false; + } + } + + return 0; +} + +static int vega10_acg_disable(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_ACG].supported) { + if (data->smu_features[GNLD_ACG].enabled) { + if (0 == vega10_enable_smc_features(hwmgr->smumgr, false, + data->smu_features[GNLD_ACG].smu_feature_bitmap)) + data->smu_features[GNLD_ACG].enabled = false; + } + } + + return 0; +} + static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr) { struct vega10_hwmgr *data = @@ -2506,7 +2575,7 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) result = vega10_avfs_enable(hwmgr, true); PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!", return result); - + vega10_acg_enable(hwmgr); vega10_save_default_power_profile(hwmgr); return 0; @@ -4683,6 +4752,9 @@ static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((tmp_result == 0), "Failed to disable ulv!", result = tmp_result); + tmp_result = vega10_acg_disable(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable acg!", result = tmp_result); return result; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h index 6e5c5b99593b..415080174b28 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h @@ -64,7 +64,9 @@ enum { GNLD_FW_CTF, GNLD_LED_DISPLAY, GNLD_FAN_CONTROL, - GNLD_VOLTAGE_CONTROLLER, + GNLD_FEATURE_FAST_PPT_BIT, + GNLD_DIDT, + GNLD_ACG, GNLD_FEATURES_MAX }; @@ -381,6 +383,8 @@ struct vega10_hwmgr { struct vega10_smc_state_table smc_state_table; uint32_t config_telemetry; + uint32_t smu_version; + uint32_t acg_loop_state; }; #define VEGA10_DPM2_NEAR_TDP_DEC 10 From 8fdf074f1840eae838bbccbec37d0a1504ee432b Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Tue, 6 Jun 2017 17:25:13 +0800 Subject: [PATCH 0288/1795] drm/amdgpu:fix world switch hang MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For SR-IOV, we must keep the pipeline-sync inside the protection of COND_EXEC, otherwise the commands consumed by the CPG are not consistent when a world switch is triggered, e.g.: a world switch hits and the IB frame is skipped so the fence won't signal, thus CP will jump to the next DMA frame's pipeline-sync command, which makes CP hang forever.
After the pipeline-sync is moved into COND_EXEC, consistency can be guaranteed. Signed-off-by: Monk Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 5 +++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 7 +++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 +- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 1b30d2ab9c51..659997bfff30 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -130,6 +130,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, unsigned i; int r = 0; + bool need_pipe_sync = false; if (num_ibs == 0) return -EINVAL; @@ -165,7 +166,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, if (ring->funcs->emit_pipeline_sync && job && ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) || amdgpu_vm_need_pipeline_sync(ring, job))) { - amdgpu_ring_emit_pipeline_sync(ring); + need_pipe_sync = true; dma_fence_put(tmp); } @@ -173,7 +174,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, ring->funcs->insert_start(ring); if (job) { - r = amdgpu_vm_flush(ring, job); + r = amdgpu_vm_flush(ring, job, need_pipe_sync); if (r) { amdgpu_ring_undo(ring); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index cda9e5d8b831..30c4322ddce7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -743,7 +743,7 @@ static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev) * * Emit a VM flush when it is necessary. */ -int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) +int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync) { struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->funcs->vmhub; @@ -765,12 +765,15 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) vm_flush_needed = true; } - if (!vm_flush_needed && !gds_switch_needed) + if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync) return 0; if (ring->funcs->init_cond_exec) patch_offset = amdgpu_ring_init_cond_exec(ring); + if (need_pipe_sync) + amdgpu_ring_emit_pipeline_sync(ring); + if (ring->funcs->emit_vm_flush && vm_flush_needed) { struct dma_fence *fence; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 936f158bc5ec..3441ec58c823 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -222,7 +222,7 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_sync *sync, struct dma_fence *fence, struct amdgpu_job *job); -int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); +int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync); void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub, unsigned vmid); void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev); From c708535e9ced6213b7c327eff88970e95515ec8a Mon Sep 17 00:00:00 2001 From: Shaoyun Liu Date: Wed, 5 Jul 2017 10:53:55 -0400 Subject: [PATCH 0289/1795] drm/amdgpu: Add WREG32_SOC15_NO_KIQ macro define Signed-off-by: Shaoyun Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15_common.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git
a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index e2d330eed952..7a8e4e28abb2 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -77,6 +77,13 @@ struct nbio_pcie_index_data { (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \ (ip##_BASE__INST##inst##_SEG4 + reg))))), value) +#define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \ + WREG32_NO_KIQ( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \ + (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \ + (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \ + (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \ + (ip##_BASE__INST##inst##_SEG4 + reg))))), value) + #define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \ WREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \ (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \ From 57ea8c7b4d831caac27631329507baf2fbe094ae Mon Sep 17 00:00:00 2001 From: Shaoyun Liu Date: Wed, 5 Jul 2017 10:56:14 -0400 Subject: [PATCH 0290/1795] drm/amdgpu: NO KIQ usage on nbio hdp flush routine nbio hdp flush routine are called within atomic context. Avoid use KIQ when write to the HDP_MEM_COHERENCY_FLUSH_CNTL register since this register has its own VF copy Signed-off-by: Shaoyun Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c | 2 +- drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c index 1e272f785def..61c00281a61b 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c @@ -67,7 +67,7 @@ void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable) void nbio_v6_1_hdp_flush(struct amdgpu_device *adev) { - WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, 0); + WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, 0); } u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c index aa04632523fa..11b70d601922 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c @@ -65,7 +65,7 @@ void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable) void nbio_v7_0_hdp_flush(struct amdgpu_device *adev) { - WREG32_SOC15(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0); + WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0); } u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev) From edc4d3db0660624996ef4d1bbc7e8b39572fbe2e Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Fri, 2 Jun 2017 10:42:28 +0800 Subject: [PATCH 0291/1795] drm/amdgpu: remove superfluous check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Huang Rui Reviewed-by: Christian König Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 15 +++++---------- drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 8 ++++---- 2 files changed, 9 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 4083be61b328..dc2cc28c9588 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -435,16 +435,11 @@ static int psp_hw_fini(void *handle) psp_ring_destroy(psp, 
PSP_RING_TYPE__KM); - if (psp->tmr_buf) - amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf); - - if (psp->fw_pri_buf) - amdgpu_bo_free_kernel(&psp->fw_pri_bo, - &psp->fw_pri_mc_addr, &psp->fw_pri_buf); - - if (psp->fence_buf_bo) - amdgpu_bo_free_kernel(&psp->fence_buf_bo, - &psp->fence_buf_mc_addr, &psp->fence_buf); + amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf); + amdgpu_bo_free_kernel(&psp->fw_pri_bo, + &psp->fw_pri_mc_addr, &psp->fw_pri_buf); + amdgpu_bo_free_kernel(&psp->fence_buf_bo, + &psp->fence_buf_mc_addr, &psp->fence_buf); kfree(psp->cmd); psp->cmd = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index c98d77d0c8f8..48f69fe6b9e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -341,10 +341,10 @@ int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type) ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), 0x80000000, 0x80000000, false); - if (ring->ring_mem) - amdgpu_bo_free_kernel(&adev->firmware.rbuf, - &ring->ring_mem_mc_addr, - (void **)&ring->ring_mem); + amdgpu_bo_free_kernel(&adev->firmware.rbuf, + &ring->ring_mem_mc_addr, + (void **)&ring->ring_mem); + return ret; } From 311146c91a2fdb4de92a0769cc1e5359a49b4c04 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Sun, 11 Jun 2017 18:28:00 +0800 Subject: [PATCH 0292/1795] drm/amdgpu: fix missed asd bo free when hw_fini MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Huang Rui Reviewed-by: Christian König Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index dc2cc28c9588..e54c8fe90288 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -440,6 +440,8 @@ static int psp_hw_fini(void *handle) &psp->fw_pri_mc_addr, &psp->fw_pri_buf); amdgpu_bo_free_kernel(&psp->fence_buf_bo, &psp->fence_buf_mc_addr, &psp->fence_buf); + amdgpu_bo_free_kernel(&psp->asd_shared_bo, &psp->asd_shared_mc_addr, + &psp->asd_shared_buf); kfree(psp->cmd); psp->cmd = NULL; From a1952da73f8951eeb6e99e2b4fd1b8680ed9d801 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Sun, 11 Jun 2017 18:57:08 +0800 Subject: [PATCH 0293/1795] drm/amdgpu: make psp cmd buffer as a reserve memory MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Huang Rui Acked-by: Christian König Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 38 ++++++++++++------------- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 5 ++++ 2 files changed, 23 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index e54c8fe90288..5346f291f881 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -118,33 +118,18 @@ psp_cmd_submit_buf(struct psp_context *psp, int index) { int ret; - struct amdgpu_bo *cmd_buf_bo; - uint64_t cmd_buf_mc_addr; - struct psp_gfx_cmd_resp *cmd_buf_mem; - struct amdgpu_device *adev = psp->adev; - ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &cmd_buf_bo, &cmd_buf_mc_addr, - (void **)&cmd_buf_mem); - if (ret) - return ret; + memset(psp->cmd_buf_mem, 0, 
PSP_CMD_BUFFER_SIZE); - memset(cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE); + memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp)); - memcpy(cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp)); - - ret = psp_cmd_submit(psp, ucode, cmd_buf_mc_addr, + ret = psp_cmd_submit(psp, ucode, psp->cmd_buf_mc_addr, fence_mc_addr, index); while (*((unsigned int *)psp->fence_buf) != index) { msleep(1); } - amdgpu_bo_free_kernel(&cmd_buf_bo, - &cmd_buf_mc_addr, - (void **)&cmd_buf_mem); - return ret; } @@ -351,6 +336,13 @@ static int psp_load_fw(struct amdgpu_device *adev) &psp->fence_buf_bo, &psp->fence_buf_mc_addr, &psp->fence_buf); + if (ret) + goto failed_mem2; + + ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, + (void **)&psp->cmd_buf_mem); if (ret) goto failed_mem1; @@ -358,7 +350,7 @@ static int psp_load_fw(struct amdgpu_device *adev) ret = psp_ring_init(psp, PSP_RING_TYPE__KM); if (ret) - goto failed_mem1; + goto failed_mem; ret = psp_tmr_init(psp); if (ret) @@ -379,9 +371,13 @@ static int psp_load_fw(struct amdgpu_device *adev) return 0; failed_mem: + amdgpu_bo_free_kernel(&psp->cmd_buf_bo, + &psp->cmd_buf_mc_addr, + (void **)&psp->cmd_buf_mem); +failed_mem1: amdgpu_bo_free_kernel(&psp->fence_buf_bo, &psp->fence_buf_mc_addr, &psp->fence_buf); -failed_mem1: +failed_mem2: amdgpu_bo_free_kernel(&psp->fw_pri_bo, &psp->fw_pri_mc_addr, &psp->fw_pri_buf); failed: @@ -442,6 +438,8 @@ static int psp_hw_fini(void *handle) &psp->fence_buf_mc_addr, &psp->fence_buf); amdgpu_bo_free_kernel(&psp->asd_shared_bo, &psp->asd_shared_mc_addr, &psp->asd_shared_buf); + amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, + (void **)&psp->cmd_buf_mem); kfree(psp->cmd); psp->cmd = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 1a1c8b469f93..538fa9dbfb21 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -108,6 +108,11 @@ struct psp_context struct amdgpu_bo *fence_buf_bo; uint64_t fence_buf_mc_addr; void *fence_buf; + + /* cmd buffer */ + struct amdgpu_bo *cmd_buf_bo; + uint64_t cmd_buf_mc_addr; + struct psp_gfx_cmd_resp *cmd_buf_mem; }; struct amdgpu_psp_funcs { From 342169c42214bd7682f550eccae119e85f09006c Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Tue, 4 Jul 2017 16:14:06 +0800 Subject: [PATCH 0294/1795] drm/amdgpu: set firmware loading type as direct by default for raven Previously, the driver could not enable psp via the kernel parameter for raven. We should open this path again and keep direct loading as the default until psp firmware loading is workable.
Signed-off-by: Huang Rui Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 17a935df8e1d..fcfb9d4f7477 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -275,14 +275,10 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) else return AMDGPU_FW_LOAD_PSP; case CHIP_RAVEN: -#if 0 - if (!load_type) + if (load_type != 2) return AMDGPU_FW_LOAD_DIRECT; else return AMDGPU_FW_LOAD_PSP; -#else - return AMDGPU_FW_LOAD_DIRECT; -#endif default: DRM_ERROR("Unknow firmware load type\n"); } From 8882635119f3d0bd9071ea00d873014f56f37cd0 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 6 Jul 2017 09:36:27 +0800 Subject: [PATCH 0295/1795] drm/amd/powerplay: fixed wrong data type declaration for ppfeaturemask Signed-off-by: Evan Quan Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f8face2a1e77..d90dc426ee5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -199,7 +199,7 @@ MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444); MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))"); -module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, int, 0444); +module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444); MODULE_PARM_DESC(no_evict, "Support pinning request from user space (1 = enable, 0 = disable (default))"); module_param_named(no_evict, amdgpu_no_evict, int, 0444); From bcadae84232d42260f802152609060cc766e1166 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 15 Jun 2017 13:58:41 +0800 Subject: [PATCH 0296/1795] drm/amd/powerplay: fix avfs state update error on polaris. Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 9616cedc139c..7e0347088941 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -200,15 +200,16 @@ polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT) PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(smumgr), "[AVFS][Polaris10_AVFSEventMgr] Failure at SmuPolaris10_PerformBTC. AVFS Disabled", return -1); - + smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS; break; case AVFS_BTC_DISABLED: + case AVFS_BTC_ENABLEAVFS: case AVFS_BTC_NOTSUPPORTED: break; default: - pr_info("[AVFS] Something is broken. See log!"); + pr_err("AVFS failed status is %x!\n", smu_data->avfs.avfs_btc_status); break; } From b37afd41a1f5a7d9dbc4cc6ede62e590a31e2192 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 15 Jun 2017 14:02:51 +0800 Subject: [PATCH 0297/1795] drm/amd/powerplay: refine avfs enable code on fiji. 1. simplify the avfs state switch. 2. delete the save/restore VFT table functions, as they are not supported by fiji. 3. implement the thermal_avfs_enable function.
Signed-off-by: Rex Zhu Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/smumgr/fiji_smc.c | 19 +++ .../gpu/drm/amd/powerplay/smumgr/fiji_smc.h | 1 + .../drm/amd/powerplay/smumgr/fiji_smumgr.c | 115 ++---------------- 3 files changed, 28 insertions(+), 107 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c index 6a320b27aefd..ca24e155ef2d 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c @@ -2129,6 +2129,25 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) return 0; } + +int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr) +{ + int ret; + struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(smumgr->backend); + + if (smu_data->avfs.AvfsBtcStatus != AVFS_BTC_ENABLEAVFS) + return 0; + + ret = smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs); + + if (!ret) + /* If this param is not changed, this function could fire unnecessarily */ + smu_data->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_PREVIOUSLY; + + return ret; +} + static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h index 0e9e1f2d7238..d9c72d992e30 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h @@ -48,5 +48,6 @@ int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr); bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr); int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, struct amd_pp_profile *request); +int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr); #endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index a1cb78552cf6..719e8853b0dc 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -194,22 +194,10 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr) int result = 0; struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); - priv->avfs.AvfsBtcStatus = AVFS_BTC_STARTED; if (priv->avfs.AvfsBtcParam) { if (!smum_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, priv->avfs.AvfsBtcParam)) { - if (!smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs)) { - priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_UNSAVED; - result = 0; - } else { - pr_err("[AVFS][fiji_start_avfs_btc] Attempt" - " to Enable AVFS Failed!"); - smum_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs); - result = -1; - } - } else { - pr_err("[AVFS][fiji_start_avfs_btc] " - "PerformBTC SMU msg failed"); + pr_err("PerformBTC SMU msg failed \n"); result = -1; } } @@ -224,42 +212,6 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr) return result; } -static int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr) -{ - int result = 0; - uint32_t table_start; - uint32_t charz_freq_addr, inversion_voltage_addr, charz_freq; - uint16_t inversion_voltage; - - charz_freq = 0x30750000; /* In 10KHz units 0x00007530 Actual value */ - inversion_voltage = 0x1A04; /* mV Q14.2 0x41A Actual value */ - - PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr, - SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, - PmFuseTable), &table_start, 0x40000), - "[AVFS][Fiji_SetupGfxLvlStruct] SMU could 
not communicate " - "starting address of PmFuse structure", - return -1;); - - charz_freq_addr = table_start + - offsetof(struct SMU73_Discrete_PmFuses, PsmCharzFreq); - inversion_voltage_addr = table_start + - offsetof(struct SMU73_Discrete_PmFuses, InversionVoltage); - - result = smu7_copy_bytes_to_smc(smumgr, charz_freq_addr, - (uint8_t *)(&charz_freq), sizeof(charz_freq), 0x40000); - PP_ASSERT_WITH_CODE(0 == result, - "[AVFS][fiji_setup_pm_fuse_for_avfs] charz_freq could not " - "be populated.", return -1;); - - result = smu7_copy_bytes_to_smc(smumgr, inversion_voltage_addr, - (uint8_t *)(&inversion_voltage), sizeof(inversion_voltage), 0x40000); - PP_ASSERT_WITH_CODE(0 == result, "[AVFS][fiji_setup_pm_fuse_for_avfs] " - "charz_freq could not be populated.", return -1;); - - return result; -} - static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) { int32_t vr_config; @@ -298,67 +250,18 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) return 0; } -/* Work in Progress */ -static int fiji_restore_vft_table(struct pp_smumgr *smumgr) -{ - struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); - - if (AVFS_BTC_COMPLETED_SAVED == priv->avfs.AvfsBtcStatus) { - priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_RESTORED; - return 0; - } else - return -EINVAL; -} - -/* Work in Progress */ -static int fiji_save_vft_table(struct pp_smumgr *smumgr) -{ - struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); - - if (AVFS_BTC_COMPLETED_SAVED == priv->avfs.AvfsBtcStatus) { - priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_RESTORED; - return 0; - } else - return -EINVAL; -} - static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started) { struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); switch (priv->avfs.AvfsBtcStatus) { - case AVFS_BTC_COMPLETED_SAVED: /*S3 State - Pre SMU Start */ - priv->avfs.AvfsBtcStatus = AVFS_BTC_RESTOREVFT_FAILED; - PP_ASSERT_WITH_CODE(0 == fiji_restore_vft_table(smumgr), - "[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics " - "Level table over to SMU", - return -1;); - priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_RESTORED; - break; - case AVFS_BTC_COMPLETED_RESTORED: /*S3 State - Post SMU Start*/ - priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR; - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr, - 0x666), - "[AVFS][fiji_avfs_event_mgr] SMU did not respond " - "correctly to VftTableIsValid Msg", - return -1;); - priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR; - PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr, - PPSMC_MSG_EnableAvfs), - "[AVFS][fiji_avfs_event_mgr] SMU did not respond " - "correctly to EnableAvfs Message Msg", - return -1;); - priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_SAVED; + case AVFS_BTC_COMPLETED_PREVIOUSLY: break; + case AVFS_BTC_BOOT: /*Cold Boot State - Post SMU Start*/ if (!smu_started) break; priv->avfs.AvfsBtcStatus = AVFS_BTC_FAILED; - PP_ASSERT_WITH_CODE(0 == fiji_setup_pm_fuse_for_avfs(smumgr), - "[AVFS][fiji_avfs_event_mgr] Failure at " - "fiji_setup_pm_fuse_for_avfs", - return -1;); - priv->avfs.AvfsBtcStatus = AVFS_BTC_DPMTABLESETUP_FAILED; PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(smumgr), "[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level" " table over to SMU", @@ -373,18 +276,15 @@ static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started) "[AVFS][fiji_avfs_event_mgr] Failure at " "fiji_start_avfs_btc. 
AVFS Disabled", return -1;); - priv->avfs.AvfsBtcStatus = AVFS_BTC_SAVEVFT_FAILED; - PP_ASSERT_WITH_CODE(0 == fiji_save_vft_table(smumgr), - "[AVFS][fiji_avfs_event_mgr] Could not save VFT Table", - return -1;); - priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_SAVED; + + priv->avfs.AvfsBtcStatus = AVFS_BTC_ENABLEAVFS; break; case AVFS_BTC_DISABLED: /* Do nothing */ - break; case AVFS_BTC_NOTSUPPORTED: /* Do nothing */ + case AVFS_BTC_ENABLEAVFS: break; default: - pr_err("[AVFS] Something is broken. See log!"); + pr_err("AVFS failed status is %x !\n", priv->avfs.AvfsBtcStatus); break; } return 0; @@ -514,6 +414,7 @@ const struct pp_smumgr_func fiji_smu_funcs = { .init_smc_table = fiji_init_smc_table, .update_sclk_threshold = fiji_update_sclk_threshold, .thermal_setup_fan_table = fiji_thermal_setup_fan_table, + .thermal_avfs_enable = fiji_thermal_avfs_enable, .populate_all_graphic_levels = fiji_populate_all_graphic_levels, .populate_all_memory_levels = fiji_populate_all_memory_levels, .get_mac_definition = fiji_get_mac_definition, From ce09d8ecb1b5db528b43e4799a267ea446f93799 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 5 Jul 2017 18:12:46 +0800 Subject: [PATCH 0298/1795] drm/amd/powerplay: move VI common AVFS code to smu7_smumgr.c Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 3 + .../gpu/drm/amd/powerplay/smumgr/fiji_smc.c | 6 +- .../drm/amd/powerplay/smumgr/fiji_smumgr.c | 75 ++++++++----------- .../drm/amd/powerplay/smumgr/fiji_smumgr.h | 11 --- .../drm/amd/powerplay/smumgr/polaris10_smc.c | 4 +- .../amd/powerplay/smumgr/polaris10_smumgr.c | 29 +++---- .../amd/powerplay/smumgr/polaris10_smumgr.h | 12 +-- .../drm/amd/powerplay/smumgr/smu7_smumgr.c | 6 +- .../drm/amd/powerplay/smumgr/smu7_smumgr.h | 8 +- drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 8 ++ 10 files changed, 75 insertions(+), 87 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index 976e942ec694..5d61cc9d4554 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -131,6 +131,7 @@ struct pp_smumgr_func { bool (*is_dpm_running)(struct pp_hwmgr *hwmgr); int (*populate_requested_graphic_levels)(struct pp_hwmgr *hwmgr, struct amd_pp_profile *request); + bool (*is_hw_avfs_present)(struct pp_smumgr *smumgr); }; struct pp_smumgr { @@ -202,6 +203,8 @@ extern bool smum_is_dpm_running(struct pp_hwmgr *hwmgr); extern int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, struct amd_pp_profile *request); +extern bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr); + #define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT #define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c index ca24e155ef2d..8712f093d6d9 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c @@ -2134,16 +2134,16 @@ int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr) { int ret; struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); - if (smu_data->avfs.AvfsBtcStatus != AVFS_BTC_ENABLEAVFS) + if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS) return 0; ret = smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs); if (!ret) 
/* If this param is not changed, this function could fire unnecessarily */ - smu_data->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_PREVIOUSLY; + smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY; return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 719e8853b0dc..6ae948fc524f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -161,44 +161,47 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr) static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr) { - int i, result = -1; + int i; + int result = -EINVAL; uint32_t reg, data; - const PWR_Command_Table *virus = PwrVirusTable; - struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); - priv->avfs.AvfsBtcStatus = AVFS_LOAD_VIRUS; - for (i = 0; (i < PWR_VIRUS_TABLE_SIZE); i++) { - switch (virus->command) { + const PWR_Command_Table *pvirus = PwrVirusTable; + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + + for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) { + switch (pvirus->command) { case PwrCmdWrite: - reg = virus->reg; - data = virus->data; + reg = pvirus->reg; + data = pvirus->data; cgs_write_register(smumgr->device, reg, data); break; + case PwrCmdEnd: - priv->avfs.AvfsBtcStatus = AVFS_BTC_VIRUS_LOADED; result = 0; break; + default: - pr_err("Table Exit with Invalid Command!"); - priv->avfs.AvfsBtcStatus = AVFS_BTC_VIRUS_FAIL; - result = -1; + pr_info("Table Exit with Invalid Command!"); + smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL; + result = -EINVAL; break; } - virus++; + pvirus++; } + return result; } static int fiji_start_avfs_btc(struct pp_smumgr *smumgr) { int result = 0; - struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); - if (priv->avfs.AvfsBtcParam) { - if (!smum_send_msg_to_smc_with_parameter(smumgr, - PPSMC_MSG_PerformBtc, priv->avfs.AvfsBtcParam)) { - pr_err("PerformBTC SMU msg failed \n"); - result = -1; + if (0 != smu_data->avfs.avfs_btc_param) { + if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) { + pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed"); + result = -EINVAL; } } /* Soft-Reset to reset the engine before loading uCode */ @@ -252,39 +255,39 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started) { - struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); - switch (priv->avfs.AvfsBtcStatus) { + switch (smu_data->avfs.avfs_btc_status) { case AVFS_BTC_COMPLETED_PREVIOUSLY: break; case AVFS_BTC_BOOT: /*Cold Boot State - Post SMU Start*/ if (!smu_started) break; - priv->avfs.AvfsBtcStatus = AVFS_BTC_FAILED; + smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED; PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(smumgr), "[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level" " table over to SMU", - return -1;); - priv->avfs.AvfsBtcStatus = AVFS_BTC_VIRUS_FAIL; + return -EINVAL;); + smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL; PP_ASSERT_WITH_CODE(0 == fiji_setup_pwr_virus(smumgr), "[AVFS][fiji_avfs_event_mgr] Could not setup " "Pwr Virus for AVFS ", - return -1;); - priv->avfs.AvfsBtcStatus = AVFS_BTC_FAILED; + return -EINVAL;); + 
smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED; PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(smumgr), "[AVFS][fiji_avfs_event_mgr] Failure at " "fiji_start_avfs_btc. AVFS Disabled", - return -1;); + return -EINVAL;); - priv->avfs.AvfsBtcStatus = AVFS_BTC_ENABLEAVFS; + smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS; break; case AVFS_BTC_DISABLED: /* Do nothing */ case AVFS_BTC_NOTSUPPORTED: /* Do nothing */ case AVFS_BTC_ENABLEAVFS: break; default: - pr_err("AVFS failed status is %x !\n", priv->avfs.AvfsBtcStatus); + pr_err("AVFS failed status is %x !\n", smu_data->avfs.avfs_btc_status); break; } return 0; @@ -377,19 +380,6 @@ static int fiji_smu_init(struct pp_smumgr *smumgr) if (smu7_init(smumgr)) return -EINVAL; - fiji_priv->avfs.AvfsBtcStatus = AVFS_BTC_BOOT; - if (fiji_is_hw_avfs_present(smumgr)) - /* AVFS Parameter - * 0 - BTC DC disabled, BTC AC disabled - * 1 - BTC DC enabled, BTC AC disabled - * 2 - BTC DC disabled, BTC AC enabled - * 3 - BTC DC enabled, BTC AC enabled - * Default is 0 - BTC DC disabled, BTC AC disabled - */ - fiji_priv->avfs.AvfsBtcParam = 0; - else - fiji_priv->avfs.AvfsBtcStatus = AVFS_BTC_NOTSUPPORTED; - for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++) fiji_priv->activity_target[i] = 30; @@ -421,4 +411,5 @@ const struct pp_smumgr_func fiji_smu_funcs = { .initialize_mc_reg_table = fiji_initialize_mc_reg_table, .is_dpm_running = fiji_is_dpm_running, .populate_requested_graphic_levels = fiji_populate_requested_graphic_levels, + .is_hw_avfs_present = fiji_is_hw_avfs_present, }; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h index adcbdfb209be..175bf9f8ef9c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h @@ -28,17 +28,8 @@ #include "smu7_smumgr.h" - -struct fiji_smu_avfs { - enum AVFS_BTC_STATUS AvfsBtcStatus; - uint32_t AvfsBtcParam; -}; - - struct fiji_smumgr { struct smu7_smumgr smu7_data; - - struct fiji_smu_avfs avfs; struct SMU73_Discrete_DpmTable smc_state_table; struct SMU73_Discrete_Ulv ulv_setting; struct SMU73_Discrete_PmFuses power_tune_table; @@ -47,7 +38,5 @@ struct fiji_smumgr { }; - - #endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c index f68e759e8be2..99a00bd39256 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c @@ -1498,7 +1498,7 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) table_info->vdd_dep_on_sclk; - if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) + if (((struct smu7_smumgr *)smu_data)->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) return result; result = atomctrl_get_avfs_information(hwmgr, &avfs_params); @@ -1889,7 +1889,7 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr) { int ret; struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 7e0347088941..75f43dadc56b 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ 
b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -60,16 +60,14 @@ static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = { static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = { 0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00}; - static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr) { int i; - int result = -1; + int result = -EINVAL; uint32_t reg, data; const PWR_Command_Table *pvirus = pwr_virus_table; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); - + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) { switch (pvirus->command) { @@ -86,7 +84,7 @@ static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr) default: pr_info("Table Exit with Invalid Command!"); smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL; - result = -1; + result = -EINVAL; break; } pvirus++; @@ -98,7 +96,7 @@ static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr) static int polaris10_perform_btc(struct pp_smumgr *smumgr) { int result = 0; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); if (0 != smu_data->avfs.avfs_btc_param) { if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) { @@ -172,10 +170,11 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) return 0; } + static int polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); switch (smu_data->avfs.avfs_btc_status) { case AVFS_BTC_COMPLETED_PREVIOUSLY: @@ -185,21 +184,21 @@ polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT) smu_data->avfs.avfs_btc_status = AVFS_BTC_DPMTABLESETUP_FAILED; PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(smumgr), - "[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU", - return -1); + "[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU", + return -EINVAL); if (smu_data->avfs.avfs_btc_param > 1) { pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be in this setting."); smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL; - PP_ASSERT_WITH_CODE(-1 == polaris10_setup_pwr_virus(smumgr), + PP_ASSERT_WITH_CODE(0 == polaris10_setup_pwr_virus(smumgr), "[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ", - return -1); + return -EINVAL); } smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED; PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(smumgr), "[AVFS][Polaris10_AVFSEventMgr] Failure at SmuPolaris10_PerformBTC. 
AVFS Disabled", - return -1); + return -EINVAL); smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS; break; @@ -377,11 +376,6 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr) if (smu7_init(smumgr)) return -EINVAL; - if (polaris10_is_hw_avfs_present(smumgr)) - smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT; - else - smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED; - for (i = 0; i < SMU74_MAX_LEVELS_GRAPHICS; i++) smu_data->activity_target[i] = PPPOLARIS10_TARGETACTIVITY_DFLT; @@ -411,4 +405,5 @@ const struct pp_smumgr_func polaris10_smu_funcs = { .get_mac_definition = polaris10_get_mac_definition, .is_dpm_running = polaris10_is_dpm_running, .populate_requested_graphic_levels = polaris10_populate_requested_graphic_levels, + .is_hw_avfs_present = polaris10_is_hw_avfs_present, }; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h index 49ebf1d5a53c..5e19c24b0561 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h @@ -32,11 +32,6 @@ #define SMC_RAM_END 0x40000 -struct polaris10_avfs { - enum AVFS_BTC_STATUS avfs_btc_status; - uint32_t avfs_btc_param; -}; - struct polaris10_pt_defaults { uint8_t SviLoadLineEn; uint8_t SviLoadLineVddC; @@ -51,8 +46,6 @@ struct polaris10_pt_defaults { uint16_t BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS]; }; - - struct polaris10_range_table { uint32_t trans_lower_frequency; /* in 10khz */ uint32_t trans_upper_frequency; @@ -61,14 +54,13 @@ struct polaris10_range_table { struct polaris10_smumgr { struct smu7_smumgr smu7_data; uint8_t protected_mode; - struct polaris10_avfs avfs; SMU74_Discrete_DpmTable smc_state_table; struct SMU74_Discrete_Ulv ulv_setting; struct SMU74_Discrete_PmFuses power_tune_table; struct polaris10_range_table range_table[NUM_SCLK_RANGE]; const struct polaris10_pt_defaults *power_tune_defaults; - uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS]; - uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK]; + uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS]; + uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK]; }; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 35ac27681415..76347ff6d655 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -540,7 +540,6 @@ int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr) return result; } - int smu7_init(struct pp_smumgr *smumgr) { struct smu7_smumgr *smu_data; @@ -596,6 +595,11 @@ int smu7_init(struct pp_smumgr *smumgr) (cgs_handle_t)smu_data->smu_buffer.handle); return -EINVAL); + if (smum_is_hw_avfs_present(smumgr)) + smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT; + else + smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED; + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h index 919be435b49c..ee5e32d2921e 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h @@ -37,6 +37,11 @@ struct smu7_buffer_entry { unsigned long handle; }; +struct smu7_avfs { + enum AVFS_BTC_STATUS avfs_btc_status; + uint32_t avfs_btc_param; +}; + struct smu7_smumgr { uint8_t *header; uint8_t *mec_image; @@ -50,7 +55,8 @@ struct smu7_smumgr { uint32_t arb_table_start; uint32_t ulv_setting_starts; uint8_t security_hard_key; - uint32_t 
acpi_optimization; + uint32_t acpi_optimization; + struct smu7_avfs avfs; }; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index 0ac19cf3f987..3bdf6478de7f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -404,3 +404,11 @@ int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, return 0; } + +bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr) +{ + if (smumgr->smumgr_funcs->is_hw_avfs_present) + return smumgr->smumgr_funcs->is_hw_avfs_present(smumgr); + + return false; +} From 74c31c6e61f3f903799986a07ab5b7c7437d456a Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Mon, 3 Jul 2017 17:50:45 +0800 Subject: [PATCH 0299/1795] drm/amd/powerplay: add avfs check for old asics on Vi. Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 1f01020ce3a9..f01cda93f178 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -4630,6 +4630,15 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr, static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable) { + struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + + if (smu_data == NULL) + return -EINVAL; + + if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) + return 0; + if (enable) { if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) From a5d20c405a37db7e59089422e795ad58feb37638 Mon Sep 17 00:00:00 2001 From: Alex Xie Date: Wed, 5 Jul 2017 18:02:04 -0400 Subject: [PATCH 0300/1795] drm/amdgpu: Free resources of bo_list when idr_alloc fails MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Alex Xie Reviewed-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index f621ee115c98..a71b875d0503 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -83,7 +83,7 @@ static int amdgpu_bo_list_create(struct amdgpu_device *adev, r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL); mutex_unlock(&fpriv->bo_list_lock); if (r < 0) { - kfree(list); + amdgpu_bo_list_free(list); return r; } *id = r; From 6c8855541492b3d812d55c9fe5a2ed17ddf022b8 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 5 Jul 2017 15:17:00 -0400 Subject: [PATCH 0301/1795] drm/amdgpu/atom: fix atom_fw check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Not all vbios images seem to set the version appropriately. Switch the check based on asic type instead. 
Reviewed-by: Hawking Zhang Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index 365e735f6647..ea3a2501c7cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -86,19 +86,6 @@ static bool check_atom_bios(uint8_t *bios, size_t size) return false; } -static bool is_atom_fw(uint8_t *bios) -{ - uint16_t bios_header_start = bios[0x48] | (bios[0x49] << 8); - uint8_t frev = bios[bios_header_start + 2]; - uint8_t crev = bios[bios_header_start + 3]; - - if ((frev < 3) || - ((frev == 3) && (crev < 3))) - return false; - - return true; -} - /* If you boot an IGP board with a discrete card as the primary, * the IGP rom is not accessible via the rom bar as the IGP rom is * part of the system bios. On boot, the system bios puts a @@ -455,6 +442,6 @@ bool amdgpu_get_bios(struct amdgpu_device *adev) return false; success: - adev->is_atom_fw = is_atom_fw(adev->bios); + adev->is_atom_fw = (adev->asic_type >= CHIP_VEGA10) ? true : false; return true; } From 21f6bcb6d449d24b63dd1d5d030d34d88baa6a0f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 5 Jul 2017 15:26:48 -0400 Subject: [PATCH 0302/1795] drm/amdgpu/atomfirmware: implement vram_width for APUs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implement support using the new atomfirmware system info table. Reviewed-by: Hawking Zhang Acked-by: Christian König Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 33 +++++++++++++++++++ .../gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h | 1 + 2 files changed, 34 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index 9ddfe34d12af..a7d65f033883 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -95,3 +95,36 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev) ctx->scratch_size_bytes = usage_bytes; return 0; } + +union igp_info { + struct atom_integrated_system_info_v1_11 v11; +}; + +/* + * Return vram width from integrated system info table, if available, + * or 0 if not. 
+ */ +int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + integratedsysteminfo); + u16 data_offset, size; + union igp_info *igp_info; + u8 frev, crev; + + /* get any igp specific overrides */ + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size, + &frev, &crev, &data_offset)) { + igp_info = (union igp_info *) + (mode_info->atom_context->bios + data_offset); + switch (crev) { + case 11: + return igp_info->v11.umachannelnumber * 64; + default: + return 0; + } + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h index 907e48f6b301..cedafbb9183e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h @@ -27,5 +27,6 @@ bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev); void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev); int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev); +int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev); #endif From 8d6a5230e1a00c2f69e6c7e8a8fcef6d81a06dde Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 5 Jul 2017 15:37:35 -0400 Subject: [PATCH 0303/1795] drm/amdgpu/gmc9: get vram width from atom for Raven MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Get it from the system info table. Reviewed-by: Hawking Zhang Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 74 ++++++++++++++------------- 1 file changed, 39 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index dbb43d99e02e..8ec148727149 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -23,6 +23,7 @@ #include #include "amdgpu.h" #include "gmc_v9_0.h" +#include "amdgpu_atomfirmware.h" #include "vega10/soc15ip.h" #include "vega10/HDP/hdp_4_0_offset.h" @@ -442,43 +443,46 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) u32 tmp; int chansize, numchan; - /* hbm memory channel size */ - chansize = 128; + adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev); + if (!adev->mc.vram_width) { + /* hbm memory channel size */ + chansize = 128; - tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0); - tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK; - tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT; - switch (tmp) { - case 0: - default: - numchan = 1; - break; - case 1: - numchan = 2; - break; - case 2: - numchan = 0; - break; - case 3: - numchan = 4; - break; - case 4: - numchan = 0; - break; - case 5: - numchan = 8; - break; - case 6: - numchan = 0; - break; - case 7: - numchan = 16; - break; - case 8: - numchan = 2; - break; + tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0); + tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK; + tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT; + switch (tmp) { + case 0: + default: + numchan = 1; + break; + case 1: + numchan = 2; + break; + case 2: + numchan = 0; + break; + case 3: + numchan = 4; + break; + case 4: + numchan = 0; + break; + case 5: + numchan = 8; + break; + case 6: + numchan = 0; + break; + case 7: + numchan = 16; + break; + case 8: + numchan = 2; + break; + } + adev->mc.vram_width = numchan * chansize; } - 
adev->mc.vram_width = numchan * chansize; /* Could aper size report 0 ? */ adev->mc.aper_base = pci_resource_start(adev->pdev, 0); From 606ce3c098b19cd5458930a133ef1577c64b1072 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 6 Jul 2017 10:15:46 -0400 Subject: [PATCH 0304/1795] drm/amdgpu: make arrays pctl0_data and pctl1_data static The arrays pctl0_data and pctl1_data do not need to be in global scope, so make them both static. Cleans up sparse warnings: symbol 'pctl0_data' was not declared. Should it be static? symbol 'pctl1_data' was not declared. Should it be static? Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 01918dc5dc55..9f2cf78907a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -249,7 +249,7 @@ struct pctl_data { uint32_t data; }; -const struct pctl_data pctl0_data[] = { +static const struct pctl_data pctl0_data[] = { {0x0, 0x7a640}, {0x9, 0x2a64a}, {0xd, 0x2a680}, @@ -274,7 +274,7 @@ #define PCTL0_STCTRL_REG_SAVE_RANGE0_BASE 0xa640 #define PCTL0_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa833 -const struct pctl_data pctl1_data[] = { +static const struct pctl_data pctl1_data[] = { {0x0, 0x39a000}, {0x3b, 0x44a040}, {0x81, 0x2a08d}, From cc25188afdb886044785be4c29f3993c3a3b2b92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 28 Jun 2017 12:18:54 +0200 Subject: [PATCH 0305/1795] drm/amdgpu: reserve the first 2x512 pages of GART MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We want to use them as remap address space.
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 5 ++++- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 3 +++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index 1ef625550442..f46a97d91675 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -43,12 +43,15 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man, unsigned long p_size) { struct amdgpu_gtt_mgr *mgr; + uint64_t start, size; mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); if (!mgr) return -ENOMEM; - drm_mm_init(&mgr->mm, 0, p_size); + start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS; + size = p_size - start; + drm_mm_init(&mgr->mm, start, size); spin_lock_init(&mgr->lock); mgr->available = p_size; man->priv = mgr; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 776a20ae40c4..c8059f067185 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -34,6 +34,9 @@ #define AMDGPU_PL_FLAG_GWS (TTM_PL_FLAG_PRIV << 1) #define AMDGPU_PL_FLAG_OA (TTM_PL_FLAG_PRIV << 2) +#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512 +#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2 + struct amdgpu_mman { struct ttm_bo_global_ref bo_global_ref; struct drm_global_reference mem_global_ref; From 0c2c421e2657da6eece66bd22eaaedf21dcebef7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 29 Jun 2017 17:24:26 +0200 Subject: [PATCH 0306/1795] drm/amdgpu: add amdgpu_gart_map function v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows us to write the mapped PTEs into an IB instead of the table directly. v2: fix build with debugfs enabled, remove unused assignment Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 62 ++++++++++++++++++------ 2 files changed, 51 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index bbc7b19d2c1b..56cac98892f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -570,6 +570,9 @@ int amdgpu_gart_init(struct amdgpu_device *adev); void amdgpu_gart_fini(struct amdgpu_device *adev); int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, int pages); +int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, + int pages, dma_addr_t *dma_addr, uint64_t flags, + void *dst); int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, int pages, struct page **pagelist, dma_addr_t *dma_addr, uint64_t flags); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 982b1cc11dac..b4048a91c814 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -282,6 +282,41 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, return 0; } +/** + * amdgpu_gart_map - map dma_addresses into GART entries + * + * @adev: amdgpu_device pointer + * @offset: offset into the GPU's gart aperture + * @pages: number of pages to bind + * @dma_addr: DMA addresses of pages + * + * Map the dma_addresses into GART entries (all asics). + * Returns 0 for success, -EINVAL for failure. 
+ */ +int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, + int pages, dma_addr_t *dma_addr, uint64_t flags, + void *dst) +{ + uint64_t page_base; + unsigned i, j, t; + + if (!adev->gart.ready) { + WARN(1, "trying to bind memory to uninitialized GART !\n"); + return -EINVAL; + } + + t = offset / AMDGPU_GPU_PAGE_SIZE; + + for (i = 0; i < pages; i++) { + page_base = dma_addr[i]; + for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) { + amdgpu_gart_set_pte_pde(adev, dst, t, page_base, flags); + page_base += AMDGPU_GPU_PAGE_SIZE; + } + } + return 0; +} + /** * amdgpu_gart_bind - bind pages into the gart page table * @@ -299,31 +334,30 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, int pages, struct page **pagelist, dma_addr_t *dma_addr, uint64_t flags) { - unsigned t; - unsigned p; - uint64_t page_base; - int i, j; +#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS + unsigned i,t,p; +#endif + int r; if (!adev->gart.ready) { WARN(1, "trying to bind memory to uninitialized GART !\n"); return -EINVAL; } +#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS t = offset / AMDGPU_GPU_PAGE_SIZE; p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); - - for (i = 0; i < pages; i++, p++) { -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS + for (i = 0; i < pages; i++, p++) adev->gart.pages[p] = pagelist[i]; #endif - if (adev->gart.ptr) { - page_base = dma_addr[i]; - for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) { - amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, t, page_base, flags); - page_base += AMDGPU_GPU_PAGE_SIZE; - } - } + + if (adev->gart.ptr) { + r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags, + adev->gart.ptr); + if (r) + return r; } + mb(); amdgpu_gart_flush_gpu_tlb(adev, 0); return 0; From abca90f1c8103528ca4b194fdc69e933bd23db4c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 30 Jun 2017 11:05:54 +0200 Subject: [PATCH 0307/1795] drm/amdgpu: use the GTT windows for BO moves v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This way we don't need to map the full BO at a time any more. v2: use fixed windows for src/dst Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 125 ++++++++++++++++++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 2 + 2 files changed, 108 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index ace178b393dd..4a34b61d44ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -47,10 +47,15 @@ #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) +static int amdgpu_map_buffer(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem, unsigned num_pages, + uint64_t offset, unsigned window, + struct amdgpu_ring *ring, + uint64_t *addr); + static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev); - /* * Global memory. 
*/ @@ -97,6 +102,8 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev) goto error_bo; } + mutex_init(&adev->mman.gtt_window_lock); + ring = adev->mman.buffer_funcs_ring; rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL]; r = amd_sched_entity_init(&ring->sched, &adev->mman.entity, @@ -123,6 +130,7 @@ static void amdgpu_ttm_global_fini(struct amdgpu_device *adev) if (adev->mman.mem_global_referenced) { amd_sched_entity_fini(adev->mman.entity.sched, &adev->mman.entity); + mutex_destroy(&adev->mman.gtt_window_lock); drm_global_item_unref(&adev->mman.bo_global_ref.ref); drm_global_item_unref(&adev->mman.mem_global_ref); adev->mman.mem_global_referenced = false; @@ -256,10 +264,13 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo, struct drm_mm_node *mm_node, struct ttm_mem_reg *mem) { - uint64_t addr; + uint64_t addr = 0; - addr = mm_node->start << PAGE_SHIFT; - addr += bo->bdev->man[mem->mem_type].gpu_offset; + if (mem->mem_type != TTM_PL_TT || + amdgpu_gtt_mgr_is_allocated(mem)) { + addr = mm_node->start << PAGE_SHIFT; + addr += bo->bdev->man[mem->mem_type].gpu_offset; + } return addr; } @@ -284,34 +295,41 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, return -EINVAL; } - if (old_mem->mem_type == TTM_PL_TT) { - r = amdgpu_ttm_bind(bo, old_mem); - if (r) - return r; - } - old_mm = old_mem->mm_node; old_size = old_mm->size; old_start = amdgpu_mm_node_addr(bo, old_mm, old_mem); - if (new_mem->mem_type == TTM_PL_TT) { - r = amdgpu_ttm_bind(bo, new_mem); - if (r) - return r; - } - new_mm = new_mem->mm_node; new_size = new_mm->size; new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem); num_pages = new_mem->num_pages; + mutex_lock(&adev->mman.gtt_window_lock); while (num_pages) { - unsigned long cur_pages = min(old_size, new_size); + unsigned long cur_pages = min(min(old_size, new_size), + (u64)AMDGPU_GTT_MAX_TRANSFER_SIZE); + uint64_t from = old_start, to = new_start; struct dma_fence *next; - r = amdgpu_copy_buffer(ring, old_start, new_start, + if (old_mem->mem_type == TTM_PL_TT && + !amdgpu_gtt_mgr_is_allocated(old_mem)) { + r = amdgpu_map_buffer(bo, old_mem, cur_pages, + old_start, 0, ring, &from); + if (r) + goto error; + } + + if (new_mem->mem_type == TTM_PL_TT && + !amdgpu_gtt_mgr_is_allocated(new_mem)) { + r = amdgpu_map_buffer(bo, new_mem, cur_pages, + new_start, 1, ring, &to); + if (r) + goto error; + } + + r = amdgpu_copy_buffer(ring, from, to, cur_pages * PAGE_SIZE, - bo->resv, &next, false, false); + bo->resv, &next, false, true); if (r) goto error; @@ -338,12 +356,15 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, new_start += cur_pages * PAGE_SIZE; } } + mutex_unlock(&adev->mman.gtt_window_lock); r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); dma_fence_put(fence); return r; error: + mutex_unlock(&adev->mman.gtt_window_lock); + if (fence) dma_fence_wait(fence, false); dma_fence_put(fence); @@ -1253,6 +1274,72 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) return ttm_bo_mmap(filp, vma, &adev->mman.bdev); } +static int amdgpu_map_buffer(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem, unsigned num_pages, + uint64_t offset, unsigned window, + struct amdgpu_ring *ring, + uint64_t *addr) +{ + struct amdgpu_ttm_tt *gtt = (void *)bo->ttm; + struct amdgpu_device *adev = ring->adev; + struct ttm_tt *ttm = bo->ttm; + struct amdgpu_job *job; + unsigned num_dw, num_bytes; + dma_addr_t *dma_address; + struct dma_fence *fence; + uint64_t src_addr, dst_addr; + uint64_t flags; + int r; + + 
BUG_ON(adev->mman.buffer_funcs->copy_max_bytes < + AMDGPU_GTT_MAX_TRANSFER_SIZE * 8); + + *addr = adev->mc.gtt_start; + *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE * + AMDGPU_GPU_PAGE_SIZE; + + num_dw = adev->mman.buffer_funcs->copy_num_dw; + while (num_dw & 0x7) + num_dw++; + + num_bytes = num_pages * 8; + + r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job); + if (r) + return r; + + src_addr = num_dw * 4; + src_addr += job->ibs[0].gpu_addr; + + dst_addr = adev->gart.table_addr; + dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8; + amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, + dst_addr, num_bytes); + + amdgpu_ring_pad_ib(ring, &job->ibs[0]); + WARN_ON(job->ibs[0].length_dw > num_dw); + + dma_address = >t->ttm.dma_address[offset >> PAGE_SHIFT]; + flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem); + r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags, + &job->ibs[0].ptr[num_dw]); + if (r) + goto error_free; + + r = amdgpu_job_submit(job, ring, &adev->mman.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, &fence); + if (r) + goto error_free; + + dma_fence_put(fence); + + return r; + +error_free: + amdgpu_job_free(job); + return r; +} + int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count, struct reservation_object *resv, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index c8059f067185..4f5c1da5922e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -52,6 +52,8 @@ struct amdgpu_mman { /* buffer handling */ const struct amdgpu_buffer_funcs *buffer_funcs; struct amdgpu_ring *buffer_funcs_ring; + + struct mutex gtt_window_lock; /* Scheduler entity for buffer moves */ struct amd_sched_entity entity; }; From 5e7e83963fcf7688c2a171bedd9e76e2aa4eb85a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 30 Jun 2017 12:19:42 +0200 Subject: [PATCH 0308/1795] drm/amdgpu: stop mapping BOs to GTT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No need to map BOs to GTT on eviction and intermediate transfers any more. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 4a34b61d44ef..fb9c6988f5f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -199,7 +199,6 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, .lpfn = 0, .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM }; - unsigned i; if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) { placement->placement = &placements; @@ -217,20 +216,6 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); } else { amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); - for (i = 0; i < abo->placement.num_placement; ++i) { - if (!(abo->placements[i].flags & - TTM_PL_FLAG_TT)) - continue; - - if (abo->placements[i].lpfn) - continue; - - /* set an upper limit to force directly - * allocating address space for the BO. 
- */ - abo->placements[i].lpfn = - adev->mc.gtt_size >> PAGE_SHIFT; - } } break; case TTM_PL_TT: @@ -391,7 +376,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, placement.num_busy_placement = 1; placement.busy_placement = &placements; placements.fpfn = 0; - placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT; + placements.lpfn = 0; placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_gpu); @@ -438,7 +423,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, placement.num_busy_placement = 1; placement.busy_placement = &placements; placements.fpfn = 0; - placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT; + placements.lpfn = 0; placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_gpu); From 935eefb312566364ef395eb29574e093686dbec8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 30 Jun 2017 12:20:45 +0200 Subject: [PATCH 0309/1795] drm/amdgpu: remove maximum BO size limitation v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We can finally remove this now. v2: remove now unused max_size variable as well. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 96c4493ccf8f..917ac5e074a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -49,7 +49,6 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, struct drm_gem_object **obj) { struct amdgpu_bo *robj; - unsigned long max_size; int r; *obj = NULL; @@ -58,17 +57,6 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, alignment = PAGE_SIZE; } - if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) { - /* Maximum bo size is the unpinned gtt size since we use the gtt to - * handle vram to system pool migrations. - */ - max_size = adev->mc.gtt_size - adev->gart_pin_size; - if (size > max_size) { - DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n", - size >> 20, max_size >> 20); - return -ENOMEM; - } - } retry: r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, flags, NULL, NULL, &robj); From 09628c3f68c6ec63c8eba324eb7fd70d46bf3eb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 30 Jun 2017 14:37:02 +0200 Subject: [PATCH 0310/1795] drm/amdgpu: use TTM values instead of MC values for the info queries MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the TTM values instead of the hardware config here. 
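A minimal sketch of the conversion this implies (the helper name is illustrative only; the field names follow the driver code): the TTM manager tracks its size in pages, so it is scaled by PAGE_SIZE before the pinned GART pages are subtracted.

static u64 example_gtt_usable_bytes(struct amdgpu_device *adev)
{
	u64 total = adev->mman.bdev.man[TTM_PL_TT].size;	/* pages */

	total *= PAGE_SIZE;			/* pages -> bytes */
	return total - adev->gart_pin_size;	/* minus pinned GART */
}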
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index b0b23101d1c8..63017de99f36 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -485,7 +485,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file vram_gtt.vram_size -= adev->vram_pin_size; vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size; vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size); - vram_gtt.gtt_size = adev->mc.gtt_size; + vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size; + vram_gtt.gtt_size *= PAGE_SIZE; vram_gtt.gtt_size -= adev->gart_pin_size; return copy_to_user(out, &vram_gtt, min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0; @@ -510,9 +511,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file mem.cpu_accessible_vram.max_allocation = mem.cpu_accessible_vram.usable_heap_size * 3 / 4; - mem.gtt.total_heap_size = adev->mc.gtt_size; - mem.gtt.usable_heap_size = - adev->mc.gtt_size - adev->gart_pin_size; + mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size; + mem.gtt.total_heap_size *= PAGE_SIZE; + mem.gtt.usable_heap_size = mem.gtt.total_heap_size + - adev->gart_pin_size; mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage); mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4; From 560460f282543d484158c7760464495392f8fa4a Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 3 Jul 2017 22:37:44 +0800 Subject: [PATCH 0311/1795] drm/amd/powerplay: added index gc cac read/write apis for vega10 Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 4003cb517451..ca9fa3fe788d 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -196,6 +196,28 @@ static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v) spin_unlock_irqrestore(&adev->didt_idx_lock, flags); } +static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->gc_cac_idx_lock, flags); + WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg)); + r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA); + spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags); + return r; +} + +static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->gc_cac_idx_lock, flags); + WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg)); + WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v)); + spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags); +} + static u32 soc15_get_config_memsize(struct amdgpu_device *adev) { if (adev->flags & AMD_IS_APU) @@ -555,6 +577,8 @@ static int soc15_common_early_init(void *handle) adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg; adev->didt_rreg = &soc15_didt_rreg; adev->didt_wreg = &soc15_didt_wreg; + adev->gc_cac_rreg = &soc15_gc_cac_rreg; + adev->gc_cac_wreg = &soc15_gc_cac_wreg; adev->asic_funcs = &soc15_asic_funcs; From 16abb5d206499d8cb84103c758d45afbfecf76c0 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 4 Jul 2017 09:21:50 +0800 Subject: [PATCH 0312/1795] drm/amd/powerplay: 
added new se_cac_idx r/w APIs v2 - v2: added missing spinlock init Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 ++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + 2 files changed, 7 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 56cac98892f1..1bb1912f863c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1560,6 +1560,10 @@ struct amdgpu_device { spinlock_t gc_cac_idx_lock; amdgpu_rreg_t gc_cac_rreg; amdgpu_wreg_t gc_cac_wreg; + /* protects concurrent se_cac register access */ + spinlock_t se_cac_idx_lock; + amdgpu_rreg_t se_cac_rreg; + amdgpu_wreg_t se_cac_wreg; /* protects concurrent ENDPOINT (audio) register access */ spinlock_t audio_endpt_idx_lock; amdgpu_block_rreg_t audio_endpt_rreg; @@ -1747,6 +1751,8 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v); #define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v)) #define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg)) #define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v)) +#define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg)) +#define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v)) #define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg)) #define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v)) #define WREG32_P(reg, val, mask) \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 8ce522b490c8..541695768f0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2075,6 +2075,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, spin_lock_init(&adev->uvd_ctx_idx_lock); spin_lock_init(&adev->didt_idx_lock); spin_lock_init(&adev->gc_cac_idx_lock); + spin_lock_init(&adev->se_cac_idx_lock); spin_lock_init(&adev->audio_endpt_idx_lock); spin_lock_init(&adev->mm_stats.lock); From 2f11fb02873890aca30deb9ca4b65b841c487bd9 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 4 Jul 2017 09:23:01 +0800 Subject: [PATCH 0313/1795] drm/amd/powerplay: added soc15 support for new se_cac_idx APIs Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index ca9fa3fe788d..0d9a3dd302a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -218,6 +218,28 @@ static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v) spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags); } +static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->se_cac_idx_lock, flags); + WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg)); + r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA); + spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags); + return r; +} + +static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->se_cac_idx_lock, flags); + WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg)); + WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v)); + spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags); +} + static u32 soc15_get_config_memsize(struct amdgpu_device *adev) { if (adev->flags & AMD_IS_APU) @@ 
-579,6 +601,8 @@ static int soc15_common_early_init(void *handle) adev->didt_wreg = &soc15_didt_wreg; adev->gc_cac_rreg = &soc15_gc_cac_rreg; adev->gc_cac_wreg = &soc15_gc_cac_wreg; + adev->se_cac_rreg = &soc15_se_cac_rreg; + adev->se_cac_wreg = &soc15_se_cac_wreg; adev->asic_funcs = &soc15_asic_funcs; From c62a59d0c8117f709f1e7a9fc8ccc1aae1db3f2c Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 4 Jul 2017 09:24:34 +0800 Subject: [PATCH 0314/1795] drm/amd/powerplay: added support for new se_cac_idx APIs to cgs Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 4 ++++ drivers/gpu/drm/amd/include/cgs_common.h | 1 + 2 files changed, 5 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index e525de2ecb2d..9b589402b58d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -240,6 +240,8 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device, return RREG32_DIDT(index); case CGS_IND_REG_GC_CAC: return RREG32_GC_CAC(index); + case CGS_IND_REG_SE_CAC: + return RREG32_SE_CAC(index); case CGS_IND_REG__AUDIO_ENDPT: DRM_ERROR("audio endpt register access not implemented.\n"); return 0; @@ -266,6 +268,8 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device, return WREG32_DIDT(index, value); case CGS_IND_REG_GC_CAC: return WREG32_GC_CAC(index, value); + case CGS_IND_REG_SE_CAC: + return WREG32_SE_CAC(index, value); case CGS_IND_REG__AUDIO_ENDPT: DRM_ERROR("audio endpt register access not implemented.\n"); return; diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index 0a94f749e3c0..b46d12df8df0 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -50,6 +50,7 @@ enum cgs_ind_reg { CGS_IND_REG__UVD_CTX, CGS_IND_REG__DIDT, CGS_IND_REG_GC_CAC, + CGS_IND_REG_SE_CAC, CGS_IND_REG__AUDIO_ENDPT }; From 209ee27e9ba35c53e18284c7bb5ac2ba6a2fcd22 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 4 Jul 2017 15:37:09 +0800 Subject: [PATCH 0315/1795] drm/amd/powerplay: added grbm_idx_mutex lock/unlock to cgs v2 - v2: rename param 'en' as 'lock' Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 12 ++++++++++++ drivers/gpu/drm/amd/include/cgs_common.h | 5 +++++ 2 files changed, 17 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 9b589402b58d..a99e0bca6812 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -614,6 +614,17 @@ static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device, return 0; } +static void amdgpu_cgs_lock_grbm_idx(struct cgs_device *cgs_device, + bool lock) +{ + CGS_FUNC_ADEV; + + if (lock) + mutex_lock(&adev->grbm_idx_mutex); + else + mutex_unlock(&adev->grbm_idx_mutex); +} + static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, enum cgs_ucode_id type, struct cgs_firmware_info *info) @@ -1127,6 +1138,7 @@ static const struct cgs_ops amdgpu_cgs_ops = { .query_system_info = amdgpu_cgs_query_system_info, .is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled, .enter_safe_mode = amdgpu_cgs_enter_safe_mode, + .lock_grbm_idx = amdgpu_cgs_lock_grbm_idx, }; static const struct cgs_os_ops amdgpu_cgs_os_ops = { diff --git a/drivers/gpu/drm/amd/include/cgs_common.h 
b/drivers/gpu/drm/amd/include/cgs_common.h index b46d12df8df0..0214f63f52fc 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -407,6 +407,8 @@ typedef int (*cgs_is_virtualization_enabled_t)(void *cgs_device); typedef int (*cgs_enter_safe_mode)(struct cgs_device *cgs_device, bool en); +typedef void (*cgs_lock_grbm_idx)(struct cgs_device *cgs_device, bool lock); + struct cgs_ops { /* memory management calls (similar to KFD interface) */ cgs_alloc_gpu_mem_t alloc_gpu_mem; @@ -442,6 +444,7 @@ struct cgs_ops { cgs_query_system_info query_system_info; cgs_is_virtualization_enabled_t is_virtualization_enabled; cgs_enter_safe_mode enter_safe_mode; + cgs_lock_grbm_idx lock_grbm_idx; }; struct cgs_os_ops; /* To be define in OS-specific CGS header */ @@ -518,4 +521,6 @@ struct cgs_device #define cgs_enter_safe_mode(cgs_device, en) \ CGS_CALL(enter_safe_mode, cgs_device, en) +#define cgs_lock_grbm_idx(cgs_device, lock) \ + CGS_CALL(lock_grbm_idx, cgs_device, lock) #endif /* _CGS_COMMON_H */ From 9b7b8154cdb8225cbef2f470285fd97ddbd34a44 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 5 Jul 2017 15:33:00 +0800 Subject: [PATCH 0316/1795] drm/amd/powerplay: added didt support for vega10 Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 56 + .../drm/amd/powerplay/hwmgr/vega10_hwmgr.h | 5 + .../amd/powerplay/hwmgr/vega10_powertune.c | 1291 +++++++++++++++++ .../amd/powerplay/hwmgr/vega10_powertune.h | 16 + .../drm/amd/powerplay/inc/hardwaremanager.h | 5 + drivers/gpu/drm/amd/powerplay/inc/pp_debug.h | 6 + drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h | 2 + 7 files changed, 1381 insertions(+) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index b14ea319bd95..8d567620576a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -146,6 +146,19 @@ static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr) data->registry_data.vr1hot_enabled = 1; data->registry_data.regulator_hot_gpio_support = 1; + data->registry_data.didt_support = 1; + if (data->registry_data.didt_support) { + data->registry_data.didt_mode = 6; + data->registry_data.sq_ramping_support = 1; + data->registry_data.db_ramping_support = 0; + data->registry_data.td_ramping_support = 0; + data->registry_data.tcp_ramping_support = 0; + data->registry_data.dbr_ramping_support = 0; + data->registry_data.edc_didt_support = 1; + data->registry_data.gc_didt_support = 0; + data->registry_data.psm_didt_support = 0; + } + data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT; data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; @@ -222,6 +235,8 @@ static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr) /* assume disabled */ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DiDtSupport); phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping); phm_cap_unset(hwmgr->platform_descriptor.platformCaps, @@ -230,6 +245,34 @@ static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_TDRamping); phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping); + 
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DiDtEDCEnable); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_GCEDC); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PSM); + + if (data->registry_data.didt_support) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport); + if (data->registry_data.sq_ramping_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping); + if (data->registry_data.db_ramping_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping); + if (data->registry_data.td_ramping_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping); + if (data->registry_data.tcp_ramping_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping); + if (data->registry_data.dbr_ramping_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping); + if (data->registry_data.edc_didt_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable); + if (data->registry_data.gc_didt_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC); + if (data->registry_data.psm_didt_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM); + } if (data->registry_data.power_containment_support) phm_cap_set(hwmgr->platform_descriptor.platformCaps, @@ -322,6 +365,7 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) data->smu_features[GNLD_FAN_CONTROL].smu_feature_id = FEATURE_FAN_CONTROL_BIT; data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT; + data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT; if (!data->registry_data.prefetcher_dpm_key_disabled) data->smu_features[GNLD_DPM_PREFETCHER].supported = true; @@ -391,6 +435,9 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) if ((data->smu_version & 0xff000000) == 0x5000000) data->smu_features[GNLD_ACG].supported = true; + if (data->registry_data.didt_support) + data->smu_features[GNLD_DIDT].supported = true; + } #ifdef PPLIB_VEGA10_EVV_SUPPORT @@ -2907,6 +2954,11 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(!tmp_result, "Failed to start DPM!", result = tmp_result); + /* enable didt, do not abort if failed didt */ + tmp_result = vega10_enable_didt_config(hwmgr); + PP_ASSERT(!tmp_result, + "Failed to enable didt config!"); + tmp_result = vega10_enable_power_containment(hwmgr); PP_ASSERT_WITH_CODE(!tmp_result, "Failed to enable power containment!", @@ -4736,6 +4788,10 @@ static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((tmp_result == 0), "Failed to disable power containment!", result = tmp_result); + tmp_result = vega10_disable_didt_config(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable didt config!", result = tmp_result); + tmp_result = vega10_avfs_enable(hwmgr, false); PP_ASSERT_WITH_CODE((tmp_result == 0), "Failed to disable AVFS!", result = tmp_result); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h index 415080174b28..5c97a8b6c46a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h @@ -232,7 +232,9 @@ struct 
vega10_registry_data { uint8_t cac_support; uint8_t clock_stretcher_support; uint8_t db_ramping_support; + uint8_t didt_mode; uint8_t didt_support; + uint8_t edc_didt_support; uint8_t dynamic_state_patching_support; uint8_t enable_pkg_pwr_tracking_feature; uint8_t enable_tdc_limit_feature; @@ -265,6 +267,9 @@ struct vega10_registry_data { uint8_t tcp_ramping_support; uint8_t tdc_support; uint8_t td_ramping_support; + uint8_t dbr_ramping_support; + uint8_t gc_didt_support; + uint8_t psm_didt_support; uint8_t thermal_out_gpio_support; uint8_t thermal_support; uint8_t fw_ctf_enabled; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c index 3f72268e99bb..fbafc849ea71 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c @@ -26,7 +26,1298 @@ #include "vega10_powertune.h" #include "vega10_smumgr.h" #include "vega10_ppsmc.h" +#include "vega10_inc.h" #include "pp_debug.h" +#include "pp_soc15.h" + +static const struct vega10_didt_config_reg SEDiDtTuningCtrlConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* DIDT_SQ */ + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853 }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153 }, + + /* DIDT_TD */ + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde }, + + /* DIDT_TCP */ + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde }, + + /* DIDT_DB */ + { ixDIDT_DB_TUNING_CTRL, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde }, + { ixDIDT_DB_TUNING_CTRL, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg SEDiDtCtrl3Config_vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /*DIDT_SQ_CTRL3 */ + { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_SQ_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__THROTTLE_POLICY_MASK, DIDT_SQ_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 }, + { ixDIDT_SQ_CTRL3, 
DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 }, + { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_SQ_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_SEL_MASK, DIDT_SQ_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_SQ_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 }, + + /*DIDT_TCP_CTRL3 */ + { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_TCP_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__THROTTLE_POLICY_MASK, DIDT_TCP_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 }, + { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 }, + { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_TCP_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_SEL_MASK, DIDT_TCP_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_TCP_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 }, + + /*DIDT_TD_CTRL3 */ + { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_TD_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__THROTTLE_POLICY_MASK, DIDT_TD_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 }, + { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 }, + { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__QUALIFY_STALL_EN_MASK, 
DIDT_TD_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_SEL_MASK, DIDT_TD_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_TD_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 }, + + /*DIDT_DB_CTRL3 */ + { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_DB_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__THROTTLE_POLICY_MASK, DIDT_DB_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 }, + { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 }, + { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_DB_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_SEL_MASK, DIDT_DB_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_DB_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg SEDiDtCtrl2Config_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* DIDT_SQ */ + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853 }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000 }, + + /* DIDT_TD */ + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 }, + + /* DIDT_TCP */ + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 }, + + /* DIDT_DB */ + { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__MAX_POWER_DELTA_MASK, 
DIDT_DB_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde }, + { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 }, + { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg SEDiDtCtrl1Config_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* DIDT_SQ */ + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff }, + /* DIDT_TD */ + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff }, + /* DIDT_TCP */ + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff }, + /* DIDT_DB */ + { ixDIDT_DB_CTRL1, DIDT_DB_CTRL1__MIN_POWER_MASK, DIDT_DB_CTRL1__MIN_POWER__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL1, DIDT_DB_CTRL1__MAX_POWER_MASK, DIDT_DB_CTRL1__MAX_POWER__SHIFT, 0xffff }, + + { 0xFFFFFFFF } /* End of list */ +}; + + +static const struct vega10_didt_config_reg SEDiDtWeightConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* DIDT_SQ */ + { ixDIDT_SQ_WEIGHT0_3, 0xFFFFFFFF, 0, 0x2B363B1A }, + { ixDIDT_SQ_WEIGHT4_7, 0xFFFFFFFF, 0, 0x270B2432 }, + { ixDIDT_SQ_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000018 }, + + /* DIDT_TD */ + { ixDIDT_TD_WEIGHT0_3, 0xFFFFFFFF, 0, 0x2B1D220F }, + { ixDIDT_TD_WEIGHT4_7, 0xFFFFFFFF, 0, 0x00007558 }, + { ixDIDT_TD_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000000 }, + + /* DIDT_TCP */ + { ixDIDT_TCP_WEIGHT0_3, 0xFFFFFFFF, 0, 0x5ACE160D }, + { ixDIDT_TCP_WEIGHT4_7, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TCP_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000000 }, + + /* DIDT_DB */ + { ixDIDT_DB_WEIGHT0_3, 0xFFFFFFFF, 0, 0x0E152A0F }, + { ixDIDT_DB_WEIGHT4_7, 0xFFFFFFFF, 0, 0x09061813 }, + { ixDIDT_DB_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000013 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg SEDiDtCtrl0Config_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* DIDT_SQ */ + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL0, 
DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 }, + /* DIDT_TD */ + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 }, + /* DIDT_TCP */ + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN_MASK, 
DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 }, + /* DIDT_DB */ + { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__PHASE_OFFSET_MASK, DIDT_DB_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CTRL_RST_MASK, DIDT_DB_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 }, + { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 }, + { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 }, + { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff }, + { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 }, + { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + + +static const struct vega10_didt_config_reg SEDiDtStallCtrlConfig_vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* DIDT_SQ */ + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0004 }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0004 }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a }, + + /* DIDT_TD */ + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001 }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001 }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a }, + + /* DIDT_TCP */ + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001 }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001 }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a 
}, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a }, + + /* DIDT_DB */ + { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0004 }, + { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0004 }, + { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a }, + { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg SEDiDtStallPatternConfig_vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* DIDT_SQ_STALL_PATTERN_1_2 */ + { ixDIDT_SQ_STALL_PATTERN_1_2, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 }, + { ixDIDT_SQ_STALL_PATTERN_1_2, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 }, + + /* DIDT_SQ_STALL_PATTERN_3_4 */ + { ixDIDT_SQ_STALL_PATTERN_3_4, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 }, + { ixDIDT_SQ_STALL_PATTERN_3_4, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 }, + + /* DIDT_SQ_STALL_PATTERN_5_6 */ + { ixDIDT_SQ_STALL_PATTERN_5_6, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 }, + { ixDIDT_SQ_STALL_PATTERN_5_6, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 }, + + /* DIDT_SQ_STALL_PATTERN_7 */ + { ixDIDT_SQ_STALL_PATTERN_7, DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 }, + + /* DIDT_TCP_STALL_PATTERN_1_2 */ + { ixDIDT_TCP_STALL_PATTERN_1_2, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 }, + { ixDIDT_TCP_STALL_PATTERN_1_2, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 }, + + /* DIDT_TCP_STALL_PATTERN_3_4 */ + { ixDIDT_TCP_STALL_PATTERN_3_4, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 }, + { ixDIDT_TCP_STALL_PATTERN_3_4, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 }, + + /* DIDT_TCP_STALL_PATTERN_5_6 */ + { ixDIDT_TCP_STALL_PATTERN_5_6, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 }, + { ixDIDT_TCP_STALL_PATTERN_5_6, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 }, + + /* DIDT_TCP_STALL_PATTERN_7 */ + { 
ixDIDT_TCP_STALL_PATTERN_7, DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 }, + + /* DIDT_TD_STALL_PATTERN_1_2 */ + { ixDIDT_TD_STALL_PATTERN_1_2, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 }, + { ixDIDT_TD_STALL_PATTERN_1_2, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 }, + + /* DIDT_TD_STALL_PATTERN_3_4 */ + { ixDIDT_TD_STALL_PATTERN_3_4, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 }, + { ixDIDT_TD_STALL_PATTERN_3_4, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 }, + + /* DIDT_TD_STALL_PATTERN_5_6 */ + { ixDIDT_TD_STALL_PATTERN_5_6, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 }, + { ixDIDT_TD_STALL_PATTERN_5_6, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 }, + + /* DIDT_TD_STALL_PATTERN_7 */ + { ixDIDT_TD_STALL_PATTERN_7, DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 }, + + /* DIDT_DB_STALL_PATTERN_1_2 */ + { ixDIDT_DB_STALL_PATTERN_1_2, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 }, + { ixDIDT_DB_STALL_PATTERN_1_2, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 }, + + /* DIDT_DB_STALL_PATTERN_3_4 */ + { ixDIDT_DB_STALL_PATTERN_3_4, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 }, + { ixDIDT_DB_STALL_PATTERN_3_4, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 }, + + /* DIDT_DB_STALL_PATTERN_5_6 */ + { ixDIDT_DB_STALL_PATTERN_5_6, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 }, + { ixDIDT_DB_STALL_PATTERN_5_6, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 }, + + /* DIDT_DB_STALL_PATTERN_7 */ + { ixDIDT_DB_STALL_PATTERN_7, DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg SELCacConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* SQ */ + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060021 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860021 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060021 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860021 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060021 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860021 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060021 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860021 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060021 }, + /* TD */ + { ixSE_CAC_CNTL, 
0xFFFFFFFF, 0, 0x000E0020 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0020 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0020 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0020 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0020 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x028E0020 }, + /* TCP */ + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x001c0020 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x009c0020 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x011c0020 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x019c0020 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x021c0020 }, + /* DB */ + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00200008 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00820008 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01020008 }, + { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01820008 }, + + { 0xFFFFFFFF } /* End of list */ +}; + + +static const struct vega10_didt_config_reg SEEDCStallPatternConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* SQ */ + { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00030001 }, + { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x000F0007 }, + { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x003F001F }, + { ixDIDT_SQ_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x0000007F }, + /* TD */ + { ixDIDT_TD_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TD_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TD_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TD_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 }, + /* TCP */ + { ixDIDT_TCP_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TCP_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TCP_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TCP_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 }, + /* DB */ + { ixDIDT_DB_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_DB_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_DB_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_DB_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg SEEDCForceStallPatternConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* SQ */ + { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000015 }, + { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_SQ_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 }, + /* TD */ + { ixDIDT_TD_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000015 }, + { ixDIDT_TD_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TD_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TD_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg SEEDCStallDelayConfig_Vega10[] = +{ +/* 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* SQ */ + { ixDIDT_SQ_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_3, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_4, 0xFFFFFFFF, 0, 0x00000000 }, + /* TD */ + { ixDIDT_TD_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TD_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TD_EDC_STALL_DELAY_3, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TD_EDC_STALL_DELAY_4, 0xFFFFFFFF, 0, 0x00000000 }, + /* TCP */ + { ixDIDT_TCP_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TCP_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TCP_EDC_STALL_DELAY_3, 0xFFFFFFFF, 0, 0x00000000 }, + { ixDIDT_TCP_EDC_STALL_DELAY_4, 0xFFFFFFFF, 0, 0x00000000 }, + /* DB */ + { ixDIDT_DB_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg SEEDCThresholdConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { ixDIDT_SQ_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0x0000010E }, + { ixDIDT_TD_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF }, + { ixDIDT_TCP_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF }, + { ixDIDT_DB_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg SEEDCCtrlResetConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* SQ */ + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, 
DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg SEEDCCtrlConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* SQ */ + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0001 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0004 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0006 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg SEEDCCtrlForceStallConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* SQ */ + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0001 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0001 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000C }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, 
DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 }, + + /* TD */ + { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_EN_MASK, DIDT_TD_EDC_CTRL__EDC_EN__SHIFT, 0x0001 }, + { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK, DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 }, + { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 }, + { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0001 }, + { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000E }, + { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 }, + { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_EN_MASK, DIDT_TD_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 }, + { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 }, + { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 }, + { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg GCDiDtDroopCtrlConfig_vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN__SHIFT, 0x0000 }, + { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD__SHIFT, 0x0000 }, + { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX__SHIFT, 0x0000 }, + { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL_MASK, GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL__SHIFT, 0x0000 }, + { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW__SHIFT, 0x0000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg GCDiDtCtrl0Config_vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_CTRL_EN_MASK, GC_DIDT_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 }, + { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__PHASE_OFFSET_MASK, GC_DIDT_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 }, + { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_SW_RST_MASK, GC_DIDT_CTRL0__DIDT_SW_RST__SHIFT, 0x0000 }, + { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 }, + { 0xFFFFFFFF } /* End of list */ +}; + + +static const struct vega10_didt_config_reg PSMSEEDCStallPatternConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* SQ EDC STALL PATTERNs */ + { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT, 0x0101 }, + { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT, 0x0101 }, + { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT, 0x1111 }, + { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT, 0x1111 }, + + { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT, 0x1515 }, + { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT, 0x1515 }, + + { ixDIDT_SQ_EDC_STALL_PATTERN_7, DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK, DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT, 0x5555 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg PSMSEEDCStallDelayConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* SQ EDC STALL DELAYs */ + { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3_MASK, 
DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3__SHIFT, 0x0000 }, + + { ixDIDT_SQ_EDC_STALL_DELAY_2, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4_MASK, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_2, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ5_MASK, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ5__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_2, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ6_MASK, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ6__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_2, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ7_MASK, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ7__SHIFT, 0x0000 }, + + { ixDIDT_SQ_EDC_STALL_DELAY_3, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ8_MASK, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ8__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_3, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ9_MASK, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ9__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_3, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ10_MASK, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ10__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_3, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ11_MASK, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ11__SHIFT, 0x0000 }, + + { ixDIDT_SQ_EDC_STALL_DELAY_4, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ12_MASK, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ12__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_4, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ12_MASK, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ13__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_4, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ14_MASK, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ14__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_STALL_DELAY_4, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ15_MASK, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ15__SHIFT, 0x0000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg PSMSEEDCThresholdConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* SQ EDC THRESHOLD */ + { ixDIDT_SQ_EDC_THRESHOLD, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD_MASK, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x0000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg PSMSEEDCCtrlResetConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* SQ EDC CTRL */ + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 }, 
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg PSMSEEDCCtrlConfig_Vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* SQ EDC CTRL */ + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0001 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000E }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0001 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0003 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 }, + { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg PSMGCEDCThresholdConfig_vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { mmGC_EDC_THRESHOLD, GC_EDC_THRESHOLD__EDC_THRESHOLD_MASK, GC_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x0000000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg PSMGCEDCDroopCtrlConfig_vega10[] = +{ +/* 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN__SHIFT, 0x0001 }, + { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD__SHIFT, 0x0384 }, + { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX__SHIFT, 0x0001 }, + { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__AVG_PSM_SEL_MASK, GC_EDC_DROOP_CTRL__AVG_PSM_SEL__SHIFT, 0x0001 }, + { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL_MASK, GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL__SHIFT, 0x0001 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg PSMGCEDCCtrlResetConfig_vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_EN_MASK, GC_EDC_CTRL__EDC_EN__SHIFT, 0x0000 }, + { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_SW_RST_MASK, GC_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 }, + { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_FORCE_STALL_MASK, GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 }, + { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 }, + { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg PSMGCEDCCtrlConfig_vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_EN_MASK, GC_EDC_CTRL__EDC_EN__SHIFT, 0x0001 }, + { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_SW_RST_MASK, GC_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 }, + { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, + { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_FORCE_STALL_MASK, GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 }, + { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 }, + { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg AvfsPSMResetConfig_vega10[]= +{ +/* 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { 0x16A02, 0xFFFFFFFF, 0x0, 0x0000005F }, + { 0x16A05, 0xFFFFFFFF, 0x0, 0x00000001 }, + { 0x16A06, 0x00000001, 0x0, 0x02000000 }, + { 0x16A01, 0xFFFFFFFF, 0x0, 0x00003027 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static const struct vega10_didt_config_reg AvfsPSMInitConfig_vega10[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { 0x16A05, 0xFFFFFFFF, 0x18, 0x00000001 }, + { 0x16A05, 0xFFFFFFFF, 0x8, 0x00000003 }, + { 0x16A05, 0xFFFFFFFF, 0xa, 0x00000006 }, + { 0x16A05, 0xFFFFFFFF, 0x7, 0x00000000 }, + { 0x16A06, 0xFFFFFFFF, 0x18, 0x00000001 }, + { 0x16A06, 0xFFFFFFFF, 0x19, 0x00000001 }, + { 0x16A01, 0xFFFFFFFF, 0x0, 0x00003027 }, + + { 0xFFFFFFFF } /* End of list */ +}; + +static int vega10_program_didt_config_registers(struct pp_hwmgr *hwmgr, const struct vega10_didt_config_reg *config_regs, enum vega10_didt_config_reg_type reg_type) +{ + uint32_t data; + + PP_ASSERT_WITH_CODE((config_regs != NULL), "[vega10_program_didt_config_registers] Invalid config register table!", return -EINVAL); + + while (config_regs->offset != 0xFFFFFFFF) { + switch (reg_type) { + case VEGA10_CONFIGREG_DIDT: + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset); + data &= ~config_regs->mask; + data |= ((config_regs->value << config_regs->shift) & config_regs->mask); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data); + break; + case VEGA10_CONFIGREG_GCCAC: + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset); + data &= ~config_regs->mask; + data |= ((config_regs->value << config_regs->shift) & config_regs->mask); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data); + break; + case VEGA10_CONFIGREG_SECAC: + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_SE_CAC, config_regs->offset); + data &= ~config_regs->mask; + data |= ((config_regs->value << config_regs->shift) & config_regs->mask); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG_SE_CAC, config_regs->offset, data); + break; + default: + return -EINVAL; + } + + config_regs++; + } + + return 0; +} + +static int vega10_program_gc_didt_config_registers(struct pp_hwmgr *hwmgr, const struct vega10_didt_config_reg *config_regs) +{ + uint32_t data; + + while (config_regs->offset != 0xFFFFFFFF) { + data = cgs_read_register(hwmgr->device, config_regs->offset); + data &= ~config_regs->mask; + data |= ((config_regs->value << config_regs->shift) & config_regs->mask); + cgs_write_register(hwmgr->device, config_regs->offset, data); + config_regs++; + } + + return 0; +} + +static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable) +{ + uint32_t data; + int result; + uint32_t en = (enable ? 
1 : 0); + uint32_t didt_block_info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0); + data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data); + didt_block_info &= ~SQ_Enable_MASK; + didt_block_info |= en << SQ_Enable_SHIFT; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0); + data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data); + didt_block_info &= ~DB_Enable_MASK; + didt_block_info |= en << DB_Enable_SHIFT; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0); + data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data); + didt_block_info &= ~TD_Enable_MASK; + didt_block_info |= en << TD_Enable_SHIFT; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0); + data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data); + didt_block_info &= ~TCP_Enable_MASK; + didt_block_info |= en << TCP_Enable_SHIFT; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_CTRL0); + data &= ~DIDT_DBR_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_DBR_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DBR_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_CTRL0, data); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable)) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL); + data &= ~DIDT_SQ_EDC_CTRL__EDC_EN_MASK; + data |= ((en << DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT) & DIDT_SQ_EDC_CTRL__EDC_EN_MASK); + data &= ~DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK; + data |= ((~en << DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL, data); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL); + data &= ~DIDT_DB_EDC_CTRL__EDC_EN_MASK; + data |= ((en << DIDT_DB_EDC_CTRL__EDC_EN__SHIFT) & DIDT_DB_EDC_CTRL__EDC_EN_MASK); + data &= ~DIDT_DB_EDC_CTRL__EDC_SW_RST_MASK; + data |= ((~en << DIDT_DB_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_DB_EDC_CTRL__EDC_SW_RST_MASK); + cgs_write_ind_register(hwmgr->device, 
CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL, data); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL); + data &= ~DIDT_TD_EDC_CTRL__EDC_EN_MASK; + data |= ((en << DIDT_TD_EDC_CTRL__EDC_EN__SHIFT) & DIDT_TD_EDC_CTRL__EDC_EN_MASK); + data &= ~DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK; + data |= ((~en << DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL, data); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL); + data &= ~DIDT_TCP_EDC_CTRL__EDC_EN_MASK; + data |= ((en << DIDT_TCP_EDC_CTRL__EDC_EN__SHIFT) & DIDT_TCP_EDC_CTRL__EDC_EN_MASK); + data &= ~DIDT_TCP_EDC_CTRL__EDC_SW_RST_MASK; + data |= ((~en << DIDT_TCP_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_TCP_EDC_CTRL__EDC_SW_RST_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL, data); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL); + data &= ~DIDT_DBR_EDC_CTRL__EDC_EN_MASK; + data |= ((en << DIDT_DBR_EDC_CTRL__EDC_EN__SHIFT) & DIDT_DBR_EDC_CTRL__EDC_EN_MASK); + data &= ~DIDT_DBR_EDC_CTRL__EDC_SW_RST_MASK; + data |= ((~en << DIDT_DBR_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_DBR_EDC_CTRL__EDC_SW_RST_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL, data); + } + } + + if (enable) { + /* For Vega10, SMC does not support any mask yet. */ + result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info); + PP_ASSERT((0 == result), "[EnableDiDtConfig] SMC Configure Gfx Didt Failed!"); + } +} + +static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr) +{ + int result; + uint32_t num_se = 0, count, data; + struct cgs_system_info sys_info = {0}; + uint32_t reg; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO; + if (cgs_query_system_info(hwmgr->device, &sys_info) == 0) + num_se = sys_info.value; + + cgs_enter_safe_mode(hwmgr->device, true); + + cgs_lock_grbm_idx(hwmgr->device, true); + reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); + for (count = 0; count < num_se; count++) { + data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); + cgs_write_register(hwmgr->device, reg, data); + + result = vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl1Config_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl2Config_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl3Config_vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEDiDtTuningCtrlConfig_Vega10, VEGA10_CONFIGREG_DIDT); + result |= 
vega10_program_didt_config_registers(hwmgr, SELCacConfig_Vega10, VEGA10_CONFIGREG_SECAC); + result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl0Config_Vega10, VEGA10_CONFIGREG_DIDT); + + if (0 != result) + break; + } + cgs_write_register(hwmgr->device, reg, 0xE0000000); + cgs_lock_grbm_idx(hwmgr->device, false); + + vega10_didt_set_mask(hwmgr, true); + + cgs_enter_safe_mode(hwmgr->device, false); + + return 0; +} + +static int vega10_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr) +{ + cgs_enter_safe_mode(hwmgr->device, true); + + vega10_didt_set_mask(hwmgr, false); + + cgs_enter_safe_mode(hwmgr->device, false); + + return 0; +} + +static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) +{ + int result; + uint32_t num_se = 0, count, data; + struct cgs_system_info sys_info = {0}; + uint32_t reg; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO; + if (cgs_query_system_info(hwmgr->device, &sys_info) == 0) + num_se = sys_info.value; + + cgs_enter_safe_mode(hwmgr->device, true); + + cgs_lock_grbm_idx(hwmgr->device, true); + reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); + for (count = 0; count < num_se; count++) { + data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); + cgs_write_register(hwmgr->device, reg, data); + + result = vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl3Config_vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl0Config_Vega10, VEGA10_CONFIGREG_DIDT); + if (0 != result) + break; + } + cgs_write_register(hwmgr->device, reg, 0xE0000000); + cgs_lock_grbm_idx(hwmgr->device, false); + + vega10_didt_set_mask(hwmgr, true); + + cgs_enter_safe_mode(hwmgr->device, false); + + vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10); + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) + vega10_program_gc_didt_config_registers(hwmgr, GCDiDtCtrl0Config_vega10); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM)) + vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega10); + + return 0; +} + +static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) +{ + uint32_t data; + + cgs_enter_safe_mode(hwmgr->device, true); + + vega10_didt_set_mask(hwmgr, false); + + cgs_enter_safe_mode(hwmgr->device, false); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) { + data = 0x00000000; + cgs_write_register(hwmgr->device, mmGC_DIDT_CTRL0, data); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM)) + vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10); + + return 0; +} + +static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr) +{ + int result; + uint32_t num_se = 0, count, data; + struct cgs_system_info sys_info = {0}; + uint32_t reg; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO; + if (cgs_query_system_info(hwmgr->device, &sys_info) == 0) + num_se = sys_info.value; + + cgs_enter_safe_mode(hwmgr->device, true); + + 
cgs_lock_grbm_idx(hwmgr->device, true); + reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); + for (count = 0; count < num_se; count++) { + data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); + cgs_write_register(hwmgr->device, reg, data); + result = vega10_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEEDCThresholdConfig_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlResetConfig_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlConfig_Vega10, VEGA10_CONFIGREG_DIDT); + + if (0 != result) + break; + } + cgs_write_register(hwmgr->device, reg, 0xE0000000); + cgs_lock_grbm_idx(hwmgr->device, false); + + vega10_didt_set_mask(hwmgr, true); + + cgs_enter_safe_mode(hwmgr->device, false); + + return 0; +} + +static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr) +{ + cgs_enter_safe_mode(hwmgr->device, true); + + vega10_didt_set_mask(hwmgr, false); + + cgs_enter_safe_mode(hwmgr->device, false); + + return 0; +} + +static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) +{ + int result; + uint32_t num_se = 0; + uint32_t count, data; + struct cgs_system_info sys_info = {0}; + uint32_t reg; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO; + if (cgs_query_system_info(hwmgr->device, &sys_info) == 0) + num_se = sys_info.value; + + cgs_enter_safe_mode(hwmgr->device, true); + + vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10); + + cgs_lock_grbm_idx(hwmgr->device, true); + reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); + for (count = 0; count < num_se; count++) { + data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); + cgs_write_register(hwmgr->device, reg, data); + result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlResetConfig_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlConfig_Vega10, VEGA10_CONFIGREG_DIDT); + + if (0 != result) + break; + } + cgs_write_register(hwmgr->device, reg, 0xE0000000); + cgs_lock_grbm_idx(hwmgr->device, false); + + vega10_didt_set_mask(hwmgr, true); + + cgs_enter_safe_mode(hwmgr->device, false); + + vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) { + vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlResetConfig_vega10); + vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlConfig_vega10); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM)) + vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega10); + + return 0; +} + +static int 
vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) +{ + uint32_t data; + + cgs_enter_safe_mode(hwmgr->device, true); + + vega10_didt_set_mask(hwmgr, false); + + cgs_enter_safe_mode(hwmgr->device, false); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) { + data = 0x00000000; + cgs_write_register(hwmgr->device, mmGC_EDC_CTRL, data); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM)) + vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10); + + return 0; +} + +static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr) +{ + uint32_t reg; + int result; + + cgs_enter_safe_mode(hwmgr->device, true); + + cgs_lock_grbm_idx(hwmgr->device, true); + reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); + cgs_write_register(hwmgr->device, reg, 0xE0000000); + cgs_lock_grbm_idx(hwmgr->device, false); + + result = vega10_program_didt_config_registers(hwmgr, SEEDCForceStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT); + result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlForceStallConfig_Vega10, VEGA10_CONFIGREG_DIDT); + if (0 != result) + return result; + + vega10_didt_set_mask(hwmgr, true); + + cgs_enter_safe_mode(hwmgr->device, false); + + return 0; +} + +static int vega10_disable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr) +{ + int result; + + result = vega10_disable_se_edc_config(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDtConfig] Pre DIDT disable clock gating failed!", return result); + + return 0; +} + +int vega10_enable_didt_config(struct pp_hwmgr *hwmgr) +{ + int result = 0; + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_DIDT].supported) { + if (data->smu_features[GNLD_DIDT].enabled) + PP_DBG_LOG("[EnableDiDtConfig] Feature DiDt Already enabled!\n"); + + switch (data->registry_data.didt_mode) { + case 0: + result = vega10_enable_cac_driving_se_didt_config(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 0 Failed!", return result); + break; + case 2: + result = vega10_enable_psm_gc_didt_config(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 2 Failed!", return result); + break; + case 3: + result = vega10_enable_se_edc_config(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 3 Failed!", return result); + break; + case 1: + case 4: + case 5: + result = vega10_enable_psm_gc_edc_config(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 5 Failed!", return result); + break; + case 6: + result = vega10_enable_se_edc_force_stall_config(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 6 Failed!", return result); + break; + default: + result = -EINVAL; + break; + } + + if (0 == result) { + PP_ASSERT_WITH_CODE((!vega10_enable_smc_features(hwmgr->smumgr, true, data->smu_features[GNLD_DIDT].smu_feature_bitmap)), + "[EnableDiDtConfig] Attempt to Enable DiDt feature Failed!", return result); + data->smu_features[GNLD_DIDT].enabled = true; + } + } + + return result; +} + +int vega10_disable_didt_config(struct pp_hwmgr *hwmgr) +{ + int result = 0; + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_DIDT].supported) { + if (!data->smu_features[GNLD_DIDT].enabled) + PP_DBG_LOG("[DisableDiDtConfig] Feature DiDt Already Disabled!\n"); + + 
switch (data->registry_data.didt_mode) { + case 0: + result = vega10_disable_cac_driving_se_didt_config(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 0 Failed!", return result); + break; + case 2: + result = vega10_disable_psm_gc_didt_config(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 2 Failed!", return result); + break; + case 3: + result = vega10_disable_se_edc_config(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 3 Failed!", return result); + break; + case 1: + case 4: + case 5: + result = vega10_disable_psm_gc_edc_config(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 5 Failed!", return result); + break; + case 6: + result = vega10_disable_se_edc_force_stall_config(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 6 Failed!", return result); + break; + default: + result = -EINVAL; + break; + } + + if (0 == result) { + PP_ASSERT_WITH_CODE((0 != vega10_enable_smc_features(hwmgr->smumgr, false, data->smu_features[GNLD_DIDT].smu_feature_bitmap)), + "[DisableDiDtConfig] Attempt to Disable DiDt feature Failed!", return result); + data->smu_features[GNLD_DIDT].enabled = false; + } + } + + return result; +} void vega10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h index 9ecaa27c0bb5..b95771ab89cd 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h @@ -31,6 +31,12 @@ enum vega10_pt_config_reg_type { VEGA10_CONFIGREG_MAX }; +enum vega10_didt_config_reg_type { + VEGA10_CONFIGREG_DIDT = 0, + VEGA10_CONFIGREG_GCCAC, + VEGA10_CONFIGREG_SECAC +}; + /* PowerContainment Features */ #define POWERCONTAINMENT_FEATURE_DTE 0x00000001 #define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 @@ -44,6 +50,13 @@ struct vega10_pt_config_reg { enum vega10_pt_config_reg_type type; }; +struct vega10_didt_config_reg { + uint32_t offset; + uint32_t mask; + uint32_t shift; + uint32_t value; +}; + struct vega10_pt_defaults { uint8_t SviLoadLineEn; uint8_t SviLoadLineVddC; @@ -62,5 +75,8 @@ int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); int vega10_power_control_set_level(struct pp_hwmgr *hwmgr); int vega10_disable_power_containment(struct pp_hwmgr *hwmgr); +int vega10_enable_didt_config(struct pp_hwmgr *hwmgr); +int vega10_disable_didt_config(struct pp_hwmgr *hwmgr); + #endif /* _VEGA10_POWERTUNE_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h index a1ebe1014492..a4c8b09b6f14 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h @@ -164,9 +164,14 @@ enum phm_platform_caps { PHM_PlatformCaps_EnablePlatformPowerManagement, /* indicates that Platform Power Management feature is supported */ PHM_PlatformCaps_SurpriseRemoval, /* indicates that surprise removal feature is requested */ PHM_PlatformCaps_NewCACVoltage, /* indicates new CAC voltage table support */ + PHM_PlatformCaps_DiDtSupport, /* for dI/dT feature */ PHM_PlatformCaps_DBRamping, /* for dI/dT feature */ PHM_PlatformCaps_TDRamping, /* for dI/dT feature */ PHM_PlatformCaps_TCPRamping, /* for dI/dT feature */ + PHM_PlatformCaps_DBRRamping, /* for dI/dT feature */ + 
PHM_PlatformCaps_DiDtEDCEnable, /* for dI/dT feature */ + PHM_PlatformCaps_GCEDC, /* for dI/dT feature */ + PHM_PlatformCaps_PSM, /* for dI/dT feature */ PHM_PlatformCaps_EnableSMU7ThermalManagement, /* SMC will manage thermal events */ PHM_PlatformCaps_FPS, /* FPS support */ PHM_PlatformCaps_ACP, /* ACP support */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h index f3f9ebb631a5..822cd8b5bf90 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h @@ -42,6 +42,12 @@ } \ } while (0) +#define PP_ASSERT(cond, msg) \ + do { \ + if (!(cond)) { \ + pr_warn("%s\n", msg); \ + } \ + } while (0) #define PP_DBG_LOG(fmt, ...) \ do { \ diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h b/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h index 227d999b6bd1..a511611ec7e0 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h @@ -41,6 +41,8 @@ inline static uint32_t soc15_get_register_offset( reg = MP1_BASE.instance[inst].segment[segment] + offset; else if (hw_id == DF_HWID) reg = DF_BASE.instance[inst].segment[segment] + offset; + else if (hw_id == GC_HWID) + reg = GC_BASE.instance[inst].segment[segment] + offset; return reg; } From 12097c6d67ba851d7eea7ab82b7ba70019347bfd Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jul 2017 13:38:09 -0400 Subject: [PATCH 0317/1795] drm/amdgpu: add nbio 6.1 register init function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Used for nbio registers that need to be initialized. Currently only used for a golden setting that got missed on some boards. Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c | 13 +++++++++++++ drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h | 1 + 2 files changed, 14 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c index 61c00281a61b..045988b18bc3 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c @@ -32,6 +32,7 @@ #define smnCPM_CONTROL 0x11180460 #define smnPCIE_CNTL2 0x11180070 +#define smnPCIE_CONFIG_CNTL 0x11180044 u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev) { @@ -256,3 +257,15 @@ void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev) adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; } } + +void nbio_v6_1_init_registers(struct amdgpu_device *adev) +{ + uint32_t def, data; + + def = data = RREG32_PCIE(smnPCIE_CONFIG_CNTL); + data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1); + data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1); + + if (def != data) + WREG32_PCIE(smnPCIE_CONFIG_CNTL, data); +} diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h index f6f8bc045518..686e4b4d296a 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h @@ -50,5 +50,6 @@ void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev, bool void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev, bool enable); void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags); void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev); +void nbio_v6_1_init_registers(struct amdgpu_device *adev); #endif From 833fa075b87c36be437a941393d750c36022d902 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jul 2017 13:43:55 
-0400 Subject: [PATCH 0318/1795] drm/amdgpu/soc15: init nbio registers for vega10 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Call nbio init registers on hw_init to set up any nbio registers that need initialization at hw init time. Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 0d9a3dd302a7..8dc023296773 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -727,6 +727,9 @@ static int soc15_common_hw_init(void *handle) soc15_pcie_gen3_enable(adev); /* enable aspm */ soc15_program_aspm(adev); + /* setup nbio registers */ + if (!(adev->flags & AMD_IS_APU)) + nbio_v6_1_init_registers(adev); /* enable the doorbell aperture */ soc15_enable_doorbell_aperture(adev, true); From 70d17a25da21e1d497db3580ae27682952ddfd98 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 30 Jun 2017 17:26:47 -0400 Subject: [PATCH 0319/1795] drm/amdgpu: check scratch registers to see if we need post (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rather than checking the CONFIG_MEMSIZE register as that may not be reliable on some APUs. v2: The scratch register is only used on CIK+ Reviewed-by: Junwei Zhang Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 541695768f0a..d92ac5c1af54 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -736,7 +736,12 @@ bool amdgpu_need_post(struct amdgpu_device *adev) adev->has_hw_reset = false; return true; } - /* then check MEM_SIZE, in case the crtcs are off */ + + /* bios scratch used on CIK+ */ + if (adev->asic_type >= CHIP_BONAIRE) + return amdgpu_atombios_scratch_need_asic_init(adev); + + /* check MEM_SIZE for older asics */ reg = amdgpu_asic_get_config_memsize(adev); if ((reg != 0) && (reg != 0xffffffff)) From 3490bdb537fbf94a7a57f6c353a39e6306a9cfbd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 6 Jul 2017 22:02:41 +0200 Subject: [PATCH 0320/1795] drm/amdgpu: move GART struct and function into amdgpu_gart.h v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No functional change, just cleanup. v2: rebased, keep gart name. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 48 +-------------- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h | 77 ++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 1 + 3 files changed, 79 insertions(+), 47 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 1bb1912f863c..9c1146228421 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -68,6 +68,7 @@ #include "gpu_scheduler.h" #include "amdgpu_virt.h" +#include "amdgpu_gart.h" /* * Modules parameters. 
@@ -531,53 +532,6 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp, int amdgpu_fence_slab_init(void); void amdgpu_fence_slab_fini(void); -/* - * GART structures, functions & helpers - */ -struct amdgpu_mc; - -#define AMDGPU_GPU_PAGE_SIZE 4096 -#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1) -#define AMDGPU_GPU_PAGE_SHIFT 12 -#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK) - -struct amdgpu_gart { - dma_addr_t table_addr; - struct amdgpu_bo *robj; - void *ptr; - unsigned num_gpu_pages; - unsigned num_cpu_pages; - unsigned table_size; -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS - struct page **pages; -#endif - bool ready; - - /* Asic default pte flags */ - uint64_t gart_pte_flags; - - const struct amdgpu_gart_funcs *gart_funcs; -}; - -void amdgpu_gart_set_defaults(struct amdgpu_device *adev); -int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev); -void amdgpu_gart_table_ram_free(struct amdgpu_device *adev); -int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev); -void amdgpu_gart_table_vram_free(struct amdgpu_device *adev); -int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev); -void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev); -int amdgpu_gart_init(struct amdgpu_device *adev); -void amdgpu_gart_fini(struct amdgpu_device *adev); -int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, - int pages); -int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, - int pages, dma_addr_t *dma_addr, uint64_t flags, - void *dst); -int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, - int pages, struct page **pagelist, - dma_addr_t *dma_addr, uint64_t flags); -int amdgpu_ttm_recover_gart(struct amdgpu_device *adev); - /* * VMHUB structures, functions & helpers */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h new file mode 100644 index 000000000000..d4cce6936200 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h @@ -0,0 +1,77 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __AMDGPU_GART_H__ +#define __AMDGPU_GART_H__ + +#include + +/* + * GART structures, functions & helpers + */ +struct amdgpu_device; +struct amdgpu_bo; +struct amdgpu_gart_funcs; + +#define AMDGPU_GPU_PAGE_SIZE 4096 +#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1) +#define AMDGPU_GPU_PAGE_SHIFT 12 +#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK) + +struct amdgpu_gart { + dma_addr_t table_addr; + struct amdgpu_bo *robj; + void *ptr; + unsigned num_gpu_pages; + unsigned num_cpu_pages; + unsigned table_size; +#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS + struct page **pages; +#endif + bool ready; + + /* Asic default pte flags */ + uint64_t gart_pte_flags; + + const struct amdgpu_gart_funcs *gart_funcs; +}; + +void amdgpu_gart_set_defaults(struct amdgpu_device *adev); +int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev); +void amdgpu_gart_table_ram_free(struct amdgpu_device *adev); +int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev); +void amdgpu_gart_table_vram_free(struct amdgpu_device *adev); +int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev); +void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev); +int amdgpu_gart_init(struct amdgpu_device *adev); +void amdgpu_gart_fini(struct amdgpu_device *adev); +int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, + int pages); +int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, + int pages, dma_addr_t *dma_addr, uint64_t flags, + void *dst); +int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, + int pages, struct page **pagelist, + dma_addr_t *dma_addr, uint64_t flags); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 4f5c1da5922e..f137c2458ee8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -80,5 +80,6 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); bool amdgpu_ttm_is_bound(struct ttm_tt *ttm); int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem); +int amdgpu_ttm_recover_gart(struct amdgpu_device *adev); #endif From ed21c047e9753ed5c7abe437ec25222b7d538a89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 6 Jul 2017 22:26:05 +0200 Subject: [PATCH 0321/1795] drm/amdgpu: remove gtt_base_align handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Not used any more. 
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 1 - drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 1 - drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 1 - drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 1 - 6 files changed, 3 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 9c1146228421..590798f0c245 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -564,7 +564,6 @@ struct amdgpu_mc { unsigned vram_width; u64 real_vram_size; int vram_mtrr; - u64 gtt_base_align; u64 mc_mask; const struct firmware *fw; /* MC firmware */ uint32_t fw_version; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index d92ac5c1af54..c635abdac96f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -696,8 +696,8 @@ void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) { u64 size_af, size_bf; - size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align; - size_bf = mc->vram_start & ~mc->gtt_base_align; + size_af = adev->mc.mc_mask - mc->vram_end; + size_bf = mc->vram_start; if (size_bf > size_af) { if (mc->gtt_size > size_bf) { dev_warn(adev->dev, "limiting GTT\n"); @@ -709,7 +709,7 @@ void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) dev_warn(adev->dev, "limiting GTT\n"); mc->gtt_size = size_af; } - mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align; + mc->gtt_start = mc->vram_end + 1; } mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n", diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 5f7750c6497e..810d5734ce1e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -228,7 +228,6 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev, mc->mc_vram_size = 0xFFC0000000ULL; } amdgpu_vram_location(adev, &adev->mc, base); - adev->mc.gtt_base_align = 0; amdgpu_gtt_location(adev, mc); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 388b52febc8b..066f00ad4152 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -244,7 +244,6 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev, mc->mc_vram_size = 0xFFC0000000ULL; } amdgpu_vram_location(adev, &adev->mc, base); - adev->mc.gtt_base_align = 0; amdgpu_gtt_location(adev, mc); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index d148d1c585b3..f30c39c72bca 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -406,7 +406,6 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev, mc->mc_vram_size = 0xFFC0000000ULL; } amdgpu_vram_location(adev, &adev->mc, base); - adev->mc.gtt_base_align = 0; amdgpu_gtt_location(adev, mc); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 8ec148727149..dd2756ec11b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -420,7 +420,6 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, if 
(!amdgpu_sriov_vf(adev)) base = mmhub_v1_0_get_fb_location(adev); amdgpu_vram_location(adev, &adev->mc, base); - adev->mc.gtt_base_align = 0; amdgpu_gtt_location(adev, mc); /* base offset of vram pages */ if (adev->flags & AMD_IS_APU) From 6f02a69648f14024213ab65cd4a4a701e40e46ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 7 Jul 2017 11:56:59 +0200 Subject: [PATCH 0322/1795] drm/amdgpu: consistent name all GART related parts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rename symbols from gtt_ to gart_ as appropriate. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 8 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 22 +++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 12 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 48 +++++++++++----------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 10 ++--- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 8 ++-- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 8 ++-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 8 ++-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 8 ++-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4 +- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 8 ++-- 11 files changed, 72 insertions(+), 72 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 590798f0c245..b19557b8c683 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -556,9 +556,9 @@ struct amdgpu_mc { * about vram size near mc fb location */ u64 mc_vram_size; u64 visible_vram_size; - u64 gtt_size; - u64 gtt_start; - u64 gtt_end; + u64 gart_size; + u64 gart_start; + u64 gart_end; u64 vram_start; u64 vram_end; unsigned vram_width; @@ -1860,7 +1860,7 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, struct ttm_mem_reg *mem); void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base); -void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc); +void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc); void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size); int amdgpu_ttm_init(struct amdgpu_device *adev); void amdgpu_ttm_fini(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index c635abdac96f..84ff824ea260 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -681,7 +681,7 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 } /** - * amdgpu_gtt_location - try to find GTT location + * amdgpu_gart_location - try to find GTT location * @adev: amdgpu device structure holding all necessary informations * @mc: memory controller structure holding memory informations * @@ -692,28 +692,28 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 * * FIXME: when reducing GTT size align new size on power of 2. 
*/ -void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) +void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) { u64 size_af, size_bf; size_af = adev->mc.mc_mask - mc->vram_end; size_bf = mc->vram_start; if (size_bf > size_af) { - if (mc->gtt_size > size_bf) { + if (mc->gart_size > size_bf) { dev_warn(adev->dev, "limiting GTT\n"); - mc->gtt_size = size_bf; + mc->gart_size = size_bf; } - mc->gtt_start = 0; + mc->gart_start = 0; } else { - if (mc->gtt_size > size_af) { + if (mc->gart_size > size_af) { dev_warn(adev->dev, "limiting GTT\n"); - mc->gtt_size = size_af; + mc->gart_size = size_af; } - mc->gtt_start = mc->vram_end + 1; + mc->gart_start = mc->vram_end + 1; } - mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; + mc->gart_end = mc->gart_start + mc->gart_size - 1; dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n", - mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end); + mc->gart_size >> 20, mc->gart_start, mc->gart_end); } /* @@ -2031,7 +2031,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->flags = flags; adev->asic_type = flags & AMD_ASIC_MASK; adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; - adev->mc.gtt_size = 512 * 1024 * 1024; + adev->mc.gart_size = 512 * 1024 * 1024; adev->accel_working = false; adev->num_rings = 0; adev->mman.buffer_funcs = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index b4048a91c814..d578ca6f2dfd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -57,11 +57,11 @@ */ /** - * amdgpu_gart_set_defaults - set the default gtt_size + * amdgpu_gart_set_defaults - set the default gart_size * * @adev: amdgpu_device pointer * - * Set the default gtt_size based on parameters and available VRAM. + * Set the default gart_size based on parameters and available VRAM. */ void amdgpu_gart_set_defaults(struct amdgpu_device *adev) { @@ -69,10 +69,10 @@ void amdgpu_gart_set_defaults(struct amdgpu_device *adev) * size equal to the 1024 or vram, whichever is larger. 
*/ if (amdgpu_gart_size == -1) - adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), + adev->mc.gart_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), adev->mc.mc_vram_size); else - adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; + adev->mc.gart_size = (uint64_t)amdgpu_gart_size << 20; } /** @@ -387,8 +387,8 @@ int amdgpu_gart_init(struct amdgpu_device *adev) if (r) return r; /* Compute table size */ - adev->gart.num_cpu_pages = adev->mc.gtt_size / PAGE_SIZE; - adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE; + adev->gart.num_cpu_pages = adev->mc.gart_size / PAGE_SIZE; + adev->gart.num_gpu_pages = adev->mc.gart_size / AMDGPU_GPU_PAGE_SIZE; DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n", adev->gart.num_cpu_pages, adev->gart.num_gpu_pages); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index d02e611a2dae..3c4d7574d704 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -33,7 +33,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; struct amdgpu_bo *vram_obj = NULL; struct amdgpu_bo **gtt_obj = NULL; - uint64_t gtt_addr, vram_addr; + uint64_t gart_addr, vram_addr; unsigned n, size; int i, r; @@ -42,7 +42,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) /* Number of tests = * (Total GTT - IB pool - writeback page - ring buffers) / test size */ - n = adev->mc.gtt_size - AMDGPU_IB_POOL_SIZE*64*1024; + n = adev->mc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024; for (i = 0; i < AMDGPU_MAX_RINGS; ++i) if (adev->rings[i]) n -= adev->rings[i]->ring_size; @@ -76,7 +76,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) } for (i = 0; i < n; i++) { void *gtt_map, *vram_map; - void **gtt_start, **gtt_end; + void **gart_start, **gart_end; void **vram_start, **vram_end; struct dma_fence *fence = NULL; @@ -91,7 +91,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) r = amdgpu_bo_reserve(gtt_obj[i], false); if (unlikely(r != 0)) goto out_lclean_unref; - r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, >t_addr); + r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gart_addr); if (r) { DRM_ERROR("Failed to pin GTT object %d\n", i); goto out_lclean_unres; @@ -103,14 +103,14 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) goto out_lclean_unpin; } - for (gtt_start = gtt_map, gtt_end = gtt_map + size; - gtt_start < gtt_end; - gtt_start++) - *gtt_start = gtt_start; + for (gart_start = gtt_map, gart_end = gtt_map + size; + gart_start < gart_end; + gart_start++) + *gart_start = gart_start; amdgpu_bo_kunmap(gtt_obj[i]); - r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr, + r = amdgpu_copy_buffer(ring, gart_addr, vram_addr, size, NULL, &fence, false, false); if (r) { @@ -132,21 +132,21 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) goto out_lclean_unpin; } - for (gtt_start = gtt_map, gtt_end = gtt_map + size, + for (gart_start = gtt_map, gart_end = gtt_map + size, vram_start = vram_map, vram_end = vram_map + size; vram_start < vram_end; - gtt_start++, vram_start++) { - if (*vram_start != gtt_start) { + gart_start++, vram_start++) { + if (*vram_start != gart_start) { DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, " "expected 0x%p (GTT/VRAM offset " "0x%16llx/0x%16llx)\n", - i, *vram_start, gtt_start, + i, *vram_start, gart_start, (unsigned long long) - (gtt_addr - adev->mc.gtt_start + - (void*)gtt_start - gtt_map), + (gart_addr - 
adev->mc.gart_start + + (void*)gart_start - gtt_map), (unsigned long long) (vram_addr - adev->mc.vram_start + - (void*)gtt_start - gtt_map)); + (void*)gart_start - gtt_map)); amdgpu_bo_kunmap(vram_obj); goto out_lclean_unpin; } @@ -155,7 +155,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) amdgpu_bo_kunmap(vram_obj); - r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr, + r = amdgpu_copy_buffer(ring, vram_addr, gart_addr, size, NULL, &fence, false, false); if (r) { @@ -177,20 +177,20 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) goto out_lclean_unpin; } - for (gtt_start = gtt_map, gtt_end = gtt_map + size, + for (gart_start = gtt_map, gart_end = gtt_map + size, vram_start = vram_map, vram_end = vram_map + size; - gtt_start < gtt_end; - gtt_start++, vram_start++) { - if (*gtt_start != vram_start) { + gart_start < gart_end; + gart_start++, vram_start++) { + if (*gart_start != vram_start) { DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, " "expected 0x%p (VRAM/GTT offset " "0x%16llx/0x%16llx)\n", - i, *gtt_start, vram_start, + i, *gart_start, vram_start, (unsigned long long) (vram_addr - adev->mc.vram_start + (void*)vram_start - vram_map), (unsigned long long) - (gtt_addr - adev->mc.gtt_start + + (gart_addr - adev->mc.gart_start + (void*)vram_start - vram_map)); amdgpu_bo_kunmap(gtt_obj[i]); goto out_lclean_unpin; @@ -200,7 +200,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) amdgpu_bo_kunmap(gtt_obj[i]); DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n", - gtt_addr - adev->mc.gtt_start); + gart_addr - adev->mc.gart_start); continue; out_lclean_unpin: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index fb9c6988f5f2..4e711d9af67c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -158,7 +158,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, break; case TTM_PL_TT: man->func = &amdgpu_gtt_mgr_func; - man->gpu_offset = adev->mc.gtt_start; + man->gpu_offset = adev->mc.gart_start; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; @@ -1144,13 +1144,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_INFO("amdgpu: %uM of VRAM memory ready\n", (unsigned) (adev->mc.real_vram_size / (1024 * 1024))); r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, - adev->mc.gtt_size >> PAGE_SHIFT); + adev->mc.gart_size >> PAGE_SHIFT); if (r) { DRM_ERROR("Failed initializing GTT heap.\n"); return r; } DRM_INFO("amdgpu: %uM of GTT memory ready.\n", - (unsigned)(adev->mc.gtt_size / (1024 * 1024))); + (unsigned)(adev->mc.gart_size / (1024 * 1024))); adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT; adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT; @@ -1279,7 +1279,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo, BUG_ON(adev->mman.buffer_funcs->copy_max_bytes < AMDGPU_GTT_MAX_TRANSFER_SIZE * 8); - *addr = adev->mc.gtt_start; + *addr = adev->mc.gart_start; *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GPU_PAGE_SIZE; @@ -1645,7 +1645,7 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) adev, &amdgpu_ttm_gtt_fops); if (IS_ERR(ent)) return PTR_ERR(ent); - i_size_write(ent->d_inode, adev->mc.gtt_size); + i_size_write(ent->d_inode, adev->mc.gart_size); adev->mman.gtt = ent; #endif diff --git 
a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index a42f483767e7..3ff786cfc947 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -58,14 +58,14 @@ static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev) gfxhub_v1_0_init_gart_pt_regs(adev); WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, - (u32)(adev->mc.gtt_start >> 12)); + (u32)(adev->mc.gart_start >> 12)); WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, - (u32)(adev->mc.gtt_start >> 44)); + (u32)(adev->mc.gart_start >> 44)); WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, - (u32)(adev->mc.gtt_end >> 12)); + (u32)(adev->mc.gart_end >> 12)); WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, - (u32)(adev->mc.gtt_end >> 44)); + (u32)(adev->mc.gart_end >> 44)); } static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 810d5734ce1e..886df0902067 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -228,7 +228,7 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev, mc->mc_vram_size = 0xFFC0000000ULL; } amdgpu_vram_location(adev, &adev->mc, base); - amdgpu_gtt_location(adev, mc); + amdgpu_gart_location(adev, mc); } static void gmc_v6_0_mc_program(struct amdgpu_device *adev) @@ -481,8 +481,8 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) (4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) | (4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT)); /* setup context0 */ - WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); - WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12); WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(adev->dummy_page.addr >> 12)); @@ -529,7 +529,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) gmc_v6_0_gart_flush_gpu_tlb(adev, 0); dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", - (unsigned)(adev->mc.gtt_size >> 20), + (unsigned)(adev->mc.gart_size >> 20), (unsigned long long)adev->gart.table_addr); adev->gart.ready = true; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 066f00ad4152..21a45b403b21 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -244,7 +244,7 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev, mc->mc_vram_size = 0xFFC0000000ULL; } amdgpu_vram_location(adev, &adev->mc, base); - amdgpu_gtt_location(adev, mc); + amdgpu_gart_location(adev, mc); } /** @@ -584,8 +584,8 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4); WREG32(mmVM_L2_CNTL3, tmp); /* setup context0 */ - WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); - WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12); WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, 
(u32)(adev->dummy_page.addr >> 12)); @@ -639,7 +639,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) gmc_v7_0_gart_flush_gpu_tlb(adev, 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", - (unsigned)(adev->mc.gtt_size >> 20), + (unsigned)(adev->mc.gart_size >> 20), (unsigned long long)adev->gart.table_addr); adev->gart.ready = true; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index f30c39c72bca..2b39606f1d3c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -406,7 +406,7 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev, mc->mc_vram_size = 0xFFC0000000ULL; } amdgpu_vram_location(adev, &adev->mc, base); - amdgpu_gtt_location(adev, mc); + amdgpu_gart_location(adev, mc); } /** @@ -786,8 +786,8 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0); WREG32(mmVM_L2_CNTL4, tmp); /* setup context0 */ - WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); - WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12); WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(adev->dummy_page.addr >> 12)); @@ -842,7 +842,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) gmc_v8_0_gart_flush_gpu_tlb(adev, 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", - (unsigned)(adev->mc.gtt_size >> 20), + (unsigned)(adev->mc.gart_size >> 20), (unsigned long long)adev->gart.table_addr); adev->gart.ready = true; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index dd2756ec11b8..677181fdfa00 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -420,7 +420,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, if (!amdgpu_sriov_vf(adev)) base = mmhub_v1_0_get_fb_location(adev); amdgpu_vram_location(adev, &adev->mc, base); - amdgpu_gtt_location(adev, mc); + amdgpu_gart_location(adev, mc); /* base offset of vram pages */ if (adev->flags & AMD_IS_APU) adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev); @@ -736,7 +736,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) gmc_v9_0_gart_flush_gpu_tlb(adev, 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", - (unsigned)(adev->mc.gtt_size >> 20), + (unsigned)(adev->mc.gart_size >> 20), (unsigned long long)adev->gart.table_addr); adev->gart.ready = true; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 9f2cf78907a3..0780e830b76b 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -69,14 +69,14 @@ static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev) mmhub_v1_0_init_gart_pt_regs(adev); WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, - (u32)(adev->mc.gtt_start >> 12)); + (u32)(adev->mc.gart_start >> 12)); WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, - (u32)(adev->mc.gtt_start >> 44)); + (u32)(adev->mc.gart_start >> 44)); WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, - (u32)(adev->mc.gtt_end >> 12)); + (u32)(adev->mc.gart_end >> 12)); 
WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, - (u32)(adev->mc.gtt_end >> 44)); + (u32)(adev->mc.gart_end >> 44)); } static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) From bb84284e10ec3447b4bb74cfa493d5c384a0b798 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 7 Jul 2017 13:16:37 +0200 Subject: [PATCH 0323/1795] drm/amdgpu: limit the GTT manager address space MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We should only cover the GART size with the GTT manager. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index f46a97d91675..5e6b90c6794f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -42,6 +42,7 @@ struct amdgpu_gtt_mgr { static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man, unsigned long p_size) { + struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); struct amdgpu_gtt_mgr *mgr; uint64_t start, size; @@ -50,7 +51,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man, return -ENOMEM; start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS; - size = p_size - start; + size = (adev->mc.gart_size >> PAGE_SHIFT) - start; drm_mm_init(&mgr->mm, start, size); spin_lock_init(&mgr->lock); mgr->available = p_size; @@ -112,6 +113,7 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, const struct ttm_place *place, struct ttm_mem_reg *mem) { + struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); struct amdgpu_gtt_mgr *mgr = man->priv; struct drm_mm_node *node = mem->mm_node; enum drm_mm_insert_mode mode; @@ -129,7 +131,7 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, if (place && place->lpfn) lpfn = place->lpfn; else - lpfn = man->size; + lpfn = adev->gart.num_cpu_pages; mode = DRM_MM_INSERT_BEST; if (place && place->flags & TTM_PL_FLAG_TOPDOWN) From 36d3837266c53a92a909f22f6bbf0846c0e6464f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 7 Jul 2017 13:17:45 +0200 Subject: [PATCH 0324/1795] drm/amdgpu: add new gttsize module parameter v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows setting the gtt size independent of the gart size. 
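A usage sketch (3072 is just an illustrative value): booting with amdgpu.gttsize=3072 on the kernel command line, or putting "options amdgpu gttsize=3072" in a modprobe.d file, caps the GTT domain at 3 GB while the GART aperture continues to follow the separate gartsize parameter; leaving gttsize at -1 keeps the automatic sizing of max(default GTT size, VRAM size) shown in the amdgpu_ttm.c hunk below.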
v2: fix copy and paste typo Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 7 +++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 ++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 12 +++++++++--- 4 files changed, 21 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index b19557b8c683..c7436ea425b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -76,6 +76,7 @@ extern int amdgpu_modeset; extern int amdgpu_vram_limit; extern int amdgpu_gart_size; +extern int amdgpu_gtt_size; extern int amdgpu_moverate; extern int amdgpu_benchmarking; extern int amdgpu_testing; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 84ff824ea260..49dd8e0ddd17 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1141,6 +1141,13 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) } } + if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) { + /* gtt size must be greater or equal to 32M */ + dev_warn(adev->dev, "gtt size (%d) too small\n", + amdgpu_gtt_size); + amdgpu_gtt_size = -1; + } + amdgpu_check_vm_size(adev); amdgpu_check_block_size(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index d90dc426ee5b..b83e87fef6a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -75,6 +75,7 @@ int amdgpu_vram_limit = 0; int amdgpu_gart_size = -1; /* auto */ +int amdgpu_gtt_size = -1; /* auto */ int amdgpu_moverate = -1; /* auto */ int amdgpu_benchmarking = 0; int amdgpu_testing = 0; @@ -123,6 +124,9 @@ module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)"); module_param_named(gartsize, amdgpu_gart_size, int, 0600); +MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)"); +module_param_named(gttsize, amdgpu_gtt_size, int, 0600); + MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. 
(32, 64, etc., -1=auto, 0=1=disabled)"); module_param_named(moverate, amdgpu_moverate, int, 0600); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 4e711d9af67c..b5420b20c2f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1097,6 +1097,7 @@ static struct ttm_bo_driver amdgpu_bo_driver = { int amdgpu_ttm_init(struct amdgpu_device *adev) { + uint64_t gtt_size; int r; r = amdgpu_ttm_global_init(adev); @@ -1143,14 +1144,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) } DRM_INFO("amdgpu: %uM of VRAM memory ready\n", (unsigned) (adev->mc.real_vram_size / (1024 * 1024))); - r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, - adev->mc.gart_size >> PAGE_SHIFT); + + if (amdgpu_gtt_size == -1) + gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), + adev->mc.mc_vram_size); + else + gtt_size = (uint64_t)amdgpu_gtt_size << 20; + r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT); if (r) { DRM_ERROR("Failed initializing GTT heap.\n"); return r; } DRM_INFO("amdgpu: %uM of GTT memory ready.\n", - (unsigned)(adev->mc.gart_size / (1024 * 1024))); + (unsigned)(gtt_size / (1024 * 1024))); adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT; adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT; From f9321cc4408bd90bb0adcd929c04322aaf6afdcc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 7 Jul 2017 13:44:05 +0200 Subject: [PATCH 0325/1795] drm/amdgpu: change gartsize default to 256MB MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Limit the default GART size and save a lot of VRAM. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 12 +++++------- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 9 +-------- 4 files changed, 10 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index c7436ea425b4..019a4b73e5f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -75,7 +75,7 @@ */ extern int amdgpu_modeset; extern int amdgpu_vram_limit; -extern int amdgpu_gart_size; +extern unsigned amdgpu_gart_size; extern int amdgpu_gtt_size; extern int amdgpu_moverate; extern int amdgpu_benchmarking; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 49dd8e0ddd17..0e629931210c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1132,13 +1132,11 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs); } - if (amdgpu_gart_size != -1) { - /* gtt size must be greater or equal to 32M */ - if (amdgpu_gart_size < 32) { - dev_warn(adev->dev, "gart size (%d) too small\n", - amdgpu_gart_size); - amdgpu_gart_size = -1; - } + if (amdgpu_gart_size < 32) { + /* gart size must be greater or equal to 32M */ + dev_warn(adev->dev, "gart size (%d) too small\n", + amdgpu_gart_size); + amdgpu_gart_size = 32; } if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index b83e87fef6a8..ed760f477542 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -74,7 +74,7 @@ #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; -int amdgpu_gart_size = -1; /* auto */ +unsigned amdgpu_gart_size = 256; int amdgpu_gtt_size = -1; /* auto */ int amdgpu_moverate = -1; /* auto */ int amdgpu_benchmarking = 0; @@ -121,8 +121,8 @@ int amdgpu_lbpw = -1; MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); -MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)"); -module_param_named(gartsize, amdgpu_gart_size, int, 0600); +MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc.)"); +module_param_named(gartsize, amdgpu_gart_size, uint, 0600); MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)"); module_param_named(gttsize, amdgpu_gtt_size, int, 0600); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index d578ca6f2dfd..5cc4987cd887 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -65,14 +65,7 @@ */ void amdgpu_gart_set_defaults(struct amdgpu_device *adev) { - /* unless the user had overridden it, set the gart - * size equal to the 1024 or vram, whichever is larger. - */ - if (amdgpu_gart_size == -1) - adev->mc.gart_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), - adev->mc.mc_vram_size); - else - adev->mc.gart_size = (uint64_t)amdgpu_gart_size << 20; + adev->mc.gart_size = (uint64_t)amdgpu_gart_size << 20; } /** From 218b5dcde4d30e071eec4201a36af665ccfa7e1c Mon Sep 17 00:00:00 2001 From: John Brooks Date: Tue, 27 Jun 2017 22:33:17 -0400 Subject: [PATCH 0326/1795] drm/amdgpu: Add vis_vramlimit module parameter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allow specifying a limit on visible VRAM via a module parameter. This is helpful for testing performance under visible VRAM pressure. 
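For example (64 is an arbitrary test value): loading the driver with amdgpu.vis_vramlimit=64 pretends the CPU-visible BAR is only 64 MB, which makes visible-VRAM pressure easy to reproduce even on boards whose whole VRAM is CPU-accessible; as the amdgpu_ttm.c hunk below shows, the limit is only applied when it is positive and no larger than the real visible VRAM size.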
v2: Add cast to 64-bit (Christian König) Signed-off-by: John Brooks Reviewed-by: Michel Dänzer Reviewed-by: Christian König Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 ++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 8 ++++++++ 3 files changed, 13 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 019a4b73e5f5..0d6b0617cdf0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -75,6 +75,7 @@ */ extern int amdgpu_modeset; extern int amdgpu_vram_limit; +extern int amdgpu_vis_vram_limit; extern unsigned amdgpu_gart_size; extern int amdgpu_gtt_size; extern int amdgpu_moverate; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index ed760f477542..3c83fe6c5db8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -74,6 +74,7 @@ #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; +int amdgpu_vis_vram_limit = 0; unsigned amdgpu_gart_size = 256; int amdgpu_gtt_size = -1; /* auto */ int amdgpu_moverate = -1; /* auto */ @@ -121,6 +122,9 @@ int amdgpu_lbpw = -1; MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); +MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes"); +module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444); + MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc.)"); module_param_named(gartsize, amdgpu_gart_size, uint, 0600); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index b5420b20c2f5..e238084b7142 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1099,6 +1099,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) { uint64_t gtt_size; int r; + u64 vis_vram_limit; r = amdgpu_ttm_global_init(adev); if (r) { @@ -1122,6 +1123,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_ERROR("Failed initializing VRAM heap.\n"); return r; } + + /* Reduce size of CPU-visible VRAM if requested */ + vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024; + if (amdgpu_vis_vram_limit > 0 && + vis_vram_limit <= adev->mc.visible_vram_size) + adev->mc.visible_vram_size = vis_vram_limit; + /* Change the size here instead of the init above so only lpfn is affected */ amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); From 00f06b246a3056bbaa901a90a5a93c9f81ab8e36 Mon Sep 17 00:00:00 2001 From: John Brooks Date: Tue, 27 Jun 2017 22:33:18 -0400 Subject: [PATCH 0327/1795] drm/amdgpu: Throttle visible VRAM moves separately MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The BO move throttling code is designed to allow VRAM to fill quickly if it is relatively empty. However, this does not take into account situations where the visible VRAM is smaller than total VRAM, and total VRAM may not be close to full but the visible VRAM segment is under pressure. In such situations, visible VRAM would experience unrestricted swapping and performance would drop. Add a separate counter specifically for moves involving visible VRAM, and check it before moving BOs there. v2: Only perform calculations for separate counter if visible VRAM is smaller than total VRAM. 
(Michel Dänzer) v3: [Michel Dänzer] * Use BO's location rather than the AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED flag to determine whether to account a move for visible VRAM in most cases. * Use a single if (adev->mc.visible_vram_size < adev->mc.real_vram_size) { block in amdgpu_cs_get_threshold_for_moves. Fixes: 95844d20ae02 (drm/amdgpu: throttle buffer migrations at CS using a fixed MBps limit (v2)) Signed-off-by: John Brooks Reviewed-by: Christian König Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 +- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 92 +++++++++++++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 12 ++- 3 files changed, 87 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 0d6b0617cdf0..c290b262d7da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1117,7 +1117,9 @@ struct amdgpu_cs_parser { struct list_head validated; struct dma_fence *fence; uint64_t bytes_moved_threshold; + uint64_t bytes_moved_vis_threshold; uint64_t bytes_moved; + uint64_t bytes_moved_vis; struct amdgpu_bo_list_entry *evictable; /* user fence */ @@ -1555,6 +1557,7 @@ struct amdgpu_device { spinlock_t lock; s64 last_update_us; s64 accum_us; /* accumulated microseconds */ + s64 accum_us_vis; /* for visible VRAM */ u32 log2_max_MBps; } mm_stats; @@ -1846,7 +1849,8 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev); bool amdgpu_need_post(struct amdgpu_device *adev); void amdgpu_update_display_priority(struct amdgpu_device *adev); -void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes); +void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, + u64 num_vis_bytes); void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain); bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 5599c01b265d..33789510e663 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -223,10 +223,11 @@ static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes) * ticks. The accumulated microseconds (us) are converted to bytes and * returned. */ -static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev) +static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, + u64 *max_bytes, + u64 *max_vis_bytes) { s64 time_us, increment_us; - u64 max_bytes; u64 free_vram, total_vram, used_vram; /* Allow a maximum of 200 accumulated ms. This is basically per-IB @@ -238,8 +239,11 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev) */ const s64 us_upper_bound = 200000; - if (!adev->mm_stats.log2_max_MBps) - return 0; + if (!adev->mm_stats.log2_max_MBps) { + *max_bytes = 0; + *max_vis_bytes = 0; + return; + } total_vram = adev->mc.real_vram_size - adev->vram_pin_size; used_vram = atomic64_read(&adev->vram_usage); @@ -280,23 +284,45 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev) adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us); } - /* This returns 0 if the driver is in debt to disallow (optional) + /* This is set to 0 if the driver is in debt to disallow (optional) * buffer moves. 
*/ - max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us); + *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us); + + /* Do the same for visible VRAM if half of it is free */ + if (adev->mc.visible_vram_size < adev->mc.real_vram_size) { + u64 total_vis_vram = adev->mc.visible_vram_size; + u64 used_vis_vram = atomic64_read(&adev->vram_vis_usage); + + if (used_vis_vram < total_vis_vram) { + u64 free_vis_vram = total_vis_vram - used_vis_vram; + adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis + + increment_us, us_upper_bound); + + if (free_vis_vram >= total_vis_vram / 2) + adev->mm_stats.accum_us_vis = + max(bytes_to_us(adev, free_vis_vram / 2), + adev->mm_stats.accum_us_vis); + } + + *max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis); + } else { + *max_vis_bytes = 0; + } spin_unlock(&adev->mm_stats.lock); - return max_bytes; } /* Report how many bytes have really been moved for the last command * submission. This can result in a debt that can stop buffer migrations * temporarily. */ -void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes) +void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, + u64 num_vis_bytes) { spin_lock(&adev->mm_stats.lock); adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes); + adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes); spin_unlock(&adev->mm_stats.lock); } @@ -304,7 +330,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p, struct amdgpu_bo *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - u64 initial_bytes_moved; + u64 initial_bytes_moved, bytes_moved; uint32_t domain; int r; @@ -314,17 +340,35 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p, /* Don't move this buffer if we have depleted our allowance * to move it. Don't move anything if the threshold is zero. */ - if (p->bytes_moved < p->bytes_moved_threshold) - domain = bo->prefered_domains; - else + if (p->bytes_moved < p->bytes_moved_threshold) { + if (adev->mc.visible_vram_size < adev->mc.real_vram_size && + (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) { + /* And don't move a CPU_ACCESS_REQUIRED BO to limited + * visible VRAM if we've depleted our allowance to do + * that. 
+ */ + if (p->bytes_moved_vis < p->bytes_moved_vis_threshold) + domain = bo->prefered_domains; + else + domain = bo->allowed_domains; + } else { + domain = bo->prefered_domains; + } + } else { domain = bo->allowed_domains; + } retry: amdgpu_ttm_placement_from_domain(bo, domain); initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); - p->bytes_moved += atomic64_read(&adev->num_bytes_moved) - - initial_bytes_moved; + bytes_moved = atomic64_read(&adev->num_bytes_moved) - + initial_bytes_moved; + p->bytes_moved += bytes_moved; + if (adev->mc.visible_vram_size < adev->mc.real_vram_size && + bo->tbo.mem.mem_type == TTM_PL_VRAM && + bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT) + p->bytes_moved_vis += bytes_moved; if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { domain = bo->allowed_domains; @@ -350,7 +394,8 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, struct amdgpu_bo_list_entry *candidate = p->evictable; struct amdgpu_bo *bo = candidate->robj; struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - u64 initial_bytes_moved; + u64 initial_bytes_moved, bytes_moved; + bool update_bytes_moved_vis; uint32_t other; /* If we reached our current BO we can forget it */ @@ -370,10 +415,17 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, /* Good we can try to move this BO somewhere else */ amdgpu_ttm_placement_from_domain(bo, other); + update_bytes_moved_vis = + adev->mc.visible_vram_size < adev->mc.real_vram_size && + bo->tbo.mem.mem_type == TTM_PL_VRAM && + bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT; initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); - p->bytes_moved += atomic64_read(&adev->num_bytes_moved) - + bytes_moved = atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved; + p->bytes_moved += bytes_moved; + if (update_bytes_moved_vis) + p->bytes_moved_vis += bytes_moved; if (unlikely(r)) break; @@ -554,8 +606,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, list_splice(&need_pages, &p->validated); } - p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev); + amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold, + &p->bytes_moved_vis_threshold); p->bytes_moved = 0; + p->bytes_moved_vis = 0; p->evictable = list_last_entry(&p->validated, struct amdgpu_bo_list_entry, tv.head); @@ -579,8 +633,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, goto error_validate; } - amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved); - + amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved, + p->bytes_moved_vis); fpriv->vm.last_eviction_counter = atomic64_read(&p->adev->num_evictions); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index a85e75327456..e429829ae93d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -322,7 +322,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, struct amdgpu_bo *bo; enum ttm_bo_type type; unsigned long page_align; - u64 initial_bytes_moved; + u64 initial_bytes_moved, bytes_moved; size_t acc_size; int r; @@ -398,8 +398,14 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type, &bo->placement, page_align, !kernel, NULL, acc_size, sg, resv, &amdgpu_ttm_bo_destroy); - amdgpu_cs_report_moved_bytes(adev, - 
atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved); + bytes_moved = atomic64_read(&adev->num_bytes_moved) - + initial_bytes_moved; + if (adev->mc.visible_vram_size < adev->mc.real_vram_size && + bo->tbo.mem.mem_type == TTM_PL_VRAM && + bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT) + amdgpu_cs_report_moved_bytes(adev, bytes_moved, bytes_moved); + else + amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0); if (unlikely(r != 0)) return r; From 96cf8271df546d0f4cfc9ddddbc42dce633e0190 Mon Sep 17 00:00:00 2001 From: John Brooks Date: Fri, 30 Jun 2017 11:31:08 -0400 Subject: [PATCH 0328/1795] drm/amdgpu: Set/clear CPU_ACCESS flag on page fault and move to VRAM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When a BO is moved to VRAM, clear AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED. This allows it to potentially later move to invisible VRAM if the CPU does not access it again. Setting the CPU_ACCESS flag in amdgpu_bo_fault_reserve_notify() also means that we can remove the loop to restrict lpfn to the end of visible VRAM, because amdgpu_ttm_placement_init() will do it for us. v3 [Michel Dänzer] * Use AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED instead of a new flag (Christian König) * Clear flag in amdgpu_bo_move instead of amdgpu_move_ram_vram (Christian) * Explicitly mention amdgpu_bo_fault_reserve_notify in amdgpu_bo_move * Also clear flag in amdgpu_bo_create_restricted Suggested-by: Michel Dänzer Signed-off-by: John Brooks Reviewed-by: Christian König Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 20 ++++++++++---------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 9 +++++++++ 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index e429829ae93d..93601fbea695 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -432,6 +432,10 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, trace_amdgpu_bo_create(bo); + /* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */ + if (type == ttm_bo_type_device) + bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + return 0; fail_unreserve: @@ -945,13 +949,17 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct amdgpu_bo *abo; - unsigned long offset, size, lpfn; - int i, r; + unsigned long offset, size; + int r; if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) return 0; abo = container_of(bo, struct amdgpu_bo, tbo); + + /* Remember that this BO was accessed by the CPU */ + abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + if (bo->mem.mem_type != TTM_PL_VRAM) return 0; @@ -967,14 +975,6 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) /* hurrah the memory is not visible ! 
*/ atomic64_inc(&adev->num_vram_cpu_page_faults); amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM); - lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; - for (i = 0; i < abo->placement.num_placement; i++) { - /* Force into visible VRAM */ - if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) && - (!abo->placements[i].lpfn || - abo->placements[i].lpfn > lpfn)) - abo->placements[i].lpfn = lpfn; - } r = ttm_bo_validate(bo, &abo->placement, false, false); if (unlikely(r == -ENOMEM)) { amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index e238084b7142..9bbaffbd5f6a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -499,6 +499,15 @@ memcpy: } } + if (bo->type == ttm_bo_type_device && + new_mem->mem_type == TTM_PL_VRAM && + old_mem->mem_type != TTM_PL_VRAM) { + /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU + * accesses the BO after it's moved. + */ + abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + } + /* update statistics */ atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved); return 0; From 41d9a6a728ea34d697dc74bc5fea1b6f6c5fce80 Mon Sep 17 00:00:00 2001 From: John Brooks Date: Tue, 27 Jun 2017 22:33:21 -0400 Subject: [PATCH 0329/1795] drm/amdgpu: Don't force BOs into visible VRAM for page faults MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is no need for page faults to force BOs into visible VRAM if it's full, and the time it takes to do so is great enough to cause noticeable stuttering. Add GTT as a possible placement so that if visible VRAM is full, page faults move BOs to GTT instead of evicting other BOs from VRAM. Suggested-by: Michel Dänzer Signed-off-by: John Brooks Reviewed-by: Michel Dänzer Reviewed-by: Christian König Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 93601fbea695..6e24339ecc46 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -974,18 +974,21 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) /* hurrah the memory is not visible ! 
*/ atomic64_inc(&adev->num_vram_cpu_page_faults); - amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM); + amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT); + + /* Avoid costly evictions; only set GTT as a busy placement */ + abo->placement.num_busy_placement = 1; + abo->placement.busy_placement = &abo->placements[1]; + r = ttm_bo_validate(bo, &abo->placement, false, false); - if (unlikely(r == -ENOMEM)) { - amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); - return ttm_bo_validate(bo, &abo->placement, false, false); - } else if (unlikely(r != 0)) { + if (unlikely(r != 0)) return r; - } offset = bo->mem.start << PAGE_SHIFT; /* this should never happen */ - if ((offset + size) > adev->mc.visible_vram_size) + if (bo->mem.mem_type == TTM_PL_VRAM && + (offset + size) > adev->mc.visible_vram_size) return -EINVAL; return 0; From cb2dd1a6b641b015b30e4272198e6035d7777adb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Tue, 4 Jul 2017 17:16:42 +0900 Subject: [PATCH 0330/1795] drm/amdgpu: Try evicting from CPU visible to invisible VRAM first MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This gives BOs which haven't been accessed by the CPU since they were moved to visible VRAM another chance to stay in VRAM when another BO needs to go to visible VRAM. This should allow BOs to stay in VRAM longer in some cases. v2: * Only do this for BOs which don't have the AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED flag set. Reviewed-by: Christian König Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 28 +++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 9bbaffbd5f6a..da8b0e15a30c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -214,7 +214,35 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, adev->mman.buffer_funcs_ring && adev->mman.buffer_funcs_ring->ready == false) { amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); + } else if (adev->mc.visible_vram_size < adev->mc.real_vram_size && + !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) { + unsigned fpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; + struct drm_mm_node *node = bo->mem.mm_node; + unsigned long pages_left; + + for (pages_left = bo->mem.num_pages; + pages_left; + pages_left -= node->size, node++) { + if (node->start < fpfn) + break; + } + + if (!pages_left) + goto gtt; + + /* Try evicting to the CPU inaccessible part of VRAM + * first, but only set GTT as busy placement, so this + * BO will be evicted to GTT rather than causing other + * BOs to be evicted from VRAM + */ + amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT); + abo->placements[0].fpfn = fpfn; + abo->placements[0].lpfn = 0; + abo->placement.busy_placement = &abo->placements[1]; + abo->placement.num_busy_placement = 1; } else { +gtt: amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); } break; From 4048f0f0d63ed372a8d2333f6eec4dd0ca183c6c Mon Sep 17 00:00:00 2001 From: shaoyunl Date: Fri, 4 Dec 2015 15:01:22 -0500 Subject: [PATCH 0331/1795] drm/amdgpu: Enable SDMA_CNTL.ATC_L1_ENABLE for SDMA on CZ For GFX context, the ATC bit in SDMA*_GFX_VIRTUAL_ADDRESS can be cleared to perform in VM mode. 
For RLC context, to support ATC mode , ATC bit in SDMA*_RLC*_VIRTUAL_ADDRESS should be set. SDMA_CNTL.ATC_L1_ENABLE bit is global setting that enables the L1-L2 translation for ATC address. Signed-off-by: shaoyun liu Reviewed-by: Felix Kuehling Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 1d766ae98dc8..67a29fb3d3b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -556,12 +556,18 @@ static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable) for (i = 0; i < adev->sdma.num_instances; i++) { f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]); - if (enable) + if (enable) { f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, AUTO_CTXSW_ENABLE, 1); - else + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, + ATC_L1_ENABLE, 1); + } else { f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, AUTO_CTXSW_ENABLE, 0); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, + ATC_L1_ENABLE, 1); + } + WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl); } } From 763dbbfa5f678ceec59b81bc13bc9dc445920e9b Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Wed, 15 Jun 2016 16:33:15 -0400 Subject: [PATCH 0332/1795] drm/amdgpu: Enable SDMA context switching for CIK Enable SDMA context switching on CIK (copied from sdma_v3_0.c). Signed-off-by: Felix Kuehling Reviewed-by: Chunming Zhou Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 30 +++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index c216e16826c9..4a9cea03f54f 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -341,6 +341,33 @@ static void cik_sdma_rlc_stop(struct amdgpu_device *adev) /* XXX todo */ } +/** + * cik_ctx_switch_enable - stop the async dma engines context switch + * + * @adev: amdgpu_device pointer + * @enable: enable/disable the DMA MEs context switch. + * + * Halt or unhalt the async dma engines context switch (VI). + */ +static void cik_ctx_switch_enable(struct amdgpu_device *adev, bool enable) +{ + u32 f32_cntl; + int i; + + for (i = 0; i < adev->sdma.num_instances; i++) { + f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]); + if (enable) { + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, + AUTO_CTXSW_ENABLE, 1); + } else { + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, + AUTO_CTXSW_ENABLE, 0); + } + + WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl); + } +} + /** * cik_sdma_enable - stop the async dma engines * @@ -537,6 +564,8 @@ static int cik_sdma_start(struct amdgpu_device *adev) /* halt the engine before programing */ cik_sdma_enable(adev, false); + /* enable sdma ring preemption */ + cik_ctx_switch_enable(adev, true); /* start the gfx rings and rlc compute queues */ r = cik_sdma_gfx_resume(adev); @@ -984,6 +1013,7 @@ static int cik_sdma_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cik_ctx_switch_enable(adev, false); cik_sdma_enable(adev, false); return 0; From a667386cb997a136e169de3cf70f007223bb74ee Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Fri, 15 Jul 2016 18:37:05 -0400 Subject: [PATCH 0333/1795] drm/amdgpu: Make SDMA phase quantum configurable Set a configurable SDMA phase quantum when enabling SDMA context switching. 
The default value significantly reduces SDMA latency in page table updates when user-mode SDMA queues have concurrent activity, compared to the initial HW setting. Signed-off-by: Felix Kuehling Reviewed-by: Andres Rodriguez Reviewed-by: Shaoyun Liu Acked-by: Chunming Zhou Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 +++ drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 32 ++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 32 ++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 34 ++++++++++++++++++++++++- 5 files changed, 100 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index c290b262d7da..ebd077fdce1b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -107,6 +107,7 @@ extern unsigned amdgpu_pcie_gen_cap; extern unsigned amdgpu_pcie_lane_cap; extern unsigned amdgpu_cg_mask; extern unsigned amdgpu_pg_mask; +extern unsigned amdgpu_sdma_phase_quantum; extern char *amdgpu_disable_cu; extern char *amdgpu_virtual_display; extern unsigned amdgpu_pp_feature_mask; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 3c83fe6c5db8..b3d7beb6806c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -108,6 +108,7 @@ unsigned amdgpu_pcie_gen_cap = 0; unsigned amdgpu_pcie_lane_cap = 0; unsigned amdgpu_cg_mask = 0xffffffff; unsigned amdgpu_pg_mask = 0xffffffff; +unsigned amdgpu_sdma_phase_quantum = 32; char *amdgpu_disable_cu = NULL; char *amdgpu_virtual_display = NULL; unsigned amdgpu_pp_feature_mask = 0xffffffff; @@ -227,6 +228,9 @@ module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444); MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)"); module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444); +MODULE_PARM_DESC(sdma_phase_quantum, "SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change (default 32))"); +module_param_named(sdma_phase_quantum, amdgpu_sdma_phase_quantum, uint, 0444); + MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)"); module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444); diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 4a9cea03f54f..f508f4d01e4a 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -351,14 +351,44 @@ static void cik_sdma_rlc_stop(struct amdgpu_device *adev) */ static void cik_ctx_switch_enable(struct amdgpu_device *adev, bool enable) { - u32 f32_cntl; + u32 f32_cntl, phase_quantum = 0; int i; + if (amdgpu_sdma_phase_quantum) { + unsigned value = amdgpu_sdma_phase_quantum; + unsigned unit = 0; + + while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >> + SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) { + value = (value + 1) >> 1; + unit++; + } + if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >> + SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) { + value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >> + SDMA0_PHASE0_QUANTUM__VALUE__SHIFT); + unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >> + SDMA0_PHASE0_QUANTUM__UNIT__SHIFT); + WARN_ONCE(1, + "clamping sdma_phase_quantum to %uK clock cycles\n", + value << unit); + } + phase_quantum = + value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT | + unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT; + } + for (i = 0; i < adev->sdma.num_instances; i++) { f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]); if (enable) { 
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, AUTO_CTXSW_ENABLE, 1); + if (amdgpu_sdma_phase_quantum) { + WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i], + phase_quantum); + WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i], + phase_quantum); + } } else { f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, AUTO_CTXSW_ENABLE, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 67a29fb3d3b3..b1de44f22824 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -551,9 +551,33 @@ static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev) */ static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable) { - u32 f32_cntl; + u32 f32_cntl, phase_quantum = 0; int i; + if (amdgpu_sdma_phase_quantum) { + unsigned value = amdgpu_sdma_phase_quantum; + unsigned unit = 0; + + while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >> + SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) { + value = (value + 1) >> 1; + unit++; + } + if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >> + SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) { + value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >> + SDMA0_PHASE0_QUANTUM__VALUE__SHIFT); + unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >> + SDMA0_PHASE0_QUANTUM__UNIT__SHIFT); + WARN_ONCE(1, + "clamping sdma_phase_quantum to %uK clock cycles\n", + value << unit); + } + phase_quantum = + value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT | + unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT; + } + for (i = 0; i < adev->sdma.num_instances; i++) { f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]); if (enable) { @@ -561,6 +585,12 @@ static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable) AUTO_CTXSW_ENABLE, 1); f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, ATC_L1_ENABLE, 1); + if (amdgpu_sdma_phase_quantum) { + WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i], + phase_quantum); + WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i], + phase_quantum); + } } else { f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, AUTO_CTXSW_ENABLE, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 4a65697ccc94..591f3e7fb508 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -493,13 +493,45 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev) */ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable) { - u32 f32_cntl; + u32 f32_cntl, phase_quantum = 0; int i; + if (amdgpu_sdma_phase_quantum) { + unsigned value = amdgpu_sdma_phase_quantum; + unsigned unit = 0; + + while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >> + SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) { + value = (value + 1) >> 1; + unit++; + } + if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >> + SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) { + value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >> + SDMA0_PHASE0_QUANTUM__VALUE__SHIFT); + unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >> + SDMA0_PHASE0_QUANTUM__UNIT__SHIFT); + WARN_ONCE(1, + "clamping sdma_phase_quantum to %uK clock cycles\n", + value << unit); + } + phase_quantum = + value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT | + unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT; + } + for (i = 0; i < adev->sdma.num_instances; i++) { f32_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL)); f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, AUTO_CTXSW_ENABLE, enable ? 
1 : 0); + if (enable && amdgpu_sdma_phase_quantum) { + WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE0_QUANTUM), + phase_quantum); + WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE1_QUANTUM), + phase_quantum); + WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE2_QUANTUM), + phase_quantum); + } WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL), f32_cntl); } From ba997709a6135355a1f311336c7c4a6c0e37dfe9 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Mon, 9 Nov 2015 17:21:45 -0500 Subject: [PATCH 0334/1795] drm/amdgpu: Correctly establish the suspend/resume hook for amdkfd Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Reviewed-by: Oded Gabbay Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 7 +++++++ drivers/gpu/drm/amd/amdgpu/cik.c | 9 +-------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 0e629931210c..078886c6b758 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -55,6 +55,8 @@ #include #include "amdgpu_vf_error.h" +#include "amdgpu_amdkfd.h" + MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); @@ -2378,6 +2380,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) } drm_modeset_unlock_all(dev); + amdgpu_amdkfd_suspend(adev); + /* unpin the front buffers and cursors */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); @@ -2511,6 +2515,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) } } } + r = amdgpu_amdkfd_resume(adev); + if (r) + return r; /* blat the mode back in */ if (fbcon) { diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 37a499ab30eb..567c4a5cf90c 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1824,21 +1824,14 @@ static int cik_common_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_amdkfd_suspend(adev); - return cik_common_hw_fini(adev); } static int cik_common_resume(void *handle) { - int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = cik_common_hw_init(adev); - if (r) - return r; - - return amdgpu_amdkfd_resume(adev); + return cik_common_hw_init(adev); } static bool cik_common_is_idle(void *handle) From 9f57f7b47d4c9559ae85666eeaf9ffd150096574 Mon Sep 17 00:00:00 2001 From: Jay Cornwall Date: Wed, 26 Apr 2017 14:51:57 -0500 Subject: [PATCH 0335/1795] drm/amdgpu: Send no-retry XNACK for all fault types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A subset of VM fault types currently send retry XNACK to the client. This causes a storm of interrupts from the VM to the host. Until the storm is throttled by other means send no-retry XNACK for all fault types instead. No change in behavior to the client which will stall indefinitely with the current configuration in any case. Improves system stability under GC or MMHUB faults. 
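The change relies on the usual REG_SET_FIELD read-modify-write idiom: clear the field's bits, then OR in the new value at the field's shift. A minimal sketch of that idiom follows; the bit position is made up and only stands in for the real RETRY_PERMISSION_OR_INVALID_PAGE_FAULT layout in VM_CONTEXT1_CNTL.

/*
 * Illustrative read-modify-write of a single register field.
 * Mask/shift values are stand-ins, not the real VM_CONTEXT1_CNTL layout.
 */
#include <stdint.h>
#include <stdio.h>

#define RETRY_FAULT_SHIFT 7                      /* assumed bit position */
#define RETRY_FAULT_MASK  (0x1u << RETRY_FAULT_SHIFT)

static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
			  uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t cntl = 0xffffffffu;	/* pretend value read back from the register */

	/* Per the patch, 0 disables retry, so the client gets a no-retry XNACK. */
	cntl = set_field(cntl, RETRY_FAULT_MASK, RETRY_FAULT_SHIFT, 0);
	printf("VM_CONTEXT1_CNTL -> 0x%08x\n", cntl);
	return 0;
}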
Signed-off-by: Jay Cornwall Reviewed-by: Felix Kuehling Reviewed-by: John Bridgman Acked-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 3 +++ drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 3 +++ 2 files changed, 6 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 3ff786cfc947..008ad3dc4afd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -206,6 +206,9 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, adev->vm_manager.block_size - 9); + /* Send no-retry XNACK on fault to suppress VM fault storm. */ + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, i, tmp); WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0); WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 0780e830b76b..96f1628541bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -222,6 +222,9 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, adev->vm_manager.block_size - 9); + /* Send no-retry XNACK on fault to suppress VM fault storm. */ + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i, tmp); WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0); WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0); From 79077ee1edceb95d8c0215a9af5e8373232672df Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 10 Jul 2017 10:39:09 -0400 Subject: [PATCH 0336/1795] drm/amdgpu: add get_clock_info for atomfirmware MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The information has moved to different tables, notably smu_info for core refclk and umc_info for mem refclk. 
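The lookup below follows the usual atomfirmware idiom: a union of per-revision structs is cast onto the raw table bytes at bios + data_offset once the header parse returns the table's location. A heavily simplified sketch of that idiom follows; the one-field struct is hypothetical and only stands in for atom_smu_info_v3_1 (the kernel additionally applies le32_to_cpu to each field).

/*
 * Sketch of the versioned-table union idiom. Struct layout is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_smu_info_v3_1 {          /* stand-in for atom_smu_info_v3_1 */
	uint32_t core_refclk_10khz;
};

union smu_info {
	struct fake_smu_info_v3_1 v31;
};

int main(void)
{
	_Alignas(uint32_t) uint8_t bios[64] = { 0 };
	uint16_t data_offset = 16;       /* would come from the parsed table header */
	uint32_t refclk = 2500;          /* 25 MHz expressed in 10 kHz units */
	union smu_info *smu;

	memcpy(bios + data_offset, &refclk, sizeof(refclk));

	smu = (union smu_info *)(bios + data_offset);
	printf("core refclk = %u kHz\n", smu->v31.core_refclk_10khz * 10);
	return 0;
}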
Acked-by: Chunming Zhou Acked-by: Christian König Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 93 +++++++++++++++++++ .../gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h | 1 + 2 files changed, 94 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index a7d65f033883..f9ffe8ef0cd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -128,3 +128,96 @@ int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev) return 0; } + +union firmware_info { + struct atom_firmware_info_v3_1 v31; +}; + +union smu_info { + struct atom_smu_info_v3_1 v31; +}; + +union umc_info { + struct atom_umc_info_v3_1 v31; +}; + +int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + struct amdgpu_pll *spll = &adev->clock.spll; + struct amdgpu_pll *mpll = &adev->clock.mpll; + uint8_t frev, crev; + uint16_t data_offset; + int ret = -EINVAL, index; + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + firmwareinfo); + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + union firmware_info *firmware_info = + (union firmware_info *)(mode_info->atom_context->bios + + data_offset); + + adev->clock.default_sclk = + le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz); + adev->clock.default_mclk = + le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz); + + adev->pm.current_sclk = adev->clock.default_sclk; + adev->pm.current_mclk = adev->clock.default_mclk; + + /* not technically a clock, but... */ + adev->mode_info.firmware_flags = + le32_to_cpu(firmware_info->v31.firmware_capability); + + ret = 0; + } + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + smu_info); + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + union smu_info *smu_info = + (union smu_info *)(mode_info->atom_context->bios + + data_offset); + + /* system clock */ + spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz); + + spll->reference_div = 0; + spll->min_post_div = 1; + spll->max_post_div = 1; + spll->min_ref_div = 2; + spll->max_ref_div = 0xff; + spll->min_feedback_div = 4; + spll->max_feedback_div = 0xff; + spll->best_vco = 0; + + ret = 0; + } + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + umc_info); + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + union umc_info *umc_info = + (union umc_info *)(mode_info->atom_context->bios + + data_offset); + + /* memory clock */ + mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz); + + mpll->reference_div = 0; + mpll->min_post_div = 1; + mpll->max_post_div = 1; + mpll->min_ref_div = 2; + mpll->max_ref_div = 0xff; + mpll->min_feedback_div = 4; + mpll->max_feedback_div = 0xff; + mpll->best_vco = 0; + + ret = 0; + } + + return ret; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h index cedafbb9183e..288b97e54347 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h @@ -28,5 +28,6 @@ bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev) void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev); int 
amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev); +int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev); #endif From 88b64e9536746eadc366ac1e23c8c67f14b249f5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 10 Jul 2017 10:43:10 -0400 Subject: [PATCH 0337/1795] drm/amdgpu: call atomfirmware get_clock_info for atomfirmware systems MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rather than the legacy atombios version. Acked-by: Chunming Zhou Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 078886c6b758..a9eeaad49d18 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2189,7 +2189,15 @@ int amdgpu_device_init(struct amdgpu_device *adev, DRM_INFO("GPU post is not needed\n"); } - if (!adev->is_atom_fw) { + if (adev->is_atom_fw) { + /* Initialize clocks */ + r = amdgpu_atomfirmware_get_clock_info(adev); + if (r) { + dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n"); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); + goto failed; + } + } else { /* Initialize clocks */ r = amdgpu_atombios_get_clock_info(adev); if (r) { From 1a6ec7ed03e89b859325790221ca77d09fd4a3d0 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 11 Jul 2017 18:59:24 -0400 Subject: [PATCH 0338/1795] drm/amdgpu/soc15: drop dead function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Maybe a leftover from bringup? Reviewed-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 8dc023296773..f2c3a49f73a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -566,13 +566,6 @@ static uint32_t soc15_get_rev_id(struct amdgpu_device *adev) return nbio_v6_1_get_rev_id(adev); } - -int gmc_v9_0_mc_wait_for_idle(struct amdgpu_device *adev) -{ - /* to be implemented in MC IP*/ - return 0; -} - static const struct amdgpu_asic_funcs soc15_asic_funcs = { .read_disabled_bios = &soc15_read_disabled_bios, From 6d949d247379ec92e1a0642fc7a227284488360e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 12 Jul 2017 09:18:07 -0400 Subject: [PATCH 0339/1795] drm/amdgpu: implement si_read_bios_from_rom MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows us to read the vbios image directly from ROM. This is already implemented for other asics, but was not yet available for SI. 
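The new callback reads the image a dword at a time through an index/data register pair: write 0 to ROM_INDEX once, then each read of ROM_DATA returns the next dword. A minimal sketch with stubbed register accessors and a made-up ROM image follows; the stubs stand in for the driver's WREG32/RREG32 MMIO helpers.

/*
 * Sketch of an indexed ROM read. Register stubs and ROM contents are fake.
 */
#include <stdint.h>
#include <stdio.h>

#define ALIGN4(x) (((x) + 3) & ~3u)

static const uint32_t fake_rom[] = { 0xaa55aa55, 0x12345678, 0xdeadbeef };
static unsigned int rom_index;

static void wreg_rom_index(uint32_t val) { rom_index = val; }
static uint32_t rreg_rom_data(void)      { return fake_rom[rom_index++]; }

static int read_bios_from_rom(uint8_t *bios, uint32_t length_bytes)
{
	uint32_t *dw_ptr = (uint32_t *)bios;
	uint32_t length_dw, i;

	if (!bios || !length_bytes)
		return 0;

	length_dw = ALIGN4(length_bytes) / 4;

	wreg_rom_index(0);              /* start at the beginning of the image */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = rreg_rom_data();

	return 1;
}

int main(void)
{
	uint32_t bios[3];               /* dword-aligned destination buffer */

	if (read_bios_from_rom((uint8_t *)bios, sizeof(bios)))
		printf("first dword: 0x%08x\n", bios[0]);
	return 0;
}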
Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/si.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index f45fb0f022b3..812a24dd1204 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1150,6 +1150,33 @@ static bool si_read_disabled_bios(struct amdgpu_device *adev) return r; } +#define mmROM_INDEX 0x2A +#define mmROM_DATA 0x2B + +static bool si_read_bios_from_rom(struct amdgpu_device *adev, + u8 *bios, u32 length_bytes) +{ + u32 *dw_ptr; + u32 i, length_dw; + + if (bios == NULL) + return false; + if (length_bytes == 0) + return false; + /* APU vbios image is part of sbios image */ + if (adev->flags & AMD_IS_APU) + return false; + + dw_ptr = (u32 *)bios; + length_dw = ALIGN(length_bytes, 4) / 4; + /* set rom index to 0 */ + WREG32(mmROM_INDEX, 0); + for (i = 0; i < length_dw; i++) + dw_ptr[i] = RREG32(mmROM_DATA); + + return true; +} + //xxx: not implemented static int si_asic_reset(struct amdgpu_device *adev) { @@ -1206,6 +1233,7 @@ static void si_detect_hw_virtualization(struct amdgpu_device *adev) static const struct amdgpu_asic_funcs si_asic_funcs = { .read_disabled_bios = &si_read_disabled_bios, + .read_bios_from_rom = &si_read_bios_from_rom, .read_register = &si_read_register, .reset = &si_asic_reset, .set_vga_state = &si_vga_set_state, From 1d11ee89862ae7c1932e8b742fbb8340598f8874 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 11 Jul 2017 22:53:29 +0300 Subject: [PATCH 0340/1795] drm/amdgpu: Off by one sanity checks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is just future proofing code, not something that can be triggered in real life. We're testing to make sure we don't shift wrap when we do "1ull << i" so "i" has to be in the 0-63 range. If it's 64 then we have gone too far. Acked-by: Christian König Signed-off-by: Dan Carpenter Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 4bb12ee46315..3fbc7b22446c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4564,7 +4564,7 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev) /* This situation may be hit in the future if a new HW * generation exposes more than 64 queues. If so, the * definition of queue_mask needs updating */ - if (WARN_ON(i > (sizeof(queue_mask)*8))) { + if (WARN_ON(i >= (sizeof(queue_mask)*8))) { DRM_ERROR("Invalid KCQ enabled: %d\n", i); break; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 3a0b69b09ed6..e63925dffd2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2425,7 +2425,7 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev) /* This situation may be hit in the future if a new HW * generation exposes more than 64 queues. 
If so, the * definition of queue_mask needs updating */ - if (WARN_ON(i > (sizeof(queue_mask)*8))) { + if (WARN_ON(i >= (sizeof(queue_mask)*8))) { DRM_ERROR("Invalid KCQ enabled: %d\n", i); break; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 955aa304ff48..0def783889cd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -671,7 +671,7 @@ static int set_sched_resources(struct device_queue_manager *dqm) /* This situation may be hit in the future if a new HW * generation exposes more than 64 queues. If so, the * definition of res.queue_mask needs updating */ - if (WARN_ON(i > (sizeof(res.queue_mask)*8))) { + if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) { pr_err("Invalid queue enabled by amdgpu: %d\n", i); break; } From 5cb818b861be114148e8dbeb4259698148019dd1 Mon Sep 17 00:00:00 2001 From: Jim Qu Date: Wed, 12 Jul 2017 15:52:26 +0800 Subject: [PATCH 0341/1795] drm/amd/amdgpu: fix si_enable_smc_cac() failed issue Signed-off-by: Jim Qu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/si_dpm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index a7ad8390981c..d63873f3f574 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -2055,6 +2055,7 @@ static void si_initialize_powertune_defaults(struct amdgpu_device *adev) case 0x682C: si_pi->cac_weights = cac_weights_cape_verde_pro; si_pi->dte_data = dte_data_sun_xt; + update_dte_from_pl2 = true; break; case 0x6825: case 0x6827: From fd971375411542f9d81111bb081e58727eb09138 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 11 Jul 2017 11:11:41 -0400 Subject: [PATCH 0342/1795] drm/amdgpu/gfx: keep all compute queues on the same pipe Spreading them causes performance regressions using compute queues on Polaris 11. Cc: Jim Qu Acked-by: Jim Qu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index e26108aad3fe..4f6c68fc1dd9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -125,7 +125,8 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev) if (mec >= adev->gfx.mec.num_mec) break; - if (adev->gfx.mec.num_mec > 1) { + /* FIXME: spreading the queues across pipes causes perf regressions */ + if (0) { /* policy: amdgpu owns the first two queues of the first MEC */ if (mec == 0 && queue < 2) set_bit(i, adev->gfx.mec.queue_bitmap); From 6f1ceabbd7091b81139e342fc2b08105bc4fa035 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 11 Jul 2017 16:59:21 +0200 Subject: [PATCH 0343/1795] drm/amdgpu: fix VM flush for CPU based updates MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We don't have any update fence in that case, so the need for flushing isn't detected automatically. 
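In other words, with GPU (SDMA) updates the VMID code derives the need to flush from fence comparisons, but a CPU update produces no fence, so the flush has to be assumed up front. A toy model of that decision follows, with the fence comparison reduced to a single boolean; the names are illustrative, not the driver's.

/*
 * Toy model of the needs_flush decision changed by this patch.
 */
#include <stdbool.h>
#include <stdio.h>

static bool vmid_needs_flush(bool use_cpu_for_update, bool updates_newer_than_flush)
{
	/* CPU path: nothing to compare, always flush when grabbing the VMID. */
	bool needs_flush = use_cpu_for_update;

	/* GPU path: flush only if updates landed after the last flush. */
	if (updates_newer_than_flush)
		needs_flush = true;

	return needs_flush;
}

int main(void)
{
	printf("CPU updates, nothing pending -> flush=%d\n",
	       vmid_needs_flush(true, false));
	printf("GPU updates, nothing pending -> flush=%d\n",
	       vmid_needs_flush(false, false));
	return 0;
}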
Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 30c4322ddce7..84260f7737d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -422,7 +422,7 @@ static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm, struct dma_fence *updates = sync->last_vm_update; int r = 0; struct dma_fence *flushed, *tmp; - bool needs_flush = false; + bool needs_flush = vm->use_cpu_for_update; flushed = id->flushed_updates; if ((amdgpu_vm_had_gpu_reset(adev, id)) || @@ -543,11 +543,11 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, } kfree(fences); - job->vm_needs_flush = false; + job->vm_needs_flush = vm->use_cpu_for_update; /* Check if we can use a VMID already assigned to this VM */ list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) { struct dma_fence *flushed; - bool needs_flush = false; + bool needs_flush = vm->use_cpu_for_update; /* Check all the prerequisites to using this VMID */ if (amdgpu_vm_had_gpu_reset(adev, id)) From a33cab7aacb2aa5bfe82ea6beaf51870af84a10e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 11 Jul 2017 17:13:00 +0200 Subject: [PATCH 0344/1795] drm/amdgpu: fix amdgpu_vm_bo_wait MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need to wait with the correct owner on unmap operations or otherwise can run into VM faults. Also always wait for the page directory since this is where the reservation object comes from. So rename the function to amdgpu_vm_wait_pd instead as well. 
Signed-off-by: Christian König Acked-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 84260f7737d3..db9f12e85fb4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -996,13 +996,14 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params, amdgpu_gart_flush_gpu_tlb(params->adev, 0); } -static int amdgpu_vm_bo_wait(struct amdgpu_device *adev, struct amdgpu_bo *bo) +static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm, + void *owner) { struct amdgpu_sync sync; int r; amdgpu_sync_create(&sync); - amdgpu_sync_resv(adev, &sync, bo->tbo.resv, AMDGPU_FENCE_OWNER_VM); + amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.resv, owner); r = amdgpu_sync_wait(&sync, true); amdgpu_sync_free(&sync); @@ -1048,7 +1049,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, r = amdgpu_bo_kmap(parent->bo, (void **)&pd_addr); if (r) return r; - r = amdgpu_vm_bo_wait(adev, parent->bo); + r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM); if (unlikely(r)) { amdgpu_bo_kunmap(parent->bo); return r; @@ -1445,6 +1446,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, params.vm = vm; params.src = src; + /* sync to everything on unmapping */ + if (!(flags & AMDGPU_PTE_VALID)) + owner = AMDGPU_FENCE_OWNER_UNDEFINED; + if (vm->use_cpu_for_update) { /* params.src is used as flag to indicate system Memory */ if (pages_addr) @@ -1453,7 +1458,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, /* Wait for PT BOs to be free. PTs share the same resv. object * as the root PD BO */ - r = amdgpu_vm_bo_wait(adev, vm->root.bo); + r = amdgpu_vm_wait_pd(adev, vm, owner); if (unlikely(r)) return r; @@ -1465,10 +1470,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); - /* sync to everything on unmapping */ - if (!(flags & AMDGPU_PTE_VALID)) - owner = AMDGPU_FENCE_OWNER_UNDEFINED; - nptes = last - start + 1; /* From 6927798572a930a4a56f66eb36916d861ff56ad6 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 13 Jul 2017 15:37:11 -0400 Subject: [PATCH 0345/1795] drm/amdgpu: remove VM shadow WARN_ONs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Printing a warning into the logs that we will certainly run into a BUG() is completely nonsense, the BUG() is more than noisy enough. 
Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index db9f12e85fb4..74cbe10a0115 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1044,8 +1044,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, params.adev = adev; shadow = parent->bo->shadow; - WARN_ON(vm->use_cpu_for_update && shadow); - if (vm->use_cpu_for_update && !shadow) { + if (vm->use_cpu_for_update) { r = amdgpu_bo_kmap(parent->bo, (void **)&pd_addr); if (r) return r; @@ -1310,9 +1309,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, r = amdgpu_bo_kmap(pt, (void *)&pe_start); if (r) return r; - - WARN_ONCE(pt->shadow, - "CPU VM update doesn't support shadow pages"); } else { if (pt->shadow) { pe_start = amdgpu_bo_gpu_offset(pt->shadow); From 03918b36f6602df298b5ce7925ef77f7ecf0756a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 11 Jul 2017 17:15:37 +0200 Subject: [PATCH 0346/1795] drm/amdgpu: trace setting VM page tables with the CPU as well MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Handy for debugging. Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 74cbe10a0115..5638c16887d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -982,6 +982,8 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params, unsigned int i; uint64_t value; + trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags); + for (i = 0; i < count; i++) { value = params->pages_addr ? amdgpu_vm_map_gart(params->pages_addr, addr) : From 68c62306b378451ddb1a14c08022d18df3848b4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 11 Jul 2017 17:23:29 +0200 Subject: [PATCH 0347/1795] drm/amdgpu: flush the HDP only once for CPU based VM updates MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No need to do this after every single update. 
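The shape of the change: batch the CPU PTE writes and flush the HDP cache once at the end of the directory or mapping update, rather than inside every helper call. A minimal sketch follows, where flush_hdp() is only a stand-in for the driver's mb() plus GART TLB flush.

/*
 * Hoisting an expensive flush out of the per-write path.
 */
#include <stdio.h>

static unsigned int flushes;

static void flush_hdp(void) { flushes++; }

static void write_ptes(unsigned int count)
{
	/* ... write 'count' page table entries through the CPU mapping ... */
	(void)count;
}

int main(void)
{
	unsigned int batches[] = { 8, 64, 3 };
	unsigned int i;

	for (i = 0; i < 3; i++)
		write_ptes(batches[i]);	/* no per-batch flush any more */

	flush_hdp();			/* single flush for the whole update */

	printf("HDP flushes: %u\n", flushes);
	return 0;
}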
Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 5638c16887d8..24879cf3da9b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -992,10 +992,6 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params, i, value, flags); addr += incr; } - - /* Flush HDP */ - mb(); - amdgpu_gart_flush_gpu_tlb(params->adev, 0); } static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm, @@ -1238,6 +1234,12 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev, if (r) amdgpu_vm_invalidate_level(&vm->root); + if (vm->use_cpu_for_update) { + /* Flush HDP */ + mb(); + amdgpu_gart_flush_gpu_tlb(adev, 0); + } + return r; } @@ -1745,6 +1747,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, list_add(&bo_va->vm_status, &vm->cleared); spin_unlock(&vm->status_lock); + if (vm->use_cpu_for_update) { + /* Flush HDP */ + mb(); + amdgpu_gart_flush_gpu_tlb(adev, 0); + } + return 0; } From 6375bbb4d259416ad867ab887333ee88d1b90323 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 11 Jul 2017 17:25:49 +0200 Subject: [PATCH 0348/1795] drm/amdgpu: make sure BOs are always kunmapped MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When a BO is moved or destroyed it shouldn't be kmapped any more. Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 6e24339ecc46..a019556a8e71 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -93,6 +93,7 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) bo = container_of(tbo, struct amdgpu_bo, tbo); + amdgpu_bo_kunmap(bo); amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL); drm_gem_object_release(&bo->gem_base); @@ -931,6 +932,8 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, abo = container_of(bo, struct amdgpu_bo, tbo); amdgpu_vm_bo_invalidate(adev, abo); + amdgpu_bo_kunmap(abo); + /* remember the eviction */ if (evict) atomic64_inc(&adev->num_evictions); From 0a096fb66a60def858d1d893dad92b3dd0564b79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 12 Jul 2017 10:01:48 +0200 Subject: [PATCH 0349/1795] drm/amdgpu: map VM BOs for CPU based updates only once MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No need to try to map them every time. 
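The pattern adopted here is to cache the CPU pointer in the BO after the first map and reuse it for every update, instead of kmap/kunmap around each write. A simplified sketch follows; fake_bo and bo_kmap() are stand-ins for amdgpu_bo and amdgpu_bo_kmap(), with malloc in place of a real kernel mapping.

/*
 * Map-once pattern: cache the CPU mapping and reuse it for every update.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_bo {
	void *kptr;		/* cached CPU mapping, NULL until first map */
	size_t size;
};

static int bo_kmap(struct fake_bo *bo)
{
	if (bo->kptr)
		return 0;	/* already mapped, nothing to do */
	bo->kptr = malloc(bo->size);
	return bo->kptr ? 0 : -1;
}

static void update_ptes(struct fake_bo *bo, uint64_t value)
{
	/* No kmap/kunmap here any more: the mapping is expected to exist. */
	((uint64_t *)bo->kptr)[0] = value;
}

int main(void)
{
	struct fake_bo pt = { .kptr = NULL, .size = 4096 };

	if (bo_kmap(&pt))	/* done once, when the page table is created */
		return 1;

	update_ptes(&pt, 0x1234);
	update_ptes(&pt, 0x5678);
	printf("pte[0] = 0x%llx\n",
	       (unsigned long long)((uint64_t *)pt.kptr)[0]);

	free(pt.kptr);
	return 0;
}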
Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 85 +++++++++++++++----------- 1 file changed, 50 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 24879cf3da9b..b017b54e45ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -159,11 +159,17 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, */ static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent, int (*validate)(void *, struct amdgpu_bo *), - void *param) + void *param, bool use_cpu_for_update) { unsigned i; int r; + if (use_cpu_for_update) { + r = amdgpu_bo_kmap(parent->bo, NULL); + if (r) + return r; + } + if (!parent->entries) return 0; @@ -181,7 +187,8 @@ static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent, * Recurse into the sub directory. This is harmless because we * have only a maximum of 5 layers. */ - r = amdgpu_vm_validate_level(entry, validate, param); + r = amdgpu_vm_validate_level(entry, validate, param, + use_cpu_for_update); if (r) return r; } @@ -212,7 +219,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (num_evictions == vm->last_eviction_counter) return 0; - return amdgpu_vm_validate_level(&vm->root, validate, param); + return amdgpu_vm_validate_level(&vm->root, validate, param, + vm->use_cpu_for_update); } /** @@ -329,6 +337,14 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, if (r) return r; + if (vm->use_cpu_for_update) { + r = amdgpu_bo_kmap(pt, NULL); + if (r) { + amdgpu_bo_unref(&pt); + return r; + } + } + /* Keep a reference to the root directory to avoid * freeing them up in the wrong order. 
*/ @@ -1043,14 +1059,11 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, shadow = parent->bo->shadow; if (vm->use_cpu_for_update) { - r = amdgpu_bo_kmap(parent->bo, (void **)&pd_addr); - if (r) - return r; + pd_addr = (unsigned long)parent->bo->kptr; r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM); - if (unlikely(r)) { - amdgpu_bo_kunmap(parent->bo); + if (unlikely(r)) return r; - } + params.func = amdgpu_vm_cpu_set_ptes; } else { if (shadow) { @@ -1145,28 +1158,29 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, count, incr, AMDGPU_PTE_VALID); } - if (params.func == amdgpu_vm_cpu_set_ptes) - amdgpu_bo_kunmap(parent->bo); - else if (params.ib->length_dw == 0) { - amdgpu_job_free(job); - } else { - amdgpu_ring_pad_ib(ring, params.ib); - amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv, - AMDGPU_FENCE_OWNER_VM); - if (shadow) - amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv, + if (!vm->use_cpu_for_update) { + if (params.ib->length_dw == 0) { + amdgpu_job_free(job); + } else { + amdgpu_ring_pad_ib(ring, params.ib); + amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv, AMDGPU_FENCE_OWNER_VM); + if (shadow) + amdgpu_sync_resv(adev, &job->sync, + shadow->tbo.resv, + AMDGPU_FENCE_OWNER_VM); - WARN_ON(params.ib->length_dw > ndw); - r = amdgpu_job_submit(job, ring, &vm->entity, - AMDGPU_FENCE_OWNER_VM, &fence); - if (r) - goto error_free; + WARN_ON(params.ib->length_dw > ndw); + r = amdgpu_job_submit(job, ring, &vm->entity, + AMDGPU_FENCE_OWNER_VM, &fence); + if (r) + goto error_free; - amdgpu_bo_fence(parent->bo, fence, true); - dma_fence_put(vm->last_dir_update); - vm->last_dir_update = dma_fence_get(fence); - dma_fence_put(fence); + amdgpu_bo_fence(parent->bo, fence, true); + dma_fence_put(vm->last_dir_update); + vm->last_dir_update = dma_fence_get(fence); + dma_fence_put(fence); + } } /* * Recurse into the subdirectories. This recursion is harmless because @@ -1292,7 +1306,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, uint64_t addr, pe_start; struct amdgpu_bo *pt; unsigned nptes; - int r; bool use_cpu_update = (params->func == amdgpu_vm_cpu_set_ptes); @@ -1310,9 +1323,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask); if (use_cpu_update) { - r = amdgpu_bo_kmap(pt, (void *)&pe_start); - if (r) - return r; + pe_start = (unsigned long)pt->kptr; } else { if (pt->shadow) { pe_start = amdgpu_bo_gpu_offset(pt->shadow); @@ -1328,9 +1339,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, AMDGPU_GPU_PAGE_SIZE, flags); dst += nptes * AMDGPU_GPU_PAGE_SIZE; - - if (use_cpu_update) - amdgpu_bo_kunmap(pt); } return 0; @@ -2458,6 +2466,13 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, goto error_free_root; vm->last_eviction_counter = atomic64_read(&adev->num_evictions); + + if (vm->use_cpu_for_update) { + r = amdgpu_bo_kmap(vm->root.bo, NULL); + if (r) + goto error_free_root; + } + amdgpu_bo_unreserve(vm->root.bo); return 0; From 9d63c03444ac92d6412fd72429478b81a8378de7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 13 Jul 2017 12:21:00 +0200 Subject: [PATCH 0350/1795] drm/amdgpu: fix amdgpu_bo_gpu_accessible() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The test was relaxed a bit to much. 
Signed-off-by: Christian König Acked-by: Tom St Denis Reviewed-and-Tested-by: Roger He Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 382485115b06..833b172a2c2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -120,7 +120,11 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo) */ static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo) { - return bo->tbo.mem.mem_type != TTM_PL_SYSTEM; + switch (bo->tbo.mem.mem_type) { + case TTM_PL_TT: return amdgpu_ttm_is_bound(bo->tbo.ttm); + case TTM_PL_VRAM: return true; + default: return false; + } } int amdgpu_bo_create(struct amdgpu_device *adev, From 53a4b90d26044541e4051c5a83397b6eb9e1f6d4 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 12 Jul 2017 19:28:03 +0800 Subject: [PATCH 0351/1795] drm/amd/powerplay: add profile mode for vega10. Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 130 ++++++++++++++---- .../drm/amd/powerplay/hwmgr/vega10_hwmgr.h | 4 + 2 files changed, 109 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 8d567620576a..aca4a7cb1d9c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -78,6 +78,8 @@ uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2}; #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L +static int vega10_force_clock_level(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, uint32_t mask); const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic); @@ -4224,34 +4226,30 @@ static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr) return 0; } -static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, - enum amd_dpm_forced_level level) +static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level, + uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask) { - int ret = 0; + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); - switch (level) { - case AMD_DPM_FORCED_LEVEL_HIGH: - ret = vega10_force_dpm_highest(hwmgr); - if (ret) - return ret; - break; - case AMD_DPM_FORCED_LEVEL_LOW: - ret = vega10_force_dpm_lowest(hwmgr); - if (ret) - return ret; - break; - case AMD_DPM_FORCED_LEVEL_AUTO: - ret = vega10_unforce_dpm_levels(hwmgr); - if (ret) - return ret; - break; - default: - break; + if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL && + table_info->vdd_dep_on_socclk->count > VEGA10_UMD_PSTATE_SOCCLK_LEVEL && + table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) { + *sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL; + *soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL; + *mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL; } - hwmgr->dpm_level = level; - - return ret; + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { + *sclk_mask = 0; + } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { + *mclk_mask = 0; + } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + *sclk_mask = table_info->vdd_dep_on_sclk->count - 1; + *soc_mask = 
table_info->vdd_dep_on_socclk->count - 1; + *mclk_mask = table_info->vdd_dep_on_mclk->count - 1; + } + return 0; } static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) @@ -4278,6 +4276,86 @@ static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) return result; } +static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, + enum amd_dpm_forced_level level) +{ + int ret = 0; + uint32_t sclk_mask = 0; + uint32_t mclk_mask = 0; + uint32_t soc_mask = 0; + uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | + AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; + + if (level == hwmgr->dpm_level) + return ret; + + if (!(hwmgr->dpm_level & profile_mode_mask)) { + /* enter profile mode, save current level, disable gfx cg*/ + if (level & profile_mode_mask) { + hwmgr->saved_dpm_level = hwmgr->dpm_level; + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_GFX, + AMD_CG_STATE_UNGATE); + } + } else { + /* exit profile mode, restore level, enable gfx cg*/ + if (!(level & profile_mode_mask)) { + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) + level = hwmgr->saved_dpm_level; + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_GFX, + AMD_CG_STATE_GATE); + } + } + + switch (level) { + case AMD_DPM_FORCED_LEVEL_HIGH: + ret = vega10_force_dpm_highest(hwmgr); + if (ret) + return ret; + hwmgr->dpm_level = level; + break; + case AMD_DPM_FORCED_LEVEL_LOW: + ret = vega10_force_dpm_lowest(hwmgr); + if (ret) + return ret; + hwmgr->dpm_level = level; + break; + case AMD_DPM_FORCED_LEVEL_AUTO: + ret = vega10_unforce_dpm_levels(hwmgr); + if (ret) + return ret; + hwmgr->dpm_level = level; + break; + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: + ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask); + if (ret) + return ret; + hwmgr->dpm_level = level; + vega10_force_clock_level(hwmgr, PP_SCLK, 1<dpm_level = level; + break; + case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: + default: + break; + } + + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE); + else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO); + + return 0; +} + static int vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr) { struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); @@ -4523,7 +4601,9 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr, struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); int i; - if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) + if (hwmgr->dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO | + AMD_DPM_FORCED_LEVEL_LOW | + AMD_DPM_FORCED_LEVEL_HIGH)) return -EINVAL; switch (type) { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h index 5c97a8b6c46a..676cd7735883 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h @@ -434,6 +434,10 @@ struct vega10_hwmgr { #define PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */ #define PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */ +#define 
VEGA10_UMD_PSTATE_GFXCLK_LEVEL 0x3 +#define VEGA10_UMD_PSTATE_SOCCLK_LEVEL 0x3 +#define VEGA10_UMD_PSTATE_MCLK_LEVEL 0x2 + extern int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr); extern int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr); extern int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr); From d432d6922b1e68f051cf80c66111787fac5ca4a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Noralf=20Tr=C3=B8nnes?= Date: Thu, 8 Jun 2017 17:14:32 +0200 Subject: [PATCH 0352/1795] of: Add vendor prefix for Pervasive Displays MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pervasive Displays Inc. designs, develops, and manufactures low-power electrophoretic (e-ink) display modules and supporting electronics for commercial and industrial display applications. Acked-by: Rob Herring Signed-off-by: Noralf Trønnes Link: http://patchwork.freedesktop.org/patch/msgid/1496934875-51984-2-git-send-email-noralf@tronnes.org --- Documentation/devicetree/bindings/vendor-prefixes.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index f08284e7439b..834554fc15c0 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -241,6 +241,7 @@ oxsemi Oxford Semiconductor, Ltd. panasonic Panasonic Corporation parade Parade Technologies Inc. pericom Pericom Technology Inc. +pervasive Pervasive Displays, Inc. phytec PHYTEC Messtechnik GmbH picochip Picochip Ltd pine64 Pine64 From 7f0dc77772821e7889f945105689bf2b67587bfe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Noralf=20Tr=C3=B8nnes?= Date: Thu, 8 Jun 2017 17:14:33 +0200 Subject: [PATCH 0353/1795] dt-bindings: Add Pervasive Displays RePaper bindings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add device-tree binding documentation for the 1.44", 1.9", 2.0" and 2.7" display panels. Acked-by: Rob Herring Signed-off-by: Noralf Trønnes Link: http://patchwork.freedesktop.org/patch/msgid/1496934875-51984-3-git-send-email-noralf@tronnes.org --- .../devicetree/bindings/display/repaper.txt | 52 +++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 Documentation/devicetree/bindings/display/repaper.txt diff --git a/Documentation/devicetree/bindings/display/repaper.txt b/Documentation/devicetree/bindings/display/repaper.txt new file mode 100644 index 000000000000..f5f9f9cf6a25 --- /dev/null +++ b/Documentation/devicetree/bindings/display/repaper.txt @@ -0,0 +1,52 @@ +Pervasive Displays RePaper branded e-ink displays + +Required properties: +- compatible: "pervasive,e1144cs021" for 1.44" display + "pervasive,e1190cs021" for 1.9" display + "pervasive,e2200cs021" for 2.0" display + "pervasive,e2271cs021" for 2.7" display + +- panel-on-gpios: Timing controller power control +- discharge-gpios: Discharge control +- reset-gpios: RESET pin +- busy-gpios: BUSY pin + +Required property for e2271cs021: +- border-gpios: Border control + +The node for this driver must be a child node of a SPI controller, hence +all mandatory properties described in ../spi/spi-bus.txt must be specified. 
+ +Optional property: +- pervasive,thermal-zone: name of thermometer's thermal zone + +Example: + + display_temp: lm75@48 { + compatible = "lm75b"; + reg = <0x48>; + #thermal-sensor-cells = <0>; + }; + + thermal-zones { + display { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&display_temp>; + }; + }; + + papirus27@0{ + compatible = "pervasive,e2271cs021"; + reg = <0>; + + spi-max-frequency = <8000000>; + + panel-on-gpios = <&gpio 23 0>; + border-gpios = <&gpio 14 0>; + discharge-gpios = <&gpio 15 0>; + reset-gpios = <&gpio 24 0>; + busy-gpios = <&gpio 25 0>; + + pervasive,thermal-zone = "display"; + }; From 379ea9a1a59a5a32c8db6f164e80a3fd00cb3781 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Noralf=20Tr=C3=B8nnes?= Date: Thu, 8 Jun 2017 17:14:34 +0200 Subject: [PATCH 0354/1795] drm/tinydrm: Add tinydrm_xrgb8888_to_gray8() helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Drm has no monochrome or greyscale support so add a conversion from the common format XR24. Also reorder includes into the common order. Acked-by: Daniel Vetter Signed-off-by: Noralf Trønnes Link: http://patchwork.freedesktop.org/patch/msgid/1496934875-51984-4-git-send-email-noralf@tronnes.org --- .../gpu/drm/tinydrm/core/tinydrm-helpers.c | 74 ++++++++++++++++++- include/drm/tinydrm/tinydrm-helpers.h | 1 + 2 files changed, 73 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c index d4cda3308ac7..75808bb84c9a 100644 --- a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c +++ b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c @@ -7,13 +7,15 @@ * (at your option) any later version. */ -#include -#include #include +#include #include #include #include +#include +#include + static unsigned int spi_max; module_param(spi_max, uint, 0400); MODULE_PARM_DESC(spi_max, "Set a lower SPI max transfer size"); @@ -180,6 +182,74 @@ void tinydrm_xrgb8888_to_rgb565(u16 *dst, void *vaddr, } EXPORT_SYMBOL(tinydrm_xrgb8888_to_rgb565); +/** + * tinydrm_xrgb8888_to_gray8 - Convert XRGB8888 to grayscale + * @dst: 8-bit grayscale destination buffer + * @fb: DRM framebuffer + * + * Drm doesn't have native monochrome or grayscale support. + * Such drivers can announce the commonly supported XR24 format to userspace + * and use this function to convert to the native format. + * + * Monochrome drivers will use the most significant bit, + * where 1 means foreground color and 0 background color. + * + * ITU BT.601 is used for the RGB -> luma (brightness) conversion. + * + * Returns: + * Zero on success, negative error code on failure. + */ +int tinydrm_xrgb8888_to_gray8(u8 *dst, struct drm_framebuffer *fb) +{ + struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0); + struct dma_buf_attachment *import_attach = cma_obj->base.import_attach; + unsigned int x, y, pitch = fb->pitches[0]; + int ret = 0; + void *buf; + u32 *src; + + if (WARN_ON(fb->format->format != DRM_FORMAT_XRGB8888)) + return -EINVAL; + /* + * The cma memory is write-combined so reads are uncached. + * Speed up by fetching one line at a time. 
+ */ + buf = kmalloc(pitch, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (import_attach) { + ret = dma_buf_begin_cpu_access(import_attach->dmabuf, + DMA_FROM_DEVICE); + if (ret) + goto err_free; + } + + for (y = 0; y < fb->height; y++) { + src = cma_obj->vaddr + (y * pitch); + memcpy(buf, src, pitch); + src = buf; + for (x = 0; x < fb->width; x++) { + u8 r = (*src & 0x00ff0000) >> 16; + u8 g = (*src & 0x0000ff00) >> 8; + u8 b = *src & 0x000000ff; + + /* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */ + *dst++ = (3 * r + 6 * g + b) / 10; + src++; + } + } + + if (import_attach) + ret = dma_buf_end_cpu_access(import_attach->dmabuf, + DMA_FROM_DEVICE); +err_free: + kfree(buf); + + return ret; +} +EXPORT_SYMBOL(tinydrm_xrgb8888_to_gray8); + /** * tinydrm_of_find_backlight - Find backlight device in device-tree * @dev: Device diff --git a/include/drm/tinydrm/tinydrm-helpers.h b/include/drm/tinydrm/tinydrm-helpers.h index 9b9b6cfe3ba5..a6c387f91eff 100644 --- a/include/drm/tinydrm/tinydrm-helpers.h +++ b/include/drm/tinydrm/tinydrm-helpers.h @@ -43,6 +43,7 @@ void tinydrm_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb, void tinydrm_xrgb8888_to_rgb565(u16 *dst, void *vaddr, struct drm_framebuffer *fb, struct drm_clip_rect *clip, bool swap); +int tinydrm_xrgb8888_to_gray8(u8 *dst, struct drm_framebuffer *fb); struct backlight_device *tinydrm_of_find_backlight(struct device *dev); int tinydrm_enable_backlight(struct backlight_device *backlight); From 3589211e9b0316884f55acf3aeb0a979db79db9c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Noralf=20Tr=C3=B8nnes?= Date: Thu, 8 Jun 2017 17:14:35 +0200 Subject: [PATCH 0355/1795] drm/tinydrm: Add RePaper e-ink driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds support for the Pervasive Displays RePaper branded displays. The controller code is taken from the userspace driver available through repaper.org. Only the V231 film is supported since the others are EOL. Acked-by: Daniel Vetter Signed-off-by: Noralf Trønnes Link: http://patchwork.freedesktop.org/patch/msgid/1496934875-51984-5-git-send-email-noralf@tronnes.org --- MAINTAINERS | 6 + drivers/gpu/drm/tinydrm/Kconfig | 12 + drivers/gpu/drm/tinydrm/Makefile | 1 + drivers/gpu/drm/tinydrm/repaper.c | 1095 +++++++++++++++++++++++++++++ 4 files changed, 1114 insertions(+) create mode 100644 drivers/gpu/drm/tinydrm/repaper.c diff --git a/MAINTAINERS b/MAINTAINERS index 068fd67bb637..0e1efa0bd46a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4453,6 +4453,12 @@ M: Dave Airlie S: Odd Fixes F: drivers/gpu/drm/mgag200/ +DRM DRIVER FOR PERVASIVE DISPLAYS REPAPER PANELS +M: Noralf Trønnes +S: Maintained +F: drivers/gpu/drm/tinydrm/repaper.c +F: Documentation/devicetree/bindings/display/repaper.txt + DRM DRIVER FOR RAGE 128 VIDEO CARDS S: Orphan / Obsolete F: drivers/gpu/drm/r128/ diff --git a/drivers/gpu/drm/tinydrm/Kconfig b/drivers/gpu/drm/tinydrm/Kconfig index 3504c53846da..9596e447f877 100644 --- a/drivers/gpu/drm/tinydrm/Kconfig +++ b/drivers/gpu/drm/tinydrm/Kconfig @@ -19,3 +19,15 @@ config TINYDRM_MI0283QT help DRM driver for the Multi-Inno MI0283QT display panel If M is selected the module will be called mi0283qt. 
+ +config TINYDRM_REPAPER + tristate "DRM support for Pervasive Displays RePaper panels (V231)" + depends on DRM_TINYDRM && SPI + help + DRM driver for the following Pervasive Displays panels: + 1.44" TFT EPD Panel (E1144CS021) + 1.90" TFT EPD Panel (E1190CS021) + 2.00" TFT EPD Panel (E2200CS021) + 2.71" TFT EPD Panel (E2271CS021) + + If M is selected the module will be called repaper. diff --git a/drivers/gpu/drm/tinydrm/Makefile b/drivers/gpu/drm/tinydrm/Makefile index 7a3604cf4fc2..95bb4d4fa785 100644 --- a/drivers/gpu/drm/tinydrm/Makefile +++ b/drivers/gpu/drm/tinydrm/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_TINYDRM_MIPI_DBI) += mipi-dbi.o # Displays obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o +obj-$(CONFIG_TINYDRM_REPAPER) += repaper.o diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c new file mode 100644 index 000000000000..3343d3f15a90 --- /dev/null +++ b/drivers/gpu/drm/tinydrm/repaper.c @@ -0,0 +1,1095 @@ +/* + * DRM driver for Pervasive Displays RePaper branded e-ink panels + * + * Copyright 2013-2017 Pervasive Displays, Inc. + * Copyright 2017 Noralf Trønnes + * + * The driver supports: + * Material Film: Aurora Mb (V231) + * Driver IC: G2 (eTC) + * + * The controller code was taken from the userspace driver: + * https://github.com/repaper/gratis + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define REPAPER_RID_G2_COG_ID 0x12 + +enum repaper_model { + E1144CS021 = 1, + E1190CS021, + E2200CS021, + E2271CS021, +}; + +enum repaper_stage { /* Image pixel -> Display pixel */ + REPAPER_COMPENSATE, /* B -> W, W -> B (Current Image) */ + REPAPER_WHITE, /* B -> N, W -> W (Current Image) */ + REPAPER_INVERSE, /* B -> N, W -> B (New Image) */ + REPAPER_NORMAL /* B -> B, W -> W (New Image) */ +}; + +enum repaper_epd_border_byte { + REPAPER_BORDER_BYTE_NONE, + REPAPER_BORDER_BYTE_ZERO, + REPAPER_BORDER_BYTE_SET, +}; + +struct repaper_epd { + struct tinydrm_device tinydrm; + struct spi_device *spi; + + struct gpio_desc *panel_on; + struct gpio_desc *border; + struct gpio_desc *discharge; + struct gpio_desc *reset; + struct gpio_desc *busy; + + struct thermal_zone_device *thermal; + + unsigned int height; + unsigned int width; + unsigned int bytes_per_scan; + const u8 *channel_select; + unsigned int stage_time; + unsigned int factored_stage_time; + bool middle_scan; + bool pre_border_byte; + enum repaper_epd_border_byte border_byte; + + u8 *line_buffer; + void *current_frame; + + bool enabled; + bool cleared; + bool partial; +}; + +static inline struct repaper_epd * +epd_from_tinydrm(struct tinydrm_device *tdev) +{ + return container_of(tdev, struct repaper_epd, tinydrm); +} + +static int repaper_spi_transfer(struct spi_device *spi, u8 header, + const void *tx, void *rx, size_t len) +{ + void *txbuf = NULL, *rxbuf = NULL; + struct spi_transfer tr[2] = {}; + u8 *headerbuf; + int ret; + + headerbuf = kmalloc(1, GFP_KERNEL); + if (!headerbuf) + return -ENOMEM; + + headerbuf[0] = header; + tr[0].tx_buf = headerbuf; + tr[0].len = 1; + + /* Stack allocated tx? 
*/ + if (tx && len <= 32) { + txbuf = kmalloc(len, GFP_KERNEL); + if (!txbuf) { + ret = -ENOMEM; + goto out_free; + } + memcpy(txbuf, tx, len); + } + + if (rx) { + rxbuf = kmalloc(len, GFP_KERNEL); + if (!rxbuf) { + ret = -ENOMEM; + goto out_free; + } + } + + tr[1].tx_buf = txbuf ? txbuf : tx; + tr[1].rx_buf = rxbuf; + tr[1].len = len; + + ndelay(80); + ret = spi_sync_transfer(spi, tr, 2); + if (rx && !ret) + memcpy(rx, rxbuf, len); + +out_free: + kfree(headerbuf); + kfree(txbuf); + kfree(rxbuf); + + return ret; +} + +static int repaper_write_buf(struct spi_device *spi, u8 reg, + const u8 *buf, size_t len) +{ + int ret; + + ret = repaper_spi_transfer(spi, 0x70, ®, NULL, 1); + if (ret) + return ret; + + return repaper_spi_transfer(spi, 0x72, buf, NULL, len); +} + +static int repaper_write_val(struct spi_device *spi, u8 reg, u8 val) +{ + return repaper_write_buf(spi, reg, &val, 1); +} + +static int repaper_read_val(struct spi_device *spi, u8 reg) +{ + int ret; + u8 val; + + ret = repaper_spi_transfer(spi, 0x70, ®, NULL, 1); + if (ret) + return ret; + + ret = repaper_spi_transfer(spi, 0x73, NULL, &val, 1); + + return ret ? ret : val; +} + +static int repaper_read_id(struct spi_device *spi) +{ + int ret; + u8 id; + + ret = repaper_spi_transfer(spi, 0x71, NULL, &id, 1); + + return ret ? ret : id; +} + +static void repaper_spi_mosi_low(struct spi_device *spi) +{ + const u8 buf[1] = { 0 }; + + spi_write(spi, buf, 1); +} + +/* pixels on display are numbered from 1 so even is actually bits 1,3,5,... */ +static void repaper_even_pixels(struct repaper_epd *epd, u8 **pp, + const u8 *data, u8 fixed_value, const u8 *mask, + enum repaper_stage stage) +{ + unsigned int b; + + for (b = 0; b < (epd->width / 8); b++) { + if (data) { + u8 pixels = data[b] & 0xaa; + u8 pixel_mask = 0xff; + u8 p1, p2, p3, p4; + + if (mask) { + pixel_mask = (mask[b] ^ pixels) & 0xaa; + pixel_mask |= pixel_mask >> 1; + } + + switch (stage) { + case REPAPER_COMPENSATE: /* B -> W, W -> B (Current) */ + pixels = 0xaa | ((pixels ^ 0xaa) >> 1); + break; + case REPAPER_WHITE: /* B -> N, W -> W (Current) */ + pixels = 0x55 + ((pixels ^ 0xaa) >> 1); + break; + case REPAPER_INVERSE: /* B -> N, W -> B (New) */ + pixels = 0x55 | (pixels ^ 0xaa); + break; + case REPAPER_NORMAL: /* B -> B, W -> W (New) */ + pixels = 0xaa | (pixels >> 1); + break; + } + + pixels = (pixels & pixel_mask) | (~pixel_mask & 0x55); + p1 = (pixels >> 6) & 0x03; + p2 = (pixels >> 4) & 0x03; + p3 = (pixels >> 2) & 0x03; + p4 = (pixels >> 0) & 0x03; + pixels = (p1 << 0) | (p2 << 2) | (p3 << 4) | (p4 << 6); + *(*pp)++ = pixels; + } else { + *(*pp)++ = fixed_value; + } + } +} + +/* pixels on display are numbered from 1 so odd is actually bits 0,2,4,... 
*/ +static void repaper_odd_pixels(struct repaper_epd *epd, u8 **pp, + const u8 *data, u8 fixed_value, const u8 *mask, + enum repaper_stage stage) +{ + unsigned int b; + + for (b = epd->width / 8; b > 0; b--) { + if (data) { + u8 pixels = data[b - 1] & 0x55; + u8 pixel_mask = 0xff; + + if (mask) { + pixel_mask = (mask[b - 1] ^ pixels) & 0x55; + pixel_mask |= pixel_mask << 1; + } + + switch (stage) { + case REPAPER_COMPENSATE: /* B -> W, W -> B (Current) */ + pixels = 0xaa | (pixels ^ 0x55); + break; + case REPAPER_WHITE: /* B -> N, W -> W (Current) */ + pixels = 0x55 + (pixels ^ 0x55); + break; + case REPAPER_INVERSE: /* B -> N, W -> B (New) */ + pixels = 0x55 | ((pixels ^ 0x55) << 1); + break; + case REPAPER_NORMAL: /* B -> B, W -> W (New) */ + pixels = 0xaa | pixels; + break; + } + + pixels = (pixels & pixel_mask) | (~pixel_mask & 0x55); + *(*pp)++ = pixels; + } else { + *(*pp)++ = fixed_value; + } + } +} + +/* interleave bits: (byte)76543210 -> (16 bit).7.6.5.4.3.2.1 */ +static inline u16 repaper_interleave_bits(u16 value) +{ + value = (value | (value << 4)) & 0x0f0f; + value = (value | (value << 2)) & 0x3333; + value = (value | (value << 1)) & 0x5555; + + return value; +} + +/* pixels on display are numbered from 1 */ +static void repaper_all_pixels(struct repaper_epd *epd, u8 **pp, + const u8 *data, u8 fixed_value, const u8 *mask, + enum repaper_stage stage) +{ + unsigned int b; + + for (b = epd->width / 8; b > 0; b--) { + if (data) { + u16 pixels = repaper_interleave_bits(data[b - 1]); + u16 pixel_mask = 0xffff; + + if (mask) { + pixel_mask = repaper_interleave_bits(mask[b - 1]); + + pixel_mask = (pixel_mask ^ pixels) & 0x5555; + pixel_mask |= pixel_mask << 1; + } + + switch (stage) { + case REPAPER_COMPENSATE: /* B -> W, W -> B (Current) */ + pixels = 0xaaaa | (pixels ^ 0x5555); + break; + case REPAPER_WHITE: /* B -> N, W -> W (Current) */ + pixels = 0x5555 + (pixels ^ 0x5555); + break; + case REPAPER_INVERSE: /* B -> N, W -> B (New) */ + pixels = 0x5555 | ((pixels ^ 0x5555) << 1); + break; + case REPAPER_NORMAL: /* B -> B, W -> W (New) */ + pixels = 0xaaaa | pixels; + break; + } + + pixels = (pixels & pixel_mask) | (~pixel_mask & 0x5555); + *(*pp)++ = pixels >> 8; + *(*pp)++ = pixels; + } else { + *(*pp)++ = fixed_value; + *(*pp)++ = fixed_value; + } + } +} + +/* output one line of scan and data bytes to the display */ +static void repaper_one_line(struct repaper_epd *epd, unsigned int line, + const u8 *data, u8 fixed_value, const u8 *mask, + enum repaper_stage stage) +{ + u8 *p = epd->line_buffer; + unsigned int b; + + repaper_spi_mosi_low(epd->spi); + + if (epd->pre_border_byte) + *p++ = 0x00; + + if (epd->middle_scan) { + /* data bytes */ + repaper_odd_pixels(epd, &p, data, fixed_value, mask, stage); + + /* scan line */ + for (b = epd->bytes_per_scan; b > 0; b--) { + if (line / 4 == b - 1) + *p++ = 0x03 << (2 * (line & 0x03)); + else + *p++ = 0x00; + } + + /* data bytes */ + repaper_even_pixels(epd, &p, data, fixed_value, mask, stage); + } else { + /* + * even scan line, but as lines on display are numbered from 1, + * line: 1,3,5,... + */ + for (b = 0; b < epd->bytes_per_scan; b++) { + if (0 != (line & 0x01) && line / 8 == b) + *p++ = 0xc0 >> (line & 0x06); + else + *p++ = 0x00; + } + + /* data bytes */ + repaper_all_pixels(epd, &p, data, fixed_value, mask, stage); + + /* + * odd scan line, but as lines on display are numbered from 1, + * line: 0,2,4,6,... 
+ */ + for (b = epd->bytes_per_scan; b > 0; b--) { + if (0 == (line & 0x01) && line / 8 == b - 1) + *p++ = 0x03 << (line & 0x06); + else + *p++ = 0x00; + } + } + + switch (epd->border_byte) { + case REPAPER_BORDER_BYTE_NONE: + break; + + case REPAPER_BORDER_BYTE_ZERO: + *p++ = 0x00; + break; + + case REPAPER_BORDER_BYTE_SET: + switch (stage) { + case REPAPER_COMPENSATE: + case REPAPER_WHITE: + case REPAPER_INVERSE: + *p++ = 0x00; + break; + case REPAPER_NORMAL: + *p++ = 0xaa; + break; + } + break; + } + + repaper_write_buf(epd->spi, 0x0a, epd->line_buffer, + p - epd->line_buffer); + + /* Output data to panel */ + repaper_write_val(epd->spi, 0x02, 0x07); + + repaper_spi_mosi_low(epd->spi); +} + +static void repaper_frame_fixed(struct repaper_epd *epd, u8 fixed_value, + enum repaper_stage stage) +{ + unsigned int line; + + for (line = 0; line < epd->height; line++) + repaper_one_line(epd, line, NULL, fixed_value, NULL, stage); +} + +static void repaper_frame_data(struct repaper_epd *epd, const u8 *image, + const u8 *mask, enum repaper_stage stage) +{ + unsigned int line; + + if (!mask) { + for (line = 0; line < epd->height; line++) { + repaper_one_line(epd, line, + &image[line * (epd->width / 8)], + 0, NULL, stage); + } + } else { + for (line = 0; line < epd->height; line++) { + size_t n = line * epd->width / 8; + + repaper_one_line(epd, line, &image[n], 0, &mask[n], + stage); + } + } +} + +static void repaper_frame_fixed_repeat(struct repaper_epd *epd, u8 fixed_value, + enum repaper_stage stage) +{ + u64 start = local_clock(); + u64 end = start + (epd->factored_stage_time * 1000 * 1000); + + do { + repaper_frame_fixed(epd, fixed_value, stage); + } while (local_clock() < end); +} + +static void repaper_frame_data_repeat(struct repaper_epd *epd, const u8 *image, + const u8 *mask, enum repaper_stage stage) +{ + u64 start = local_clock(); + u64 end = start + (epd->factored_stage_time * 1000 * 1000); + + do { + repaper_frame_data(epd, image, mask, stage); + } while (local_clock() < end); +} + +static void repaper_get_temperature(struct repaper_epd *epd) +{ + int ret, temperature = 0; + unsigned int factor10x; + + if (!epd->thermal) + return; + + ret = thermal_zone_get_temp(epd->thermal, &temperature); + if (ret) { + dev_err(&epd->spi->dev, "Failed to get temperature (%d)\n", + ret); + return; + } + + temperature /= 1000; + + if (temperature <= -10) + factor10x = 170; + else if (temperature <= -5) + factor10x = 120; + else if (temperature <= 5) + factor10x = 80; + else if (temperature <= 10) + factor10x = 40; + else if (temperature <= 15) + factor10x = 30; + else if (temperature <= 20) + factor10x = 20; + else if (temperature <= 40) + factor10x = 10; + else + factor10x = 7; + + epd->factored_stage_time = epd->stage_time * factor10x / 10; +} + +static void repaper_gray8_to_mono_reversed(u8 *buf, u32 width, u32 height) +{ + u8 *gray8 = buf, *mono = buf; + int y, xb, i; + + for (y = 0; y < height; y++) + for (xb = 0; xb < width / 8; xb++) { + u8 byte = 0x00; + + for (i = 0; i < 8; i++) { + int x = xb * 8 + i; + + byte >>= 1; + if (gray8[y * width + x] >> 7) + byte |= BIT(7); + } + *mono++ = byte; + } +} + +static int repaper_fb_dirty(struct drm_framebuffer *fb, + struct drm_file *file_priv, + unsigned int flags, unsigned int color, + struct drm_clip_rect *clips, + unsigned int num_clips) +{ + struct tinydrm_device *tdev = fb->dev->dev_private; + struct repaper_epd *epd = epd_from_tinydrm(tdev); + u8 *buf = NULL; + int ret = 0; + + mutex_lock(&tdev->dirty_lock); + + if (!epd->enabled) + goto 
out_unlock; + + /* fbdev can flush even when we're not interested */ + if (tdev->pipe.plane.fb != fb) + goto out_unlock; + + repaper_get_temperature(epd); + + DRM_DEBUG("Flushing [FB:%d] st=%ums\n", fb->base.id, + epd->factored_stage_time); + + buf = kmalloc(fb->width * fb->height, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto out_unlock; + } + + ret = tinydrm_xrgb8888_to_gray8(buf, fb); + if (ret) + goto out_unlock; + + repaper_gray8_to_mono_reversed(buf, fb->width, fb->height); + + if (epd->partial) { + repaper_frame_data_repeat(epd, buf, epd->current_frame, + REPAPER_NORMAL); + } else if (epd->cleared) { + repaper_frame_data_repeat(epd, epd->current_frame, NULL, + REPAPER_COMPENSATE); + repaper_frame_data_repeat(epd, epd->current_frame, NULL, + REPAPER_WHITE); + repaper_frame_data_repeat(epd, buf, NULL, REPAPER_INVERSE); + repaper_frame_data_repeat(epd, buf, NULL, REPAPER_NORMAL); + + epd->partial = true; + } else { + /* Clear display (anything -> white) */ + repaper_frame_fixed_repeat(epd, 0xff, REPAPER_COMPENSATE); + repaper_frame_fixed_repeat(epd, 0xff, REPAPER_WHITE); + repaper_frame_fixed_repeat(epd, 0xaa, REPAPER_INVERSE); + repaper_frame_fixed_repeat(epd, 0xaa, REPAPER_NORMAL); + + /* Assuming a clear (white) screen output an image */ + repaper_frame_fixed_repeat(epd, 0xaa, REPAPER_COMPENSATE); + repaper_frame_fixed_repeat(epd, 0xaa, REPAPER_WHITE); + repaper_frame_data_repeat(epd, buf, NULL, REPAPER_INVERSE); + repaper_frame_data_repeat(epd, buf, NULL, REPAPER_NORMAL); + + epd->cleared = true; + epd->partial = true; + } + + memcpy(epd->current_frame, buf, fb->width * fb->height / 8); + + /* + * An extra frame write is needed if pixels are set in the bottom line, + * or else grey lines rises up from the pixels + */ + if (epd->pre_border_byte) { + unsigned int x; + + for (x = 0; x < (fb->width / 8); x++) + if (buf[x + (fb->width * (fb->height - 1) / 8)]) { + repaper_frame_data_repeat(epd, buf, + epd->current_frame, + REPAPER_NORMAL); + break; + } + } + +out_unlock: + mutex_unlock(&tdev->dirty_lock); + + if (ret) + dev_err(fb->dev->dev, "Failed to update display (%d)\n", ret); + kfree(buf); + + return ret; +} + +static const struct drm_framebuffer_funcs repaper_fb_funcs = { + .destroy = drm_fb_cma_destroy, + .create_handle = drm_fb_cma_create_handle, + .dirty = repaper_fb_dirty, +}; + +static void power_off(struct repaper_epd *epd) +{ + /* Turn off power and all signals */ + gpiod_set_value_cansleep(epd->reset, 0); + gpiod_set_value_cansleep(epd->panel_on, 0); + if (epd->border) + gpiod_set_value_cansleep(epd->border, 0); + + /* Ensure SPI MOSI and CLOCK are Low before CS Low */ + repaper_spi_mosi_low(epd->spi); + + /* Discharge pulse */ + gpiod_set_value_cansleep(epd->discharge, 1); + msleep(150); + gpiod_set_value_cansleep(epd->discharge, 0); +} + +static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe, + struct drm_crtc_state *crtc_state) +{ + struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); + struct repaper_epd *epd = epd_from_tinydrm(tdev); + struct spi_device *spi = epd->spi; + struct device *dev = &spi->dev; + bool dc_ok = false; + int i, ret; + + DRM_DEBUG_DRIVER("\n"); + + /* Power up sequence */ + gpiod_set_value_cansleep(epd->reset, 0); + gpiod_set_value_cansleep(epd->panel_on, 0); + gpiod_set_value_cansleep(epd->discharge, 0); + if (epd->border) + gpiod_set_value_cansleep(epd->border, 0); + repaper_spi_mosi_low(spi); + usleep_range(5000, 10000); + + gpiod_set_value_cansleep(epd->panel_on, 1); + /* + * This delay comes from the repaper.org 
userspace driver, it's not + * mentioned in the datasheet. + */ + usleep_range(10000, 15000); + gpiod_set_value_cansleep(epd->reset, 1); + if (epd->border) + gpiod_set_value_cansleep(epd->border, 1); + usleep_range(5000, 10000); + gpiod_set_value_cansleep(epd->reset, 0); + usleep_range(5000, 10000); + gpiod_set_value_cansleep(epd->reset, 1); + usleep_range(5000, 10000); + + /* Wait for COG to become ready */ + for (i = 100; i > 0; i--) { + if (!gpiod_get_value_cansleep(epd->busy)) + break; + + usleep_range(10, 100); + } + + if (!i) { + dev_err(dev, "timeout waiting for panel to become ready.\n"); + power_off(epd); + return; + } + + repaper_read_id(spi); + ret = repaper_read_id(spi); + if (ret != REPAPER_RID_G2_COG_ID) { + if (ret < 0) + dev_err(dev, "failed to read chip (%d)\n", ret); + else + dev_err(dev, "wrong COG ID 0x%02x\n", ret); + power_off(epd); + return; + } + + /* Disable OE */ + repaper_write_val(spi, 0x02, 0x40); + + ret = repaper_read_val(spi, 0x0f); + if (ret < 0 || !(ret & 0x80)) { + if (ret < 0) + dev_err(dev, "failed to read chip (%d)\n", ret); + else + dev_err(dev, "panel is reported broken\n"); + power_off(epd); + return; + } + + /* Power saving mode */ + repaper_write_val(spi, 0x0b, 0x02); + /* Channel select */ + repaper_write_buf(spi, 0x01, epd->channel_select, 8); + /* High power mode osc */ + repaper_write_val(spi, 0x07, 0xd1); + /* Power setting */ + repaper_write_val(spi, 0x08, 0x02); + /* Vcom level */ + repaper_write_val(spi, 0x09, 0xc2); + /* Power setting */ + repaper_write_val(spi, 0x04, 0x03); + /* Driver latch on */ + repaper_write_val(spi, 0x03, 0x01); + /* Driver latch off */ + repaper_write_val(spi, 0x03, 0x00); + usleep_range(5000, 10000); + + /* Start chargepump */ + for (i = 0; i < 4; ++i) { + /* Charge pump positive voltage on - VGH/VDL on */ + repaper_write_val(spi, 0x05, 0x01); + msleep(240); + + /* Charge pump negative voltage on - VGL/VDL on */ + repaper_write_val(spi, 0x05, 0x03); + msleep(40); + + /* Charge pump Vcom on - Vcom driver on */ + repaper_write_val(spi, 0x05, 0x0f); + msleep(40); + + /* check DC/DC */ + ret = repaper_read_val(spi, 0x0f); + if (ret < 0) { + dev_err(dev, "failed to read chip (%d)\n", ret); + power_off(epd); + return; + } + + if (ret & 0x40) { + dc_ok = true; + break; + } + } + + if (!dc_ok) { + dev_err(dev, "dc/dc failed\n"); + power_off(epd); + return; + } + + /* + * Output enable to disable + * The userspace driver sets this to 0x04, but the datasheet says 0x06 + */ + repaper_write_val(spi, 0x02, 0x04); + + epd->enabled = true; + epd->partial = false; +} + +static void repaper_pipe_disable(struct drm_simple_display_pipe *pipe) +{ + struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); + struct repaper_epd *epd = epd_from_tinydrm(tdev); + struct spi_device *spi = epd->spi; + unsigned int line; + + DRM_DEBUG_DRIVER("\n"); + + mutex_lock(&tdev->dirty_lock); + epd->enabled = false; + mutex_unlock(&tdev->dirty_lock); + + /* Nothing frame */ + for (line = 0; line < epd->height; line++) + repaper_one_line(epd, 0x7fffu, NULL, 0x00, NULL, + REPAPER_COMPENSATE); + + /* 2.7" */ + if (epd->border) { + /* Dummy line */ + repaper_one_line(epd, 0x7fffu, NULL, 0x00, NULL, + REPAPER_COMPENSATE); + msleep(25); + gpiod_set_value_cansleep(epd->border, 0); + msleep(200); + gpiod_set_value_cansleep(epd->border, 1); + } else { + /* Border dummy line */ + repaper_one_line(epd, 0x7fffu, NULL, 0x00, NULL, + REPAPER_NORMAL); + msleep(200); + } + + /* not described in datasheet */ + repaper_write_val(spi, 0x0b, 0x00); + /* Latch reset turn on 
*/ + repaper_write_val(spi, 0x03, 0x01); + /* Power off charge pump Vcom */ + repaper_write_val(spi, 0x05, 0x03); + /* Power off charge pump neg voltage */ + repaper_write_val(spi, 0x05, 0x01); + msleep(120); + /* Discharge internal */ + repaper_write_val(spi, 0x04, 0x80); + /* turn off all charge pumps */ + repaper_write_val(spi, 0x05, 0x00); + /* Turn off osc */ + repaper_write_val(spi, 0x07, 0x01); + msleep(50); + + power_off(epd); +} + +static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = { + .enable = repaper_pipe_enable, + .disable = repaper_pipe_disable, + .update = tinydrm_display_pipe_update, + .prepare_fb = tinydrm_display_pipe_prepare_fb, +}; + +static const uint32_t repaper_formats[] = { + DRM_FORMAT_XRGB8888, +}; + +static const struct drm_display_mode repaper_e1144cs021_mode = { + TINYDRM_MODE(128, 96, 29, 22), +}; + +static const u8 repaper_e1144cs021_cs[] = { 0x00, 0x00, 0x00, 0x00, + 0x00, 0x0f, 0xff, 0x00 }; + +static const struct drm_display_mode repaper_e1190cs021_mode = { + TINYDRM_MODE(144, 128, 36, 32), +}; + +static const u8 repaper_e1190cs021_cs[] = { 0x00, 0x00, 0x00, 0x03, + 0xfc, 0x00, 0x00, 0xff }; + +static const struct drm_display_mode repaper_e2200cs021_mode = { + TINYDRM_MODE(200, 96, 46, 22), +}; + +static const u8 repaper_e2200cs021_cs[] = { 0x00, 0x00, 0x00, 0x00, + 0x01, 0xff, 0xe0, 0x00 }; + +static const struct drm_display_mode repaper_e2271cs021_mode = { + TINYDRM_MODE(264, 176, 57, 38), +}; + +static const u8 repaper_e2271cs021_cs[] = { 0x00, 0x00, 0x00, 0x7f, + 0xff, 0xfe, 0x00, 0x00 }; + +DEFINE_DRM_GEM_CMA_FOPS(repaper_fops); + +static struct drm_driver repaper_driver = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | + DRIVER_ATOMIC, + .fops = &repaper_fops, + TINYDRM_GEM_DRIVER_OPS, + .name = "repaper", + .desc = "Pervasive Displays RePaper e-ink panels", + .date = "20170405", + .major = 1, + .minor = 0, +}; + +static const struct of_device_id repaper_of_match[] = { + { .compatible = "pervasive,e1144cs021", .data = (void *)E1144CS021 }, + { .compatible = "pervasive,e1190cs021", .data = (void *)E1190CS021 }, + { .compatible = "pervasive,e2200cs021", .data = (void *)E2200CS021 }, + { .compatible = "pervasive,e2271cs021", .data = (void *)E2271CS021 }, + {}, +}; +MODULE_DEVICE_TABLE(of, repaper_of_match); + +static const struct spi_device_id repaper_id[] = { + { "e1144cs021", E1144CS021 }, + { "e1190cs021", E1190CS021 }, + { "e2200cs021", E2200CS021 }, + { "e2271cs021", E2271CS021 }, + { }, +}; +MODULE_DEVICE_TABLE(spi, repaper_id); + +static int repaper_probe(struct spi_device *spi) +{ + const struct drm_display_mode *mode; + const struct spi_device_id *spi_id; + const struct of_device_id *match; + struct device *dev = &spi->dev; + struct tinydrm_device *tdev; + enum repaper_model model; + const char *thermal_zone; + struct repaper_epd *epd; + size_t line_buffer_size; + int ret; + + match = of_match_device(repaper_of_match, dev); + if (match) { + model = (enum repaper_model)match->data; + } else { + spi_id = spi_get_device_id(spi); + model = spi_id->driver_data; + } + + /* The SPI device is used to allocate dma memory */ + if (!dev->coherent_dma_mask) { + ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) { + dev_warn(dev, "Failed to set dma mask %d\n", ret); + return ret; + } + } + + epd = devm_kzalloc(dev, sizeof(*epd), GFP_KERNEL); + if (!epd) + return -ENOMEM; + + epd->spi = spi; + + epd->panel_on = devm_gpiod_get(dev, "panel-on", GPIOD_OUT_LOW); + if (IS_ERR(epd->panel_on)) { + ret = 
PTR_ERR(epd->panel_on); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to get gpio 'panel-on'\n"); + return ret; + } + + epd->discharge = devm_gpiod_get(dev, "discharge", GPIOD_OUT_LOW); + if (IS_ERR(epd->discharge)) { + ret = PTR_ERR(epd->discharge); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to get gpio 'discharge'\n"); + return ret; + } + + epd->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(epd->reset)) { + ret = PTR_ERR(epd->reset); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to get gpio 'reset'\n"); + return ret; + } + + epd->busy = devm_gpiod_get(dev, "busy", GPIOD_IN); + if (IS_ERR(epd->busy)) { + ret = PTR_ERR(epd->busy); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to get gpio 'busy'\n"); + return ret; + } + + if (!device_property_read_string(dev, "pervasive,thermal-zone", + &thermal_zone)) { + epd->thermal = thermal_zone_get_zone_by_name(thermal_zone); + if (IS_ERR(epd->thermal)) { + dev_err(dev, "Failed to get thermal zone: %s\n", + thermal_zone); + return PTR_ERR(epd->thermal); + } + } + + switch (model) { + case E1144CS021: + mode = &repaper_e1144cs021_mode; + epd->channel_select = repaper_e1144cs021_cs; + epd->stage_time = 480; + epd->bytes_per_scan = 96 / 4; + epd->middle_scan = true; /* data-scan-data */ + epd->pre_border_byte = false; + epd->border_byte = REPAPER_BORDER_BYTE_ZERO; + break; + + case E1190CS021: + mode = &repaper_e1190cs021_mode; + epd->channel_select = repaper_e1190cs021_cs; + epd->stage_time = 480; + epd->bytes_per_scan = 128 / 4 / 2; + epd->middle_scan = false; /* scan-data-scan */ + epd->pre_border_byte = false; + epd->border_byte = REPAPER_BORDER_BYTE_SET; + break; + + case E2200CS021: + mode = &repaper_e2200cs021_mode; + epd->channel_select = repaper_e2200cs021_cs; + epd->stage_time = 480; + epd->bytes_per_scan = 96 / 4; + epd->middle_scan = true; /* data-scan-data */ + epd->pre_border_byte = true; + epd->border_byte = REPAPER_BORDER_BYTE_NONE; + break; + + case E2271CS021: + epd->border = devm_gpiod_get(dev, "border", GPIOD_OUT_LOW); + if (IS_ERR(epd->border)) { + ret = PTR_ERR(epd->border); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to get gpio 'border'\n"); + return ret; + } + + mode = &repaper_e2271cs021_mode; + epd->channel_select = repaper_e2271cs021_cs; + epd->stage_time = 630; + epd->bytes_per_scan = 176 / 4; + epd->middle_scan = true; /* data-scan-data */ + epd->pre_border_byte = true; + epd->border_byte = REPAPER_BORDER_BYTE_NONE; + break; + + default: + return -ENODEV; + } + + epd->width = mode->hdisplay; + epd->height = mode->vdisplay; + epd->factored_stage_time = epd->stage_time; + + line_buffer_size = 2 * epd->width / 8 + epd->bytes_per_scan + 2; + epd->line_buffer = devm_kzalloc(dev, line_buffer_size, GFP_KERNEL); + if (!epd->line_buffer) + return -ENOMEM; + + epd->current_frame = devm_kzalloc(dev, epd->width * epd->height / 8, + GFP_KERNEL); + if (!epd->current_frame) + return -ENOMEM; + + tdev = &epd->tinydrm; + + ret = devm_tinydrm_init(dev, tdev, &repaper_fb_funcs, &repaper_driver); + if (ret) + return ret; + + ret = tinydrm_display_pipe_init(tdev, &repaper_pipe_funcs, + DRM_MODE_CONNECTOR_VIRTUAL, + repaper_formats, + ARRAY_SIZE(repaper_formats), mode, 0); + if (ret) + return ret; + + drm_mode_config_reset(tdev->drm); + + ret = devm_tinydrm_register(tdev); + if (ret) + return ret; + + spi_set_drvdata(spi, tdev); + + DRM_DEBUG_DRIVER("Initialized %s:%s @%uMHz on minor %d\n", + tdev->drm->driver->name, dev_name(dev), + spi->max_speed_hz / 1000000, + tdev->drm->primary->index); + + 
return 0; +} + +static void repaper_shutdown(struct spi_device *spi) +{ + struct tinydrm_device *tdev = spi_get_drvdata(spi); + + tinydrm_shutdown(tdev); +} + +static struct spi_driver repaper_spi_driver = { + .driver = { + .name = "repaper", + .owner = THIS_MODULE, + .of_match_table = repaper_of_match, + }, + .id_table = repaper_id, + .probe = repaper_probe, + .shutdown = repaper_shutdown, +}; +module_spi_driver(repaper_spi_driver); + +MODULE_DESCRIPTION("Pervasive Displays RePaper DRM driver"); +MODULE_AUTHOR("Noralf Trønnes"); +MODULE_LICENSE("GPL"); From 0c1f528cb13708ff3ba462a5c757d5588fc47d36 Mon Sep 17 00:00:00 2001 From: Shashank Sharma Date: Thu, 13 Jul 2017 21:03:07 +0530 Subject: [PATCH 0356/1795] drm: handle HDMI 2.0 VICs in AVI info-frames MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit HDMI 1.4b support the CEA video modes as per range of CEA-861-D (VIC 1-64). For any other mode, the VIC filed in AVI infoframes should be 0. HDMI 2.0 sinks, support video modes range as per CEA-861-F spec, which is extended to (VIC 1-107). This patch adds a bool input variable, which indicates if the connected sink is a HDMI 2.0 sink or not. This will make sure that we don't pass a HDMI 2.0 VIC to a HDMI 1.4 sink. This patch touches all drm drivers, who are callers of this function drm_hdmi_avi_infoframe_from_display_mode but to make sure there is no change in current behavior, is_hdmi2 is kept as false. In case of I915 driver, this patch: - checks if the connected display is HDMI 2.0. - HDMI infoframes carry one of this two type of information: - VIC for 4K modes for HDMI 1.4 sinks - S3D information for S3D modes As CEA-861-F has already defined VICs for 4K videomodes, this patch doesn't allow sending HDMI infoframes for HDMI 2.0 sinks, until the mode is 3D. Cc: Ville Syrjala Cc: Jose Abreu Cc: Andrzej Hajda Cc: Alex Deucher Cc: Daniel Vetter PS: This patch touches a few lines in few files, which were already above 80 char, so checkpatch gives 80 char warning again. - gpu/drm/omapdrm/omap_encoder.c - gpu/drm/i915/intel_sdvo.c V2: Rebase, Added r-b from Andrzej V3: Addressed review comment from Ville: - Do not send VICs in both AVI-IF and HDMI-IF send only one of it. V4: Rebase V5: Added r-b from Neil. 
Addressed review comments from Ville - Do not block HDMI vendor IF, instead check for VIC while handling AVI infoframes V6: Rebase V7: Rebase Reviewed-by: Andrzej Hajda Reviewed-by: Neil Armstrong Signed-off-by: Shashank Sharma Link: http://patchwork.freedesktop.org/patch/msgid/1499960000-9232-2-git-send-email-shashank.sharma@intel.com Signed-off-by: Ville Syrjälä --- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 2 +- drivers/gpu/drm/bridge/analogix-anx78xx.c | 3 ++- drivers/gpu/drm/bridge/sii902x.c | 2 +- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 2 +- drivers/gpu/drm/drm_edid.c | 26 ++++++++++++++++++++++- drivers/gpu/drm/exynos/exynos_hdmi.c | 2 +- drivers/gpu/drm/i2c/tda998x_drv.c | 2 +- drivers/gpu/drm/i915/intel_hdmi.c | 5 ++++- drivers/gpu/drm/i915/intel_sdvo.c | 3 ++- drivers/gpu/drm/mediatek/mtk_hdmi.c | 2 +- drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | 2 +- drivers/gpu/drm/nouveau/nv50_display.c | 3 ++- drivers/gpu/drm/omapdrm/omap_encoder.c | 3 ++- drivers/gpu/drm/radeon/radeon_audio.c | 2 +- drivers/gpu/drm/rockchip/inno_hdmi.c | 2 +- drivers/gpu/drm/sti/sti_hdmi.c | 2 +- drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c | 2 +- drivers/gpu/drm/tegra/hdmi.c | 2 +- drivers/gpu/drm/tegra/sor.c | 2 +- drivers/gpu/drm/vc4/vc4_hdmi.c | 2 +- drivers/gpu/drm/zte/zx_hdmi.c | 2 +- include/drm/drm_edid.h | 3 ++- 25 files changed, 57 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 9f78c03a2e31..aff1f48c947e 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -1867,7 +1867,7 @@ static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder, dce_v10_0_audio_write_sad_regs(encoder); dce_v10_0_audio_write_latency_fields(encoder, mode); - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); if (err < 0) { DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); return; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 4bcf01dc567a..2df650dfa727 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -1851,7 +1851,7 @@ static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder, dce_v11_0_audio_write_sad_regs(encoder); dce_v11_0_audio_write_latency_fields(encoder, mode); - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); if (err < 0) { DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); return; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index fd134a4629d7..0c3891fa62f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -1597,7 +1597,7 @@ static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder, ssize_t err; u32 tmp; - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); if (err < 0) { DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); return; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index a9e869554627..c164bef82846 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -1750,7 +1750,7 @@ static void 
dce_v8_0_afmt_setmode(struct drm_encoder *encoder, dce_v8_0_audio_write_sad_regs(encoder); dce_v8_0_audio_write_latency_fields(encoder, mode); - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); if (err < 0) { DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); return; diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c index c2fac3947006..dc045e0c32fc 100644 --- a/drivers/gpu/drm/bridge/analogix-anx78xx.c +++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c @@ -1097,7 +1097,8 @@ static void anx78xx_bridge_mode_set(struct drm_bridge *bridge, mutex_lock(&anx78xx->lock); - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, adjusted_mode); + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, adjusted_mode, + false); if (err) { DRM_ERROR("Failed to setup AVI infoframe: %d\n", err); goto unlock; diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index b8d10e599df0..9efb7b8fad57 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -269,7 +269,7 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge, if (ret) return; - ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, adj); + ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, adj, false); if (ret < 0) { DRM_ERROR("couldn't fill AVI infoframe\n"); return; diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index de1308b61390..60faf2d2bc6b 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -1317,7 +1317,7 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode) u8 val; /* Initialise info frame from DRM mode */ - drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); + drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); if (hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format)) frame.colorspace = HDMI_COLORSPACE_YUV444; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 2e55599816aa..0667b0744b17 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -4334,12 +4334,14 @@ EXPORT_SYMBOL(drm_set_preferred_mode); * data from a DRM display mode * @frame: HDMI AVI infoframe * @mode: DRM display mode + * @is_hdmi2_sink: Sink is HDMI 2.0 compliant * * Return: 0 on success or a negative error code on failure. */ int drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, - const struct drm_display_mode *mode) + const struct drm_display_mode *mode, + bool is_hdmi2_sink) { int err; @@ -4355,6 +4357,28 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, frame->video_code = drm_match_cea_mode(mode); + /* + * HDMI 1.4 VIC range: 1 <= VIC <= 64 (CEA-861-D) but + * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we + * have to make sure we dont break HDMI 1.4 sinks. + */ + if (!is_hdmi2_sink && frame->video_code > 64) + frame->video_code = 0; + + /* + * HDMI spec says if a mode is found in HDMI 1.4b 4K modes + * we should send its VIC in vendor infoframes, else send the + * VIC in AVI infoframes. 
Lets check if this mode is present in + * HDMI 1.4b 4K modes + */ + if (frame->video_code) { + u8 vendor_if_vic = drm_match_hdmi_mode(mode); + bool is_s3d = mode->flags & DRM_MODE_FLAG_3D_MASK; + + if (drm_valid_hdmi_vic(vendor_if_vic) && !is_s3d) + frame->video_code = 0; + } + frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE; /* diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 06bfbe400cf1..c953927fb0cb 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -784,7 +784,7 @@ static void hdmi_reg_infoframes(struct hdmi_context *hdata) } ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi, - &hdata->current_mode); + &hdata->current_mode, false); if (!ret) ret = hdmi_avi_infoframe_pack(&frm.avi, buf, sizeof(buf)); if (ret > 0) { diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 86f47e190309..d1e7ac540199 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -712,7 +712,7 @@ tda998x_write_avi(struct tda998x_priv *priv, struct drm_display_mode *mode) { union hdmi_infoframe frame; - drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode); + drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false); frame.avi.quantization_range = HDMI_QUANTIZATION_RANGE_FULL; tda998x_write_if(priv, DIP_IF_FLAGS_IF2, REG_IF2_HB0, &frame); diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 916340f03882..2f831cfdd243 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -459,11 +459,14 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; + struct drm_connector *connector = &intel_hdmi->attached_connector->base; + bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported; union hdmi_infoframe frame; int ret; ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, - adjusted_mode); + adjusted_mode, + is_hdmi2_sink); if (ret < 0) { DRM_ERROR("couldn't fill AVI infoframe\n"); return; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 3f8f30b412cd..85d9ff361e74 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -996,7 +996,8 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, ssize_t len; ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, - &pipe_config->base.adjusted_mode); + &pipe_config->base.adjusted_mode, + false); if (ret < 0) { DRM_ERROR("couldn't fill AVI infoframe\n"); return false; diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 0a4ffd724146..5c0d02444bd3 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -975,7 +975,7 @@ static int mtk_hdmi_setup_avi_infoframe(struct mtk_hdmi *hdmi, u8 buffer[17]; ssize_t err; - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); if (err < 0) { dev_err(hdmi->dev, "Failed to get AVI infoframe from mode: %zd\n", err); diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c index ae40e7179d4f..13ac822dee5d 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c @@ -97,7 +97,7 @@ static void 
msm_hdmi_config_avi_infoframe(struct hdmi *hdmi) u32 val; int len; - drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode); + drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false); len = hdmi_infoframe_pack(&frame, buffer, sizeof(buffer)); if (len < 0) { diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 42a85c14aea0..5f71e304022e 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -2762,7 +2762,8 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode) if (!drm_detect_hdmi_monitor(nv_connector->edid)) return; - ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi, mode); + ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi, mode, + false); if (!ret) { /* We have an AVI InfoFrame, populate it to the display */ args.pwr.avi_infoframe_length diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index 86c977b7189a..624f5b50b755 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c @@ -85,7 +85,8 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder, if (hdmi_mode && dssdev->driver->set_hdmi_infoframe) { struct hdmi_avi_infoframe avi; - r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode); + r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode, + false); if (r == 0) dssdev->driver->set_hdmi_infoframe(dssdev, &avi); } diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c index aaacac190d26..770e31f5fd1b 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.c +++ b/drivers/gpu/drm/radeon/radeon_audio.c @@ -516,7 +516,7 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder, if (!connector) return -EINVAL; - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); if (err < 0) { DRM_ERROR("failed to setup AVI infoframe: %d\n", err); return err; diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c index 7d9b75eb6c44..7149968aa25a 100644 --- a/drivers/gpu/drm/rockchip/inno_hdmi.c +++ b/drivers/gpu/drm/rockchip/inno_hdmi.c @@ -294,7 +294,7 @@ static int inno_hdmi_config_video_avi(struct inno_hdmi *hdmi, union hdmi_infoframe frame; int rc; - rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode); + rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false); if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV444) frame.avi.colorspace = HDMI_COLORSPACE_YUV444; diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index a59c95a8081b..dbc6a195d6f9 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -434,7 +434,7 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi) DRM_DEBUG_DRIVER("\n"); - ret = drm_hdmi_avi_infoframe_from_display_mode(&infoframe, mode); + ret = drm_hdmi_avi_infoframe_from_display_mode(&infoframe, mode, false); if (ret < 0) { DRM_ERROR("failed to setup AVI infoframe: %d\n", ret); return ret; diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index d3398f6250ef..83b7a2a025f2 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c @@ -52,7 +52,7 @@ static int sun4i_hdmi_setup_avi_infoframes(struct sun4i_hdmi *hdmi, u8 buffer[17]; int i, ret; - ret = 
drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); + ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); if (ret < 0) { DRM_ERROR("Failed to get infoframes from mode\n"); return ret; diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index cda0491ed6bf..718d8db406a6 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -734,7 +734,7 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi, u8 buffer[17]; ssize_t err; - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); if (err < 0) { dev_err(hdmi->dev, "failed to setup AVI infoframe: %zd\n", err); return; diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index a8f528925009..fb2709c0c461 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -1904,7 +1904,7 @@ tegra_sor_hdmi_setup_avi_infoframe(struct tegra_sor *sor, value &= ~INFOFRAME_CTRL_ENABLE; tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL); - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); if (err < 0) { dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err); return err; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index ed63d4e85762..406d6d83b6c6 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -395,7 +395,7 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder) union hdmi_infoframe frame; int ret; - ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode); + ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false); if (ret < 0) { DRM_ERROR("couldn't fill AVI infoframe\n"); return; diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c index 0df7366e594b..7e834e3eeed9 100644 --- a/drivers/gpu/drm/zte/zx_hdmi.c +++ b/drivers/gpu/drm/zte/zx_hdmi.c @@ -124,7 +124,7 @@ static int zx_hdmi_config_video_avi(struct zx_hdmi *hdmi, union hdmi_infoframe frame; int ret; - ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode); + ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false); if (ret) { DRM_DEV_ERROR(hdmi->dev, "failed to get avi infoframe: %d\n", ret); diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 7b9f48b62e07..89c00626d654 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -343,7 +343,8 @@ drm_load_edid_firmware(struct drm_connector *connector) int drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, - const struct drm_display_mode *mode); + const struct drm_display_mode *mode, + bool is_hdmi2_sink); int drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, const struct drm_display_mode *mode); From 8ec6e0755565192b328059b64f982adabbecda78 Mon Sep 17 00:00:00 2001 From: Shashank Sharma Date: Thu, 13 Jul 2017 21:03:08 +0530 Subject: [PATCH 0357/1795] drm/edid: complete CEA modedb(VIC 1-107) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CEA-861-F specs defines new video modes to be used with HDMI 2.0 EDIDs. The VIC range has been extended from 1-64 to 1-107. Our existing CEA modedb contains only 64 modes (VIC=1 to VIC=64). Now to be able to parse new CEA modes using the existing methods, we have to complete the modedb (VIC=65 onwards). 
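(As a rough aside, not part of the patch itself: the modedb is laid out so
that VIC N sits at edid_cea_modes[N - 1], which is why the table has to be
contiguous up to the highest VIC we want to resolve. A minimal lookup
sketch, reusing the in-file table name but with an invented helper name,
would look like:

    /* Hypothetical helper, for illustration only. */
    static const struct drm_display_mode *cea_mode_for_vic(u8 vic)
    {
            /* VICs are 1-based, the array is 0-based. */
            if (vic == 0 || vic > ARRAY_SIZE(edid_cea_modes))
                    return NULL;

            return &edid_cea_modes[vic - 1];
    }

With only 64 entries, any VIC above 64 falls off the end and can never be
matched, hence the additions below.)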
This patch adds: - Timings for existing CEA video modes (from VIC=65 till VIC=92) - Newly added 4k modes (from VIC=93 to VIC=107). The patch was originaly discussed and reviewed here: https://patchwork.freedesktop.org/patch/135810/ Cc: Ville Syrjala Cc: Jose Abreu Cc: Andrzej Hajda Cc: Alex Deucher Cc: Harry Wentland V2: Rebase V3: Rebase V4: Added native bit handling as per CEA-861-F spec (Ville) V5: Fix timings for VIC 77:1920x1080 and 104:3840x2160p (Ville) Remove unnecessary paranthesis from function svd_to_vic (Ville) Added r-b (Neil) V6: Rebase V7: Fix indentation for modes from VIC 80 Reviewed-by: Jose Abreu Reviewed-by: Alex Deucher Reviewed-by: Neil Armstrong Acked-by: Harry Wentland Signed-off-by: Shashank Sharma Link: http://patchwork.freedesktop.org/patch/msgid/1499960000-9232-3-git-send-email-shashank.sharma@intel.com [vsyrjala: Fix up remaining formatting/indentation issues] Signed-off-by: Ville Syrjälä --- drivers/gpu/drm/drm_edid.c | 226 ++++++++++++++++++++++++++++++++++++- 1 file changed, 225 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 0667b0744b17..42c83add9d37 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1006,6 +1006,221 @@ static const struct drm_display_mode edid_cea_modes[] = { 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 65 - 1280x720@24Hz */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040, + 3080, 3300, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 66 - 1280x720@25Hz */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700, + 3740, 3960, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 67 - 1280x720@30Hz */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040, + 3080, 3300, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 68 - 1280x720@50Hz */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720, + 1760, 1980, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 69 - 1280x720@60Hz */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390, + 1430, 1650, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 70 - 1280x720@100Hz */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720, + 1760, 1980, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 71 - 1280x720@120Hz */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390, + 1430, 1650, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 72 - 1920x1080@24Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558, + 2602, 2750, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 73 - 1920x1080@25Hz */ + { 
DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, + 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 74 - 1920x1080@30Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, + 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 75 - 1920x1080@50Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, + 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 76 - 1920x1080@60Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, + 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 77 - 1920x1080@100Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448, + 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 78 - 1920x1080@120Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008, + 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 79 - 1680x720@24Hz */ + { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 3040, + 3080, 3300, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 80 - 1680x720@25Hz */ + { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2908, + 2948, 3168, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 81 - 1680x720@30Hz */ + { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2380, + 2420, 2640, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 82 - 1680x720@50Hz */ + { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 82500, 1680, 1940, + 1980, 2200, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 83 - 1680x720@60Hz */ + { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 1940, + 1980, 2200, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 84 - 1680x720@100Hz */ + { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 165000, 1680, 1740, + 1780, 2000, 0, 720, 725, 730, 825, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 85 - 1680x720@120Hz */ + { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 198000, 1680, 1740, + 1780, 2000, 0, 720, 725, 730, 825, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 86 - 2560x1080@24Hz */ + { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 99000, 2560, 3558, + 3602, 3750, 0, 1080, 1084, 1089, 1100, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, .picture_aspect_ratio = 
HDMI_PICTURE_ASPECT_64_27, }, + /* 87 - 2560x1080@25Hz */ + { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 90000, 2560, 3008, + 3052, 3200, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 88 - 2560x1080@30Hz */ + { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 118800, 2560, 3328, + 3372, 3520, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 89 - 2560x1080@50Hz */ + { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 185625, 2560, 3108, + 3152, 3300, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 90 - 2560x1080@60Hz */ + { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 2808, + 2852, 3000, 0, 1080, 1084, 1089, 1100, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 91 - 2560x1080@100Hz */ + { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 371250, 2560, 2778, + 2822, 2970, 0, 1080, 1084, 1089, 1250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 92 - 2560x1080@120Hz */ + { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 495000, 2560, 3108, + 3152, 3300, 0, 1080, 1084, 1089, 1250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 93 - 3840x2160p@24Hz 16:9 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116, + 5204, 5500, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 94 - 3840x2160p@25Hz 16:9 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896, + 4984, 5280, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 95 - 3840x2160p@30Hz 16:9 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016, + 4104, 4400, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 96 - 3840x2160p@50Hz 16:9 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896, + 4984, 5280, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 97 - 3840x2160p@60Hz 16:9 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016, + 4104, 4400, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 98 - 4096x2160p@24Hz 256:135 */ + { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5116, + 5204, 5500, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, + /* 99 - 4096x2160p@25Hz 256:135 */ + { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064, + 5152, 5280, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, + /* 100 - 4096x2160p@30Hz 256:135 */ + { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 
4096, 4184, + 4272, 4400, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, + /* 101 - 4096x2160p@50Hz 256:135 */ + { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064, + 5152, 5280, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, + /* 102 - 4096x2160p@60Hz 256:135 */ + { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 4184, + 4272, 4400, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, + /* 103 - 3840x2160p@24Hz 64:27 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116, + 5204, 5500, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 104 - 3840x2160p@25Hz 64:27 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896, + 4984, 5280, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 105 - 3840x2160p@30Hz 64:27 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016, + 4104, 4400, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 106 - 3840x2160p@50Hz 64:27 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896, + 4984, 5280, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 107 - 3840x2160p@60Hz 64:27 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016, + 4104, 4400, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, }; /* @@ -2902,6 +3117,15 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid) return modes; } +static u8 svd_to_vic(u8 svd) +{ + /* 0-6 bit vic, 7th bit native mode indicator */ + if ((svd >= 1 && svd <= 64) || (svd >= 129 && svd <= 192)) + return svd & 127; + + return svd; +} + static struct drm_display_mode * drm_display_mode_from_vic_index(struct drm_connector *connector, const u8 *video_db, u8 video_len, @@ -2915,7 +3139,7 @@ drm_display_mode_from_vic_index(struct drm_connector *connector, return NULL; /* CEA modes are numbered 1..127 */ - vic = (video_db[video_index] & 127); + vic = svd_to_vic(video_db[video_index]); if (!drm_valid_cea_vic(vic)) return NULL; From 0f0f87083015af9bcfd4dc04bae92258c980861c Mon Sep 17 00:00:00 2001 From: Shashank Sharma Date: Thu, 13 Jul 2017 21:03:09 +0530 Subject: [PATCH 0358/1795] drm/edid: parse sink information before CEA blocks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CEA-861-F adds ycbcr capability map block, for HDMI 2.0 sinks. This block contains a map of indexes of CEA modes, which can support YCBCR 420 output also. To avoid multiple parsing of same CEA block, let's parse the sink information and get this map, before parsing CEA modes. This patch moves the call to drm_add_display_info function, before the mode parsing block. 
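(Outline of the resulting order, for illustration only; the function name
below is invented, drm_add_display_info() and add_cea_modes() are the
existing drm_edid.c helpers, and the quirk handling plus the other mode
sources are elided:

    int drm_add_edid_modes_outline(struct drm_connector *connector,
                                   struct edid *edid)
    {
            int num_modes = 0;

            /* 1. Parse sink info first, so the YCBCR 420 capability map
             *    from the CEA extension is known before modes are added.
             */
            drm_add_display_info(connector, edid);

            /* 2. CEA mode parsing can now consult that map without
             *    re-parsing the extension block.
             */
            num_modes += add_cea_modes(connector, edid);

            return num_modes;
    }

The rest of drm_add_edid_modes() keeps its existing structure.)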
V4: Introduced new patch in the series V5: Move this patch before 4:2:0 parsing patch (ville) Added r-b from Ville V6: Rebase V7: Rebase Reviewed-by: Ville Syrjälä Signed-off-by: Shashank Sharma Link: http://patchwork.freedesktop.org/patch/msgid/1499960000-9232-4-git-send-email-shashank.sharma@intel.com Signed-off-by: Ville Syrjälä --- drivers/gpu/drm/drm_edid.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 42c83add9d37..dc0eb31355c9 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -4438,6 +4438,13 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) quirks = edid_get_quirks(edid); + /* + * CEA-861-F adds ycbcr capability map block, for HDMI 2.0 sinks. + * To avoid multiple parsing of same block, lets parse that map + * from sink info, before parsing CEA modes. + */ + drm_add_display_info(connector, edid); + /* * EDID spec says modes should be preferred in this order: * - preferred detailed mode @@ -4465,8 +4472,6 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75)) edid_fixup_preferred(connector, quirks); - drm_add_display_info(connector, edid); - if (quirks & EDID_QUIRK_FORCE_6BPC) connector->display_info.bpc = 6; From 87563fc0301f44f7a37ecba9490a60507ae9bbf2 Mon Sep 17 00:00:00 2001 From: Shashank Sharma Date: Thu, 13 Jul 2017 21:03:10 +0530 Subject: [PATCH 0359/1795] drm/edid: cleanup patch for CEA extended-tag macro MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CEA-861-F introduces extended tag codes for EDID extension blocks, which indicates the actual type of the data block. 
The code for using exteded tag is 0x7, whereas in the existing code, the corresponding macro is named as "VIDEO_CAPABILITY_BLOCK" This patch renames the macro and usages from "VIDEO_CAPABILITY_BLOCK" to "USE_EXTENDED_TAG" V2: Add extended tag code check for video capabilitiy block (ville) V3: Ville: - Use suggested names for macros - Check the block length first, before checking the extended tag V4: Fix commit message (David) V5: Introduced this patch into HDMI-YCBCR-output series V6: Rebase V7: Rebase Cc: Ville Syrjala Signed-off-by: Shashank Sharma Link: http://patchwork.freedesktop.org/patch/msgid/1499960000-9232-5-git-send-email-shashank.sharma@intel.com Signed-off-by: Ville Syrjälä --- drivers/gpu/drm/drm_edid.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index dc0eb31355c9..9a3937527058 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2781,7 +2781,8 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid, #define VIDEO_BLOCK 0x02 #define VENDOR_BLOCK 0x03 #define SPEAKER_BLOCK 0x04 -#define VIDEO_CAPABILITY_BLOCK 0x07 +#define USE_EXTENDED_TAG 0x07 +#define EXT_VIDEO_CAPABILITY_BLOCK 0x00 #define EDID_BASIC_AUDIO (1 << 6) #define EDID_CEA_YCRCB444 (1 << 5) #define EDID_CEA_YCRCB422 (1 << 4) @@ -3441,6 +3442,12 @@ cea_db_payload_len(const u8 *db) return db[0] & 0x1f; } +static int +cea_db_extended_tag(const u8 *db) +{ + return db[1]; +} + static int cea_db_tag(const u8 *db) { @@ -4017,8 +4024,10 @@ bool drm_rgb_quant_range_selectable(struct edid *edid) return false; for_each_cea_db(edid_ext, i, start, end) { - if (cea_db_tag(&edid_ext[i]) == VIDEO_CAPABILITY_BLOCK && - cea_db_payload_len(&edid_ext[i]) == 2) { + if (cea_db_tag(&edid_ext[i]) == USE_EXTENDED_TAG && + cea_db_payload_len(&edid_ext[i]) == 2 && + cea_db_extended_tag(&edid_ext[i]) == + EXT_VIDEO_CAPABILITY_BLOCK) { DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]); return edid_ext[i + 2] & EDID_CEA_VCDB_QS; } From d85231530b0719e23a62d92ee35712da966e281a Mon Sep 17 00:00:00 2001 From: Shashank Sharma Date: Thu, 13 Jul 2017 21:03:11 +0530 Subject: [PATCH 0360/1795] drm: add helper to validate YCBCR420 modes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit YCBCR420 modes are supported only on HDMI 2.0 capable sources. This patch adds: - A drm helper to validate YCBCR420-only mode on a particular connector. This function will help pruning the YCBCR420-only modes from the connector's modelist. - A bool variable (ycbcr_420_allowed) in the drm connector structure. While handling the EDID from HDMI 2.0 sinks, its important to know if the source is capable of handling YCBCR420 output, so that no YCBCR 420 modes will be listed for sources which can't handle it. A driver should set this variable if it wants to see YCBCR420 modes in the modedb. V5: Introduced the patch in series. V6: Squashed two patches (validate YCBCR420 and add YCBCR420 identifier) V7: Addressed review comments from Vile: - Move this patch before we add 420 modes from EDID. - No need for drm_valid_cea_vic() check, function back to non-static. - Update MODE_STATUS with NO_420 condition. 
- Introduce y420_vdb_modes variable in this patch Cc: Ville Syrjala Signed-off-by: Shashank Sharma Link: http://patchwork.freedesktop.org/patch/msgid/1499960000-9232-6-git-send-email-shashank.sharma@intel.com [vsyrjala: Drop the now bogus EXPORT_SYMBOL(drm_valid_cea_vic)] Signed-off-by: Ville Syrjälä --- drivers/gpu/drm/drm_modes.c | 29 +++++++++++++++++++++++++++++ drivers/gpu/drm/drm_probe_helper.c | 4 ++++ include/drm/drm_connector.h | 17 +++++++++++++++++ include/drm/drm_modes.h | 5 +++++ 4 files changed, 55 insertions(+) diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index f2493b9b82e6..35630b80cd48 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1083,6 +1083,34 @@ drm_mode_validate_size(const struct drm_display_mode *mode, } EXPORT_SYMBOL(drm_mode_validate_size); +/** + * drm_mode_validate_ycbcr420 - add 'ycbcr420-only' modes only when allowed + * @mode: mode to check + * @connector: drm connector under action + * + * This function is a helper which can be used to filter out any YCBCR420 + * only mode, when the source doesn't support it. + * + * Returns: + * The mode status + */ +enum drm_mode_status +drm_mode_validate_ycbcr420(const struct drm_display_mode *mode, + struct drm_connector *connector) +{ + u8 vic = drm_match_cea_mode(mode); + enum drm_mode_status status = MODE_OK; + struct drm_hdmi_info *hdmi = &connector->display_info.hdmi; + + if (test_bit(vic, hdmi->y420_vdb_modes)) { + if (!connector->ycbcr_420_allowed) + status = MODE_NO_420; + } + + return status; +} +EXPORT_SYMBOL(drm_mode_validate_ycbcr420); + #define MODE_STATUS(status) [MODE_ ## status + 3] = #status static const char * const drm_mode_status_names[] = { @@ -1122,6 +1150,7 @@ static const char * const drm_mode_status_names[] = { MODE_STATUS(ONE_SIZE), MODE_STATUS(NO_REDUCED), MODE_STATUS(NO_STEREO), + MODE_STATUS(NO_420), MODE_STATUS(STALE), MODE_STATUS(BAD), MODE_STATUS(ERROR), diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 00e6832a8c1a..904966cde32b 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -528,6 +528,10 @@ retry: if (mode->status == MODE_OK) mode->status = drm_mode_validate_pipeline(mode, connector); + + if (mode->status == MODE_OK) + mode->status = drm_mode_validate_ycbcr420(mode, + connector); } prune: diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index ae5b7dc316c8..26dd3eb9767d 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -135,6 +135,14 @@ struct drm_scdc { struct drm_hdmi_info { /** @scdc: sink's scdc support and capabilities */ struct drm_scdc scdc; + + /** + * @y420_vdb_modes: bitmap of modes which can support ycbcr420 + * output only (not normal RGB/YCBCR444/422 outputs). There are total + * 107 VICs defined by CEA-861-F spec, so the size is 128 bits to map + * upto 128 VICs; + */ + unsigned long y420_vdb_modes[BITS_TO_LONGS(128)]; }; /** @@ -726,6 +734,15 @@ struct drm_connector { bool interlace_allowed; bool doublescan_allowed; bool stereo_allowed; + + /** + * @ycbcr_420_allowed : This bool indicates if this connector is + * capable of handling YCBCR 420 output. While parsing the EDID + * blocks, its very helpful to know, if the source is capable of + * handling YCBCR 420 outputs. + */ + bool ycbcr_420_allowed; + /** * @registered: Is this connector exposed (registered) with userspace? * Protected by @mutex. 
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index 94ac771fe460..f8a1268dfbcb 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -80,6 +80,7 @@ struct videomode; * @MODE_ONE_SIZE: only one resolution is supported * @MODE_NO_REDUCED: monitor doesn't accept reduced blanking * @MODE_NO_STEREO: stereo modes not supported + * @MODE_NO_420: ycbcr 420 modes not supported * @MODE_STALE: mode has become stale * @MODE_BAD: unspecified reason * @MODE_ERROR: error condition @@ -124,6 +125,7 @@ enum drm_mode_status { MODE_ONE_SIZE, MODE_NO_REDUCED, MODE_NO_STEREO, + MODE_NO_420, MODE_STALE = -3, MODE_BAD = -2, MODE_ERROR = -1 @@ -496,6 +498,9 @@ bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, enum drm_mode_status drm_mode_validate_basic(const struct drm_display_mode *mode); enum drm_mode_status drm_mode_validate_size(const struct drm_display_mode *mode, int maxX, int maxY); +enum drm_mode_status +drm_mode_validate_ycbcr420(const struct drm_display_mode *mode, + struct drm_connector *connector); void drm_mode_prune_invalid(struct drm_device *dev, struct list_head *mode_list, bool verbose); void drm_mode_sort(struct list_head *mode_list); From 832d4f2f417d1786769c7a91a0a6363ea58cfc10 Mon Sep 17 00:00:00 2001 From: Shashank Sharma Date: Fri, 14 Jul 2017 16:03:46 +0530 Subject: [PATCH 0361/1795] drm/edid: parse YCBCR420 videomodes from EDID MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit HDMI 2.0 spec adds support for YCBCR420 sub-sampled output. CEA-861-F adds two new blocks in EDID's CEA extension blocks, to provide information about sink's YCBCR420 output capabilities. These blocks are: - YCBCR420vdb(YCBCR 420 video data block): This block contains VICs of video modes, which can be supported only in YCBCR420 output mode (not in RGB/YCBCR444/422). It's like a normal SVD block, valid for YCBCR420 modes only. - YCBCR420cmdb(YCBCR 420 capability map data block): This block gives information about video modes which can support YCBCR420 output mode also (along with RGB, YCBCR444/422 etc.) This block contains a bitmap index of normal svd videomodes, which can support YCBCR420 output too. So if bit 0 from first vcb byte is set, first video mode in the svd list can support YCBCR420 output too. Bit 1 means second video mode from svd list can support YCBCR420 output too, and so on. This patch adds two bitmaps in display's hdmi_info structure, one each for VCB and VDB modes. If the source is HDMI 2.0 capable, this patch adds: - VDB modes (YCBCR 420 only modes) in connector's mode list, also makes an entry in the vdb_bitmap per vic. - VCB modes (YCBCR 420 also modes) only entry in the vcb_bitmap. Cc: Ville Syrjala Cc: Jose Abreu Cc: Emil Velikov V2: Addressed Review comments from Emil: - Use 1ULL< 64 modes in capability map block. - Use y420cmdb in function names and macros while dealing with vcb to be aligned with spec. - Move the display information parsing block ahead of mode parsing blocks. V3: Addressed design/review comments from Ville - Do not add flags in video modes, else we have to expose them to user - There should not be a UABI change, and kernel should detect the choice of the output based on type of mode, and the bitmaps. - Use standard bitops from kernel bitmap header, instead of calculating bit positions manually. 
V4: Addressed review comments from Ville: - s/ycbcr_420_vdb/y420vdb - s/ycbcr_420_vcb/y420cmdb - Be less verbose on description of do_y420vdb_modes - Move newmode variable in the loop scope. - Use svd_to_vic() to get a VIC, instead of 0x7f - Remove bitmap description for CMDB modes & VDB modes - Dont add connector->ycbcr_420_allowed check for cmdb modes - Remove 'len' variable, in is_y420cmdb function, which is used only once - Add length check in is_y420vdb function - Remove unnecessary if (!db) check in function parse_y420cmdb_bitmap - Do not add print about YCBCR 420 modes - Fix indentation in few places - Move ycbcr420_dc_modes in next patch, where its used - Add a separate patch for movement of drm_add_display_info() V5: Addressed review comments from Ville: - Add the patch which cleans up the current EXTENDED_TAG usage - Make y420_cmdb_map u64 - Do not block ycbcr420 modes while parsing the EDID, rather add a separate helper function to prune ycbcr420-only modes from connector's probed modes. V6: Rebase V7: Move this patch after the 420_only validation patch (Ville) V8: Addressed review comments from Ville - use cea_vic_valid check before adding cmdb/vdb modes - add check for i < 64 while adding cmdb modes - use 1ULL while checking bitmap Signed-off-by: Shashank Sharma Link: http://patchwork.freedesktop.org/patch/msgid/1500028426-14883-1-git-send-email-shashank.sharma@intel.com [vsyrjala: Fix checkpatch complaints and indentation] Signed-off-by: Ville Syrjälä --- drivers/gpu/drm/drm_edid.c | 152 +++++++++++++++++++++++++++++++++++- include/drm/drm_connector.h | 12 +++ 2 files changed, 162 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 9a3937527058..31881a71118d 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2783,6 +2783,8 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid, #define SPEAKER_BLOCK 0x04 #define USE_EXTENDED_TAG 0x07 #define EXT_VIDEO_CAPABILITY_BLOCK 0x00 +#define EXT_VIDEO_DATA_BLOCK_420 0x0E +#define EXT_VIDEO_CAP_BLOCK_Y420CMDB 0x0F #define EDID_BASIC_AUDIO (1 << 6) #define EDID_CEA_YCRCB444 (1 << 5) #define EDID_CEA_YCRCB422 (1 << 4) @@ -3153,15 +3155,85 @@ drm_display_mode_from_vic_index(struct drm_connector *connector, return newmode; } +/* + * do_y420vdb_modes - Parse YCBCR 420 only modes + * @connector: connector corresponding to the HDMI sink + * @svds: start of the data block of CEA YCBCR 420 VDB + * @len: length of the CEA YCBCR 420 VDB + * + * Parse the CEA-861-F YCBCR 420 Video Data Block (Y420VDB) + * which contains modes which can be supported in YCBCR 420 + * output format only. 
+ */ +static int do_y420vdb_modes(struct drm_connector *connector, + const u8 *svds, u8 svds_len) +{ + int modes = 0, i; + struct drm_device *dev = connector->dev; + struct drm_display_info *info = &connector->display_info; + struct drm_hdmi_info *hdmi = &info->hdmi; + + for (i = 0; i < svds_len; i++) { + u8 vic = svd_to_vic(svds[i]); + struct drm_display_mode *newmode; + + if (!drm_valid_cea_vic(vic)) + continue; + + newmode = drm_mode_duplicate(dev, &edid_cea_modes[vic]); + if (!newmode) + break; + bitmap_set(hdmi->y420_vdb_modes, vic, 1); + drm_mode_probed_add(connector, newmode); + modes++; + } + + if (modes > 0) + info->color_formats |= DRM_COLOR_FORMAT_YCRCB420; + return modes; +} + +/* + * drm_add_cmdb_modes - Add a YCBCR 420 mode into bitmap + * @connector: connector corresponding to the HDMI sink + * @vic: CEA vic for the video mode to be added in the map + * + * Makes an entry for a videomode in the YCBCR 420 bitmap + */ +static void +drm_add_cmdb_modes(struct drm_connector *connector, u8 svd) +{ + u8 vic = svd_to_vic(svd); + struct drm_hdmi_info *hdmi = &connector->display_info.hdmi; + + if (!drm_valid_cea_vic(vic)) + return; + + bitmap_set(hdmi->y420_cmdb_modes, vic, 1); +} + static int do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len) { int i, modes = 0; + struct drm_hdmi_info *hdmi = &connector->display_info.hdmi; for (i = 0; i < len; i++) { struct drm_display_mode *mode; mode = drm_display_mode_from_vic_index(connector, db, len, i); if (mode) { + /* + * YCBCR420 capability block contains a bitmap which + * gives the index of CEA modes from CEA VDB, which + * can support YCBCR 420 sampling output also (apart + * from RGB/YCBCR444 etc). + * For example, if the bit 0 in bitmap is set, + * first mode in VDB can support YCBCR420 output too. + * Add YCBCR420 modes only if sink is HDMI 2.0 capable. + */ + if (i < 64 && hdmi->y420_cmdb_map & (1ULL << i)) + drm_add_cmdb_modes(connector, db[i]); + drm_mode_probed_add(connector, mode); modes++; } @@ -3503,9 +3575,77 @@ static bool cea_db_is_hdmi_forum_vsdb(const u8 *db) return oui == HDMI_FORUM_IEEE_OUI; } +static bool cea_db_is_y420cmdb(const u8 *db) +{ + if (cea_db_tag(db) != USE_EXTENDED_TAG) + return false; + + if (!cea_db_payload_len(db)) + return false; + + if (cea_db_extended_tag(db) != EXT_VIDEO_CAP_BLOCK_Y420CMDB) + return false; + + return true; +} + +static bool cea_db_is_y420vdb(const u8 *db) +{ + if (cea_db_tag(db) != USE_EXTENDED_TAG) + return false; + + if (!cea_db_payload_len(db)) + return false; + + if (cea_db_extended_tag(db) != EXT_VIDEO_DATA_BLOCK_420) + return false; + + return true; +} + #define for_each_cea_db(cea, i, start, end) \ for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1) +static void drm_parse_y420cmdb_bitmap(struct drm_connector *connector, + const u8 *db) +{ + struct drm_display_info *info = &connector->display_info; + struct drm_hdmi_info *hdmi = &info->hdmi; + u8 map_len = cea_db_payload_len(db) - 1; + u8 count; + u64 map = 0; + + if (map_len == 0) { + /* All CEA modes support ycbcr420 sampling also.*/ + hdmi->y420_cmdb_map = U64_MAX; + info->color_formats |= DRM_COLOR_FORMAT_YCRCB420; + return; + } + + /* + * This map indicates which of the existing CEA block modes + * from VDB can support YCBCR420 output too. So if bit=0 is + * set, first mode from VDB can support YCBCR420 output too. + * We will parse and keep this map, before parsing VDB itself + * to avoid going through the same block again and again. 
+ * + * Spec is not clear about max possible size of this block. + * Clamping max bitmap block size at 8 bytes. Every byte can + * address 8 CEA modes, in this way this map can address + * 8*8 = first 64 SVDs. + */ + if (WARN_ON_ONCE(map_len > 8)) + map_len = 8; + + for (count = 0; count < map_len; count++) + map |= (u64)db[2 + count] << (8 * count); + + if (map) + info->color_formats |= DRM_COLOR_FORMAT_YCRCB420; + + hdmi->y420_cmdb_map = map; +} + static int add_cea_modes(struct drm_connector *connector, struct edid *edid) { @@ -3528,10 +3668,16 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid) video = db + 1; video_len = dbl; modes += do_cea_modes(connector, video, dbl); - } - else if (cea_db_is_hdmi_vsdb(db)) { + } else if (cea_db_is_hdmi_vsdb(db)) { hdmi = db; hdmi_len = dbl; + } else if (cea_db_is_y420vdb(db)) { + const u8 *vdb420 = &db[2]; + + /* Add 4:2:0(only) modes present in EDID */ + modes += do_y420vdb_modes(connector, + vdb420, + dbl - 1); } } } @@ -4214,6 +4360,8 @@ static void drm_parse_cea_ext(struct drm_connector *connector, drm_parse_hdmi_vsdb_video(connector, db); if (cea_db_is_hdmi_forum_vsdb(db)) drm_parse_hdmi_forum_vsdb(connector, db); + if (cea_db_is_y420cmdb(db)) + drm_parse_y420cmdb_bitmap(connector, db); } } diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 26dd3eb9767d..225e09256645 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -143,6 +143,17 @@ struct drm_hdmi_info { * upto 128 VICs; */ unsigned long y420_vdb_modes[BITS_TO_LONGS(128)]; + + /** + * @y420_cmdb_modes: bitmap of modes which can support ycbcr420 + * output also, along with normal HDMI outputs. There are total 107 + * VICs defined by CEA-861-F spec, so the size is 128 bits to map upto + * 128 VICs; + */ + unsigned long y420_cmdb_modes[BITS_TO_LONGS(128)]; + + /** @y420_cmdb_map: bitmap of SVD index, to extraxt vcb modes */ + u64 y420_cmdb_map; }; /** @@ -206,6 +217,7 @@ struct drm_display_info { #define DRM_COLOR_FORMAT_RGB444 (1<<0) #define DRM_COLOR_FORMAT_YCRCB444 (1<<1) #define DRM_COLOR_FORMAT_YCRCB422 (1<<2) +#define DRM_COLOR_FORMAT_YCRCB420 (1<<3) /** * @color_formats: HDMI Color formats, selects between RGB and YCrCb From e6a9a2c3dc4377f62650faf1c978d073c55d62ec Mon Sep 17 00:00:00 2001 From: Shashank Sharma Date: Thu, 13 Jul 2017 21:03:13 +0530 Subject: [PATCH 0362/1795] drm/edid: parse ycbcr 420 deep color information MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CEA-861-F spec adds ycbcr420 deep color support information in hf-vsdb block. This patch extends the existing hf-vsdb parsing function by adding parsing of ycbcr420 deep color support from the EDID and adding it into display information stored. 
V2: Rebase V3: Rebase V4: Moved definition of y420_dc_modes into this patch, where its used (Ville) V5: Optimize function, if(conditions) not reqd (Ville) V6: Rebase V7: Rebase Cc: Ville Syrjälä Cc: Jose Abreu Signed-off-by: Shashank Sharma Link: http://patchwork.freedesktop.org/patch/msgid/1499960000-9232-8-git-send-email-shashank.sharma@intel.com [vsyrjala: Fix sparse indentation warn] Signed-off-by: Ville Syrjälä --- drivers/gpu/drm/drm_edid.c | 12 ++++++++++++ include/drm/drm_connector.h | 3 +++ include/drm/drm_edid.h | 8 ++++++++ 3 files changed, 23 insertions(+) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 31881a71118d..6bb6337be920 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -4202,6 +4202,16 @@ drm_default_rgb_quant_range(const struct drm_display_mode *mode) } EXPORT_SYMBOL(drm_default_rgb_quant_range); +static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector, + const u8 *db) +{ + u8 dc_mask; + struct drm_hdmi_info *hdmi = &connector->display_info.hdmi; + + dc_mask = db[7] & DRM_EDID_YCBCR420_DC_MASK; + hdmi->y420_dc_modes |= dc_mask; +} + static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector, const u8 *hf_vsdb) { @@ -4242,6 +4252,8 @@ static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector, scdc->scrambling.low_rates = true; } } + + drm_parse_ycbcr420_deep_color_info(connector, hf_vsdb); } static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector, diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 225e09256645..4bc088269d05 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -154,6 +154,9 @@ struct drm_hdmi_info { /** @y420_cmdb_map: bitmap of SVD index, to extraxt vcb modes */ u64 y420_cmdb_map; + + /** @y420_dc_modes: bitmap of deep color support index */ + u8 y420_dc_modes; }; /** diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 89c00626d654..1e1908a6b1d6 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -213,6 +213,14 @@ struct detailed_timing { #define DRM_EDID_HDMI_DC_30 (1 << 4) #define DRM_EDID_HDMI_DC_Y444 (1 << 3) +/* YCBCR 420 deep color modes */ +#define DRM_EDID_YCBCR420_DC_48 (1 << 6) +#define DRM_EDID_YCBCR420_DC_36 (1 << 5) +#define DRM_EDID_YCBCR420_DC_30 (1 << 4) +#define DRM_EDID_YCBCR420_DC_MASK (DRM_EDID_YCBCR420_DC_48 | \ + DRM_EDID_YCBCR420_DC_36 | \ + DRM_EDID_YCBCR420_DC_30) + /* ELD Header Block */ #define DRM_ELD_HEADER_BLOCK_SIZE 4 From 2570fe2586254ff174c2ba5a20dabbde707dbb9b Mon Sep 17 00:00:00 2001 From: Shashank Sharma Date: Thu, 13 Jul 2017 21:03:14 +0530 Subject: [PATCH 0363/1795] drm: add helper functions for YCBCR420 handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds helper functions for YCBCR 420 handling. These functions do: - check if a given video mode is YCBCR 420 only mode. - check if a given video mode is YCBCR 420 also mode. V2: Added YCBCR functions as helpers in DRM layer, instead of keeping it in I915 layer. V3: Added handling for YCBCR-420 only modes too. 
V4: EXPORT_SYMBOL(drm_find_hdmi_output_type) V5: Addressed review comments from Danvet: - %s/drm_find_hdmi_output_type/drm_display_info_hdmi_output_type - %s/drm_can_support_ycbcr_output/drm_display_supports_ycbcr_output - %s/drm_can_support_this_ycbcr_output/ drm_display_supports_this_ycbcr_output - pass drm_display_info instead of drm_connector for consistency - For drm_get_highest_quality_ycbcr_supported doc, move the variable description above, and then the function description. V6: Add only YCBCR420 helpers (Ville) V7: Addressed review comments from Ville - Remove cea_vic_valid() check. - Fix indentation. - Make input parameters to helpers, const. Cc: Ville Syrjala Cc: Jose Abreu Cc: Daniel Vetter Signed-off-by: Shashank Sharma Link: http://patchwork.freedesktop.org/patch/msgid/1499960000-9232-9-git-send-email-shashank.sharma@intel.com [vsyrjala: Fix sparse indentation warn] Signed-off-by: Ville Syrjälä --- drivers/gpu/drm/drm_modes.c | 58 +++++++++++++++++++++++++++++++++++++ include/drm/drm_modes.h | 6 ++++ 2 files changed, 64 insertions(+) diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 35630b80cd48..c1aec532281c 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1605,3 +1605,61 @@ int drm_mode_convert_umode(struct drm_display_mode *out, out: return ret; } + +/** + * drm_mode_is_420_only - if a given videomode can be only supported in YCBCR420 + * output format + * + * @connector: drm connector under action. + * @mode: video mode to be tested. + * + * Returns: + * true if the mode can be supported in YCBCR420 format + * false if not. + */ +bool drm_mode_is_420_only(const struct drm_display_info *display, + const struct drm_display_mode *mode) +{ + u8 vic = drm_match_cea_mode(mode); + + return test_bit(vic, display->hdmi.y420_vdb_modes); +} +EXPORT_SYMBOL(drm_mode_is_420_only); + +/** + * drm_mode_is_420_also - if a given videomode can be supported in YCBCR420 + * output format also (along with RGB/YCBCR444/422) + * + * @display: display under action. + * @mode: video mode to be tested. + * + * Returns: + * true if the mode can be support YCBCR420 format + * false if not. + */ +bool drm_mode_is_420_also(const struct drm_display_info *display, + const struct drm_display_mode *mode) +{ + u8 vic = drm_match_cea_mode(mode); + + return test_bit(vic, display->hdmi.y420_cmdb_modes); +} +EXPORT_SYMBOL(drm_mode_is_420_also); +/** + * drm_mode_is_420 - if a given videomode can be supported in YCBCR420 + * output format + * + * @display: display under action. + * @mode: video mode to be tested. + * + * Returns: + * true if the mode can be supported in YCBCR420 format + * false if not. 
+ */ +bool drm_mode_is_420(const struct drm_display_info *display, + const struct drm_display_mode *mode) +{ + return drm_mode_is_420_only(display, mode) || + drm_mode_is_420_also(display, mode); +} +EXPORT_SYMBOL(drm_mode_is_420); diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index f8a1268dfbcb..9f3421c8efcd 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -452,6 +452,12 @@ int drm_mode_convert_umode(struct drm_display_mode *out, const struct drm_mode_modeinfo *in); void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); void drm_mode_debug_printmodeline(const struct drm_display_mode *mode); +bool drm_mode_is_420_only(const struct drm_display_info *display, + const struct drm_display_mode *mode); +bool drm_mode_is_420_also(const struct drm_display_info *display, + const struct drm_display_mode *mode); +bool drm_mode_is_420(const struct drm_display_info *display, + const struct drm_display_mode *mode); struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh, From e65d51126f89a0d67ee6c5df58363730b1410ab5 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Tue, 27 Jun 2017 12:58:32 -0700 Subject: [PATCH 0364/1795] drm/vc4: Fix DSI T_INIT timing. The DPHY spec requires a much larger T_INIT than I was specifying before. In the absence of clear specs from the slave of what their timing is, just use the value that the firmware was using. Signed-off-by: Eric Anholt Link: http://patchwork.freedesktop.org/patch/msgid/20170627195839.3338-2-eric@anholt.net Reviewed-by: Andrzej Hajda --- drivers/gpu/drm/vc4/vc4_dsi.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c index 5e8b81eaa168..15f6d5005ab9 100644 --- a/drivers/gpu/drm/vc4/vc4_dsi.c +++ b/drivers/gpu/drm/vc4/vc4_dsi.c @@ -1035,7 +1035,17 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder) DSI_HS_DLT4_TRAIL) | VC4_SET_FIELD(0, DSI_HS_DLT4_ANLAT)); - DSI_PORT_WRITE(HS_DLT5, VC4_SET_FIELD(dsi_hs_timing(ui_ns, 1000, 5000), + /* T_INIT is how long STOP is driven after power-up to + * indicate to the slave (also coming out of power-up) that + * master init is complete, and should be greater than the + * maximum of two value: T_INIT,MASTER and T_INIT,SLAVE. The + * D-PHY spec gives a minimum 100us for T_INIT,MASTER and + * T_INIT,SLAVE, while allowing protocols on top of it to give + * greater minimums. The vc4 firmware uses an extremely + * conservative 5ms, and we maintain that here. + */ + DSI_PORT_WRITE(HS_DLT5, VC4_SET_FIELD(dsi_hs_timing(ui_ns, + 5 * 1000 * 1000, 0), DSI_HS_DLT5_INIT)); DSI_PORT_WRITE(HS_DLT6, From ec878c0756a0c202e86256dca1de307ab1189ab8 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Tue, 27 Jun 2017 12:58:33 -0700 Subject: [PATCH 0365/1795] drm/vc4: Fix misleading name of the continuous flag. The logic was all right in the end, the name was just backwards. Signed-off-by: Eric Anholt Link: http://patchwork.freedesktop.org/patch/msgid/20170627195839.3338-3-eric@anholt.net Reviewed-by: Andrzej Hajda --- drivers/gpu/drm/vc4/vc4_dsi.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c index 15f6d5005ab9..629d372633e6 100644 --- a/drivers/gpu/drm/vc4/vc4_dsi.c +++ b/drivers/gpu/drm/vc4/vc4_dsi.c @@ -736,18 +736,18 @@ static void vc4_dsi_latch_ulps(struct vc4_dsi *dsi, bool latch) /* Enters or exits Ultra Low Power State. 
*/ static void vc4_dsi_ulps(struct vc4_dsi *dsi, bool ulps) { - bool continuous = dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS; - u32 phyc_ulps = ((continuous ? DSI_PORT_BIT(PHYC_CLANE_ULPS) : 0) | + bool non_continuous = dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS; + u32 phyc_ulps = ((non_continuous ? DSI_PORT_BIT(PHYC_CLANE_ULPS) : 0) | DSI_PHYC_DLANE0_ULPS | (dsi->lanes > 1 ? DSI_PHYC_DLANE1_ULPS : 0) | (dsi->lanes > 2 ? DSI_PHYC_DLANE2_ULPS : 0) | (dsi->lanes > 3 ? DSI_PHYC_DLANE3_ULPS : 0)); - u32 stat_ulps = ((continuous ? DSI1_STAT_PHY_CLOCK_ULPS : 0) | + u32 stat_ulps = ((non_continuous ? DSI1_STAT_PHY_CLOCK_ULPS : 0) | DSI1_STAT_PHY_D0_ULPS | (dsi->lanes > 1 ? DSI1_STAT_PHY_D1_ULPS : 0) | (dsi->lanes > 2 ? DSI1_STAT_PHY_D2_ULPS : 0) | (dsi->lanes > 3 ? DSI1_STAT_PHY_D3_ULPS : 0)); - u32 stat_stop = ((continuous ? DSI1_STAT_PHY_CLOCK_STOP : 0) | + u32 stat_stop = ((non_continuous ? DSI1_STAT_PHY_CLOCK_STOP : 0) | DSI1_STAT_PHY_D0_STOP | (dsi->lanes > 1 ? DSI1_STAT_PHY_D1_STOP : 0) | (dsi->lanes > 2 ? DSI1_STAT_PHY_D2_STOP : 0) | From 934d1431929bb5ecde68fb38fe4b2a6f4ab0fba6 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Mon, 3 Jul 2017 21:35:20 +0530 Subject: [PATCH 0366/1795] drm/sun4i: constify drm_plane_helper_funcs drm_plane_helper_funcs are not supposed to change at runtime. All functions working with drm_plane_helper_funcs provided by work with const drm_plane_helper_funcs. So mark the non-const structs as const. File size before: text data bss dec hex filename 981 40 0 1021 3fd drivers/gpu/drm/sun4i/sun4i_layer.o File size After adding 'const': text data bss dec hex filename 1021 0 0 1021 3fd drivers/gpu/drm/sun4i/sun4i_layer.o Signed-off-by: Arvind Yadav Signed-off-by: Maxime Ripard --- drivers/gpu/drm/sun4i/sun4i_layer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c index ead4f9d4c1ee..6a887ac28aae 100644 --- a/drivers/gpu/drm/sun4i/sun4i_layer.c +++ b/drivers/gpu/drm/sun4i/sun4i_layer.c @@ -52,7 +52,7 @@ static void sun4i_backend_layer_atomic_update(struct drm_plane *plane, sun4i_backend_layer_enable(backend, layer->id, true); } -static struct drm_plane_helper_funcs sun4i_backend_layer_helper_funcs = { +static const struct drm_plane_helper_funcs sun4i_backend_layer_helper_funcs = { .atomic_check = sun4i_backend_layer_atomic_check, .atomic_disable = sun4i_backend_layer_atomic_disable, .atomic_update = sun4i_backend_layer_atomic_update, From f0a3dd33ba685bc50f78455aec832ebcc129a687 Mon Sep 17 00:00:00 2001 From: Jonathan Liu Date: Sun, 2 Jul 2017 17:27:10 +1000 Subject: [PATCH 0367/1795] drm/sun4i: hdmi: Implement I2C adapter for A10s DDC bus The documentation for drm_do_get_edid in drivers/gpu/drm/drm_edid.c states: "As in the general case the DDC bus is accessible by the kernel at the I2C level, drivers must make all reasonable efforts to expose it as an I2C adapter and use drm_get_edid() instead of abusing this function." Exposing the DDC bus as an I2C adapter is more beneficial as it can be used for purposes other than reading the EDID such as modifying the EDID or using the HDMI DDC pins as an I2C bus through the I2C dev interface from userspace (e.g. i2c-tools). Implement this for A10s. 
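As a rough illustration of what exposing the DDC bus buys (a sketch under assumptions, not part of this patch): with CONFIG_I2C_CHARDEV enabled, userspace can read the base EDID block straight off the registered adapter. The bus number below and the simple offset-write/block-read sequence are assumptions for the example only.

/*
 * Illustrative sketch: read the first 128-byte EDID block via i2c-dev.
 * Assumptions: the HDMI DDC adapter shows up as /dev/i2c-1 and the sink
 * answers at the standard DDC address 0x50 with a plain offset/read cycle.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c-dev.h>

int main(void)
{
	unsigned char offset = 0, edid[128];
	int fd = open("/dev/i2c-1", O_RDWR);	/* assumed bus number */

	if (fd < 0)
		return 1;
	if (ioctl(fd, I2C_SLAVE, 0x50) < 0)	/* DDC EDID slave address */
		return 1;
	if (write(fd, &offset, 1) != 1)		/* start reading at offset 0 */
		return 1;
	if (read(fd, edid, sizeof(edid)) != (ssize_t)sizeof(edid))
		return 1;
	printf("EDID header bytes: %02x %02x ... %02x\n",
	       edid[0], edid[1], edid[7]);
	close(fd);
	return 0;
}
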
Signed-off-by: Jonathan Liu Signed-off-by: Maxime Ripard --- drivers/gpu/drm/sun4i/Makefile | 1 + drivers/gpu/drm/sun4i/sun4i_hdmi.h | 24 +++ drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c | 101 ++---------- drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c | 220 +++++++++++++++++++++++++ 4 files changed, 256 insertions(+), 90 deletions(-) create mode 100644 drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile index e29fd3a2ba9c..43c753cafc88 100644 --- a/drivers/gpu/drm/sun4i/Makefile +++ b/drivers/gpu/drm/sun4i/Makefile @@ -2,6 +2,7 @@ sun4i-drm-y += sun4i_drv.o sun4i-drm-y += sun4i_framebuffer.o sun4i-drm-hdmi-y += sun4i_hdmi_enc.o +sun4i-drm-hdmi-y += sun4i_hdmi_i2c.o sun4i-drm-hdmi-y += sun4i_hdmi_ddc_clk.o sun4i-drm-hdmi-y += sun4i_hdmi_tmds_clk.o diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h index 2f2f2ff1ea63..0957ff2076ac 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h @@ -96,6 +96,7 @@ #define SUN4I_HDMI_DDC_CTRL_ENABLE BIT(31) #define SUN4I_HDMI_DDC_CTRL_START_CMD BIT(30) #define SUN4I_HDMI_DDC_CTRL_FIFO_DIR_MASK BIT(8) +#define SUN4I_HDMI_DDC_CTRL_FIFO_DIR_WRITE (1 << 8) #define SUN4I_HDMI_DDC_CTRL_FIFO_DIR_READ (0 << 8) #define SUN4I_HDMI_DDC_CTRL_RESET BIT(0) @@ -105,14 +106,34 @@ #define SUN4I_HDMI_DDC_ADDR_OFFSET(off) (((off) & 0xff) << 8) #define SUN4I_HDMI_DDC_ADDR_SLAVE(addr) ((addr) & 0xff) +#define SUN4I_HDMI_DDC_INT_STATUS_REG 0x50c +#define SUN4I_HDMI_DDC_INT_STATUS_ILLEGAL_FIFO_OPERATION BIT(7) +#define SUN4I_HDMI_DDC_INT_STATUS_DDC_RX_FIFO_UNDERFLOW BIT(6) +#define SUN4I_HDMI_DDC_INT_STATUS_DDC_TX_FIFO_OVERFLOW BIT(5) +#define SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST BIT(4) +#define SUN4I_HDMI_DDC_INT_STATUS_ARBITRATION_ERROR BIT(3) +#define SUN4I_HDMI_DDC_INT_STATUS_ACK_ERROR BIT(2) +#define SUN4I_HDMI_DDC_INT_STATUS_BUS_ERROR BIT(1) +#define SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE BIT(0) + #define SUN4I_HDMI_DDC_FIFO_CTRL_REG 0x510 #define SUN4I_HDMI_DDC_FIFO_CTRL_CLEAR BIT(31) +#define SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES(n) (((n) & 0xf) << 4) +#define SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES_MASK GENMASK(7, 4) +#define SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES_MAX (BIT(4) - 1) +#define SUN4I_HDMI_DDC_FIFO_CTRL_TX_THRES(n) ((n) & 0xf) +#define SUN4I_HDMI_DDC_FIFO_CTRL_TX_THRES_MASK GENMASK(3, 0) +#define SUN4I_HDMI_DDC_FIFO_CTRL_TX_THRES_MAX (BIT(4) - 1) #define SUN4I_HDMI_DDC_FIFO_DATA_REG 0x518 + #define SUN4I_HDMI_DDC_BYTE_COUNT_REG 0x51c +#define SUN4I_HDMI_DDC_BYTE_COUNT_MAX (BIT(10) - 1) #define SUN4I_HDMI_DDC_CMD_REG 0x520 #define SUN4I_HDMI_DDC_CMD_EXPLICIT_EDDC_READ 6 +#define SUN4I_HDMI_DDC_CMD_IMPLICIT_READ 5 +#define SUN4I_HDMI_DDC_CMD_IMPLICIT_WRITE 3 #define SUN4I_HDMI_DDC_CLK_REG 0x528 #define SUN4I_HDMI_DDC_CLK_M(m) (((m) & 0x7) << 3) @@ -146,6 +167,8 @@ struct sun4i_hdmi { struct clk *ddc_clk; struct clk *tmds_clk; + struct i2c_adapter *i2c; + struct sun4i_drv *drv; bool hdmi_monitor; @@ -153,5 +176,6 @@ struct sun4i_hdmi { int sun4i_ddc_create(struct sun4i_hdmi *hdmi, struct clk *clk); int sun4i_tmds_create(struct sun4i_hdmi *hdmi); +int sun4i_hdmi_i2c_create(struct device *dev, struct sun4i_hdmi *hdmi); #endif /* _SUN4I_HDMI_H_ */ diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index d3398f6250ef..b74607feb35c 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c @@ -29,8 +29,6 @@ #include "sun4i_hdmi.h" #include "sun4i_tcon.h" -#define 
DDC_SEGMENT_ADDR 0x30 - static inline struct sun4i_hdmi * drm_encoder_to_sun4i_hdmi(struct drm_encoder *encoder) { @@ -184,93 +182,13 @@ static const struct drm_encoder_funcs sun4i_hdmi_funcs = { .destroy = drm_encoder_cleanup, }; -static int sun4i_hdmi_read_sub_block(struct sun4i_hdmi *hdmi, - unsigned int blk, unsigned int offset, - u8 *buf, unsigned int count) -{ - unsigned long reg; - int i; - - reg = readl(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG); - reg &= ~SUN4I_HDMI_DDC_CTRL_FIFO_DIR_MASK; - writel(reg | SUN4I_HDMI_DDC_CTRL_FIFO_DIR_READ, - hdmi->base + SUN4I_HDMI_DDC_CTRL_REG); - - writel(SUN4I_HDMI_DDC_ADDR_SEGMENT(offset >> 8) | - SUN4I_HDMI_DDC_ADDR_EDDC(DDC_SEGMENT_ADDR << 1) | - SUN4I_HDMI_DDC_ADDR_OFFSET(offset) | - SUN4I_HDMI_DDC_ADDR_SLAVE(DDC_ADDR), - hdmi->base + SUN4I_HDMI_DDC_ADDR_REG); - - reg = readl(hdmi->base + SUN4I_HDMI_DDC_FIFO_CTRL_REG); - writel(reg | SUN4I_HDMI_DDC_FIFO_CTRL_CLEAR, - hdmi->base + SUN4I_HDMI_DDC_FIFO_CTRL_REG); - - writel(count, hdmi->base + SUN4I_HDMI_DDC_BYTE_COUNT_REG); - writel(SUN4I_HDMI_DDC_CMD_EXPLICIT_EDDC_READ, - hdmi->base + SUN4I_HDMI_DDC_CMD_REG); - - reg = readl(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG); - writel(reg | SUN4I_HDMI_DDC_CTRL_START_CMD, - hdmi->base + SUN4I_HDMI_DDC_CTRL_REG); - - if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG, reg, - !(reg & SUN4I_HDMI_DDC_CTRL_START_CMD), - 100, 100000)) - return -EIO; - - for (i = 0; i < count; i++) - buf[i] = readb(hdmi->base + SUN4I_HDMI_DDC_FIFO_DATA_REG); - - return 0; -} - -static int sun4i_hdmi_read_edid_block(void *data, u8 *buf, unsigned int blk, - size_t length) -{ - struct sun4i_hdmi *hdmi = data; - int retry = 2, i; - - do { - for (i = 0; i < length; i += SUN4I_HDMI_DDC_FIFO_SIZE) { - unsigned char offset = blk * EDID_LENGTH + i; - unsigned int count = min((unsigned int)SUN4I_HDMI_DDC_FIFO_SIZE, - length - i); - int ret; - - ret = sun4i_hdmi_read_sub_block(hdmi, blk, offset, - buf + i, count); - if (ret) - return ret; - } - } while (!drm_edid_block_valid(buf, blk, true, NULL) && (retry--)); - - return 0; -} - static int sun4i_hdmi_get_modes(struct drm_connector *connector) { struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector); - unsigned long reg; struct edid *edid; int ret; - /* Reset i2c controller */ - writel(SUN4I_HDMI_DDC_CTRL_ENABLE | SUN4I_HDMI_DDC_CTRL_RESET, - hdmi->base + SUN4I_HDMI_DDC_CTRL_REG); - if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG, reg, - !(reg & SUN4I_HDMI_DDC_CTRL_RESET), - 100, 2000)) - return -EIO; - - writel(SUN4I_HDMI_DDC_LINE_CTRL_SDA_ENABLE | - SUN4I_HDMI_DDC_LINE_CTRL_SCL_ENABLE, - hdmi->base + SUN4I_HDMI_DDC_LINE_CTRL_REG); - - clk_prepare_enable(hdmi->ddc_clk); - clk_set_rate(hdmi->ddc_clk, 100000); - - edid = drm_do_get_edid(connector, sun4i_hdmi_read_edid_block, hdmi); + edid = drm_get_edid(connector, hdmi->i2c); if (!edid) return 0; @@ -282,8 +200,6 @@ static int sun4i_hdmi_get_modes(struct drm_connector *connector) ret = drm_add_edid_modes(connector, edid); kfree(edid); - clk_disable_unprepare(hdmi->ddc_clk); - return ret; } @@ -407,9 +323,9 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master, SUN4I_HDMI_PLL_CTRL_PLL_EN; writel(reg, hdmi->base + SUN4I_HDMI_PLL_CTRL_REG); - ret = sun4i_ddc_create(hdmi, hdmi->tmds_clk); + ret = sun4i_hdmi_i2c_create(dev, hdmi); if (ret) { - dev_err(dev, "Couldn't create the DDC clock\n"); + dev_err(dev, "Couldn't create the HDMI I2C adapter\n"); return ret; } @@ -422,13 +338,15 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master, 
NULL); if (ret) { dev_err(dev, "Couldn't initialise the HDMI encoder\n"); - return ret; + goto err_del_i2c_adapter; } hdmi->encoder.possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node); - if (!hdmi->encoder.possible_crtcs) - return -EPROBE_DEFER; + if (!hdmi->encoder.possible_crtcs) { + ret = -EPROBE_DEFER; + goto err_del_i2c_adapter; + } drm_connector_helper_add(&hdmi->connector, &sun4i_hdmi_connector_helper_funcs); @@ -451,6 +369,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master, err_cleanup_connector: drm_encoder_cleanup(&hdmi->encoder); +err_del_i2c_adapter: + i2c_del_adapter(hdmi->i2c); return ret; } @@ -461,6 +381,7 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master, drm_connector_cleanup(&hdmi->connector); drm_encoder_cleanup(&hdmi->encoder); + i2c_del_adapter(hdmi->i2c); } static const struct component_ops sun4i_hdmi_ops = { diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c new file mode 100644 index 000000000000..2e42d09ab42e --- /dev/null +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c @@ -0,0 +1,220 @@ +/* + * Copyright (C) 2016 Maxime Ripard + * Copyright (C) 2017 Jonathan Liu + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + */ + +#include +#include +#include + +#include "sun4i_hdmi.h" + +#define SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK ( \ + SUN4I_HDMI_DDC_INT_STATUS_ILLEGAL_FIFO_OPERATION | \ + SUN4I_HDMI_DDC_INT_STATUS_DDC_RX_FIFO_UNDERFLOW | \ + SUN4I_HDMI_DDC_INT_STATUS_DDC_TX_FIFO_OVERFLOW | \ + SUN4I_HDMI_DDC_INT_STATUS_ARBITRATION_ERROR | \ + SUN4I_HDMI_DDC_INT_STATUS_ACK_ERROR | \ + SUN4I_HDMI_DDC_INT_STATUS_BUS_ERROR \ +) + +/* FIFO request bit is set when FIFO level is above RX_THRESHOLD during read */ +#define RX_THRESHOLD SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES_MAX +/* FIFO request bit is set when FIFO level is below TX_THRESHOLD during write */ +#define TX_THRESHOLD 1 + +static int fifo_transfer(struct sun4i_hdmi *hdmi, u8 *buf, int len, bool read) +{ + /* + * 1 byte takes 9 clock cycles (8 bits + 1 ACK) = 90 us for 100 kHz + * clock. As clock rate is fixed, just round it up to 100 us. + */ + const unsigned long byte_time_ns = 100; + const u32 mask = SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK | + SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST | + SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE; + u32 reg; + + /* Limit transfer length by FIFO threshold */ + len = min_t(int, len, read ? (RX_THRESHOLD + 1) : + (SUN4I_HDMI_DDC_FIFO_SIZE - TX_THRESHOLD + 1)); + + /* Wait until error, FIFO request bit set or transfer complete */ + if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_INT_STATUS_REG, reg, + reg & mask, len * byte_time_ns, 100000)) + return -ETIMEDOUT; + + if (reg & SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK) + return -EIO; + + if (read) + readsb(hdmi->base + SUN4I_HDMI_DDC_FIFO_DATA_REG, buf, len); + else + writesb(hdmi->base + SUN4I_HDMI_DDC_FIFO_DATA_REG, buf, len); + + /* Clear FIFO request bit */ + writel(SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST, + hdmi->base + SUN4I_HDMI_DDC_INT_STATUS_REG); + + return len; +} + +static int xfer_msg(struct sun4i_hdmi *hdmi, struct i2c_msg *msg) +{ + int i, len; + u32 reg; + + /* Set FIFO direction */ + reg = readl(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG); + reg &= ~SUN4I_HDMI_DDC_CTRL_FIFO_DIR_MASK; + reg |= (msg->flags & I2C_M_RD) ? 
+ SUN4I_HDMI_DDC_CTRL_FIFO_DIR_READ : + SUN4I_HDMI_DDC_CTRL_FIFO_DIR_WRITE; + writel(reg, hdmi->base + SUN4I_HDMI_DDC_CTRL_REG); + + /* Set I2C address */ + writel(SUN4I_HDMI_DDC_ADDR_SLAVE(msg->addr), + hdmi->base + SUN4I_HDMI_DDC_ADDR_REG); + + /* Set FIFO RX/TX thresholds and clear FIFO */ + reg = readl(hdmi->base + SUN4I_HDMI_DDC_FIFO_CTRL_REG); + reg |= SUN4I_HDMI_DDC_FIFO_CTRL_CLEAR; + reg &= ~SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES_MASK; + reg |= SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES(RX_THRESHOLD); + reg &= ~SUN4I_HDMI_DDC_FIFO_CTRL_TX_THRES_MASK; + reg |= SUN4I_HDMI_DDC_FIFO_CTRL_TX_THRES(TX_THRESHOLD); + writel(reg, hdmi->base + SUN4I_HDMI_DDC_FIFO_CTRL_REG); + if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_FIFO_CTRL_REG, + reg, + !(reg & SUN4I_HDMI_DDC_FIFO_CTRL_CLEAR), + 100, 2000)) + return -EIO; + + /* Set transfer length */ + writel(msg->len, hdmi->base + SUN4I_HDMI_DDC_BYTE_COUNT_REG); + + /* Set command */ + writel(msg->flags & I2C_M_RD ? + SUN4I_HDMI_DDC_CMD_IMPLICIT_READ : + SUN4I_HDMI_DDC_CMD_IMPLICIT_WRITE, + hdmi->base + SUN4I_HDMI_DDC_CMD_REG); + + /* Clear interrupt status bits */ + writel(SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK | + SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST | + SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE, + hdmi->base + SUN4I_HDMI_DDC_INT_STATUS_REG); + + /* Start command */ + reg = readl(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG); + writel(reg | SUN4I_HDMI_DDC_CTRL_START_CMD, + hdmi->base + SUN4I_HDMI_DDC_CTRL_REG); + + /* Transfer bytes */ + for (i = 0; i < msg->len; i += len) { + len = fifo_transfer(hdmi, msg->buf + i, msg->len - i, + msg->flags & I2C_M_RD); + if (len <= 0) + return len; + } + + /* Wait for command to finish */ + if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG, + reg, + !(reg & SUN4I_HDMI_DDC_CTRL_START_CMD), + 100, 100000)) + return -EIO; + + /* Check for errors */ + reg = readl(hdmi->base + SUN4I_HDMI_DDC_INT_STATUS_REG); + if ((reg & SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK) || + !(reg & SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE)) { + return -EIO; + } + + return 0; +} + +static int sun4i_hdmi_i2c_xfer(struct i2c_adapter *adap, + struct i2c_msg *msgs, int num) +{ + struct sun4i_hdmi *hdmi = i2c_get_adapdata(adap); + u32 reg; + int err, i, ret = num; + + for (i = 0; i < num; i++) { + if (!msgs[i].len) + return -EINVAL; + if (msgs[i].len > SUN4I_HDMI_DDC_BYTE_COUNT_MAX) + return -EINVAL; + } + + /* Reset I2C controller */ + writel(SUN4I_HDMI_DDC_CTRL_ENABLE | SUN4I_HDMI_DDC_CTRL_RESET, + hdmi->base + SUN4I_HDMI_DDC_CTRL_REG); + if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG, reg, + !(reg & SUN4I_HDMI_DDC_CTRL_RESET), + 100, 2000)) + return -EIO; + + writel(SUN4I_HDMI_DDC_LINE_CTRL_SDA_ENABLE | + SUN4I_HDMI_DDC_LINE_CTRL_SCL_ENABLE, + hdmi->base + SUN4I_HDMI_DDC_LINE_CTRL_REG); + + clk_prepare_enable(hdmi->ddc_clk); + clk_set_rate(hdmi->ddc_clk, 100000); + + for (i = 0; i < num; i++) { + err = xfer_msg(hdmi, &msgs[i]); + if (err) { + ret = err; + break; + } + } + + clk_disable_unprepare(hdmi->ddc_clk); + return ret; +} + +static u32 sun4i_hdmi_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm sun4i_hdmi_i2c_algorithm = { + .master_xfer = sun4i_hdmi_i2c_xfer, + .functionality = sun4i_hdmi_i2c_func, +}; + +int sun4i_hdmi_i2c_create(struct device *dev, struct sun4i_hdmi *hdmi) +{ + struct i2c_adapter *adap; + int ret = 0; + + ret = sun4i_ddc_create(hdmi, hdmi->tmds_clk); + if (ret) + return ret; + + adap = devm_kzalloc(dev, sizeof(*adap), GFP_KERNEL); + if 
(!adap) + return -ENOMEM; + + adap->owner = THIS_MODULE; + adap->class = I2C_CLASS_DDC; + adap->algo = &sun4i_hdmi_i2c_algorithm; + strlcpy(adap->name, "sun4i_hdmi_i2c adapter", sizeof(adap->name)); + i2c_set_adapdata(adap, hdmi); + + ret = i2c_add_adapter(adap); + if (ret) + return ret; + + hdmi->i2c = adap; + + return ret; +} From 2a596fc9d974bb040eda9ab70bf8756fcaaa6afe Mon Sep 17 00:00:00 2001 From: Jonathan Liu Date: Mon, 10 Jul 2017 16:55:04 +1000 Subject: [PATCH 0368/1795] drm/sun4i: Implement drm_driver lastclose to restore fbdev console The drm_driver lastclose callback is called when the last userspace DRM client has closed. Call drm_fbdev_cma_restore_mode to restore the fbdev console otherwise the fbdev console will stop working. Fixes: 9026e0d122ac ("drm: Add Allwinner A10 Display Engine support") Cc: stable@vger.kernel.org Tested-by: Olliver Schinagl Reviewed-by: Chen-Yu Tsai Signed-off-by: Jonathan Liu Signed-off-by: Maxime Ripard --- drivers/gpu/drm/sun4i/sun4i_drv.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index abc7d8fe06b4..a45a627283a1 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -25,12 +25,20 @@ #include "sun4i_framebuffer.h" #include "sun4i_tcon.h" +static void sun4i_drv_lastclose(struct drm_device *dev) +{ + struct sun4i_drv *drv = dev->dev_private; + + drm_fbdev_cma_restore_mode(drv->fbdev); +} + DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops); static struct drm_driver sun4i_drv_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, /* Generic Operations */ + .lastclose = sun4i_drv_lastclose, .fops = &sun4i_drv_fops, .name = "sun4i-drm", .desc = "Allwinner sun4i Display Engine", From 58947144af34a08eee3388c1f039f199e80c0c6f Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 17 Jul 2017 09:06:19 +0200 Subject: [PATCH 0369/1795] drm/i915: Update DRIVER_DATE to 20170717 Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b4716ce32ca2..559fdc7bb393 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -80,8 +80,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20170703" -#define DRIVER_TIMESTAMP 1499064041 +#define DRIVER_DATE "20170717" +#define DRIVER_TIMESTAMP 1500275179 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and * WARN_ON()) for hw state sanity checks to check for unexpected conditions From 58dff39904c02199b80395dac2fa3dec0d8f3861 Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Fri, 19 May 2017 17:47:33 +0200 Subject: [PATCH 0370/1795] drm/imx: ipuv3-plane: use fb local variable instead of state->fb We already have a local variable assigned to state->fb, use it. 
Signed-off-by: Philipp Zabel --- drivers/gpu/drm/imx/ipuv3-plane.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 49546222c6d3..553dc9926e49 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -525,8 +525,8 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id, drm_rect_width(&state->src) >> 16, drm_rect_height(&state->src) >> 16, - state->fb->pitches[0], - state->fb->format->format, &eba); + fb->pitches[0], + fb->format->format, &eba); } if (old_state->fb && !drm_atomic_crtc_needs_modeset(crtc_state)) { @@ -553,11 +553,11 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true); break; case IPU_DP_FLOW_SYNC_FG: - ics = ipu_drm_fourcc_to_colorspace(state->fb->format->format); + ics = ipu_drm_fourcc_to_colorspace(fb->format->format); ipu_dp_setup_channel(ipu_plane->dp, ics, IPUV3_COLORSPACE_UNKNOWN); /* Enable local alpha on partial plane */ - switch (state->fb->format->format) { + switch (fb->format->format) { case DRM_FORMAT_ARGB1555: case DRM_FORMAT_ABGR1555: case DRM_FORMAT_RGBA5551: @@ -587,10 +587,10 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, ipu_cpmem_set_resolution(ipu_plane->ipu_ch, drm_rect_width(&state->src) >> 16, drm_rect_height(&state->src) >> 16); - ipu_cpmem_set_fmt(ipu_plane->ipu_ch, state->fb->format->format); + ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->format->format); ipu_cpmem_set_high_priority(ipu_plane->ipu_ch); ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1); - ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]); + ipu_cpmem_set_stride(ipu_plane->ipu_ch, fb->pitches[0]); ipu_cpmem_set_axi_id(ipu_plane->ipu_ch, axi_id); switch (fb->format->format) { case DRM_FORMAT_YUV420: @@ -644,8 +644,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, ipu_cpmem_set_format_passthrough(ipu_plane->alpha_ch, 8); ipu_cpmem_set_high_priority(ipu_plane->alpha_ch); ipu_idmac_set_double_buffer(ipu_plane->alpha_ch, 1); - ipu_cpmem_set_stride(ipu_plane->alpha_ch, - state->fb->pitches[1]); + ipu_cpmem_set_stride(ipu_plane->alpha_ch, fb->pitches[1]); ipu_cpmem_set_burstsize(ipu_plane->alpha_ch, 16); ipu_cpmem_set_buffer(ipu_plane->alpha_ch, 0, alpha_eba); ipu_cpmem_set_buffer(ipu_plane->alpha_ch, 1, alpha_eba); From 790cb4c7c9545953d22d3d425e49b36a711bae5b Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Fri, 19 May 2017 16:05:51 +0200 Subject: [PATCH 0371/1795] drm/imx: lock scanout transfers for consecutive bursts Because of its shallow queues and limited reordering ability, the i.MX6Q memory controller likes AXI bursts of consecutive addresses a lot. To optimize memory access performance, lock the IPU scanout channels for a number of burst accesses each, before switching to the next channel. The burst size and length of a locked burst chain are chosen not to overshoot the stride. Enabling the 8-burst channel lock on a single 1920x1080@60Hz RGBx scanout (474 MiB/s of 64-byte IPU memory read accesses) reduces the reported memory controller busy cycles from 46% to below 28% on an otherwise idle i.MX6Q. 
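To make the burst-selection heuristic described above concrete, here is a minimal standalone restatement of the same calculation (an illustration, not driver code), assuming the tightly packed 1920x1080 XRGB8888 scanout used as the example in this message.

#include <stdio.h>

/* Round x up to the next multiple of m. */
static unsigned int round_up_to(unsigned int x, unsigned int m)
{
	return ((x + m - 1) / m) * m;
}

int main(void)
{
	/* Assumed example: 1920x1080, 4 bytes per pixel, stride == width * cpp. */
	unsigned int width = 1920, cpp = 4, stride = 1920 * 4;
	unsigned int width_bytes = width * cpp;
	unsigned int npb, bursts;

	/* Largest pixels-per-burst that does not overshoot the stride. */
	for (npb = 64 / cpp; npb > 0; --npb)
		if (round_up_to(width_bytes, npb * cpp) <= stride)
			break;

	/* Longest locked chain of consecutive bursts that stays within the stride. */
	for (bursts = 8; bursts > 1; bursts /= 2)
		if (round_up_to(width_bytes, npb * cpp * bursts) <= stride)
			break;

	/* For this example: burstsize=16 pixels (64 bytes), num_bursts=8. */
	printf("burstsize=%u pixels, num_bursts=%u\n", npb, bursts);
	return 0;
}
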
Tested-by: Lucas Stach Signed-off-by: Philipp Zabel --- drivers/gpu/drm/imx/ipuv3-plane.c | 38 ++++++++++++++++++++++++++++--- 1 file changed, 35 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 553dc9926e49..0be08c654a7a 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -496,6 +496,27 @@ static int ipu_chan_assign_axi_id(int ipu_chan) } } +static void ipu_calculate_bursts(u32 width, u32 cpp, u32 stride, + u8 *burstsize, u8 *num_bursts) +{ + const unsigned int width_bytes = width * cpp; + unsigned int npb, bursts; + + /* Maximum number of pixels per burst without overshooting stride */ + for (npb = 64 / cpp; npb > 0; --npb) { + if (round_up(width_bytes, npb * cpp) <= stride) + break; + } + *burstsize = npb; + + /* Maximum number of consecutive bursts without overshooting stride */ + for (bursts = 8; bursts > 1; bursts /= 2) { + if (round_up(width_bytes, npb * cpp * bursts) <= stride) + break; + } + *num_bursts = bursts; +} + static void ipu_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { @@ -509,6 +530,9 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, unsigned long alpha_eba = 0; enum ipu_color_space ics; unsigned int axi_id = 0; + const struct drm_format_info *info; + u8 burstsize, num_bursts; + u32 width, height; int active; if (ipu_plane->dp_flow == IPU_DP_FLOW_SYNC_FG) @@ -583,15 +607,21 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, ipu_dmfc_config_wait4eot(ipu_plane->dmfc, drm_rect_width(dst)); + width = drm_rect_width(&state->src) >> 16; + height = drm_rect_height(&state->src) >> 16; + info = drm_format_info(fb->format->format); + ipu_calculate_bursts(width, info->cpp[0], fb->pitches[0], + &burstsize, &num_bursts); + ipu_cpmem_zero(ipu_plane->ipu_ch); - ipu_cpmem_set_resolution(ipu_plane->ipu_ch, - drm_rect_width(&state->src) >> 16, - drm_rect_height(&state->src) >> 16); + ipu_cpmem_set_resolution(ipu_plane->ipu_ch, width, height); ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->format->format); + ipu_cpmem_set_burstsize(ipu_plane->ipu_ch, burstsize); ipu_cpmem_set_high_priority(ipu_plane->ipu_ch); ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1); ipu_cpmem_set_stride(ipu_plane->ipu_ch, fb->pitches[0]); ipu_cpmem_set_axi_id(ipu_plane->ipu_ch, axi_id); + switch (fb->format->format) { case DRM_FORMAT_YUV420: case DRM_FORMAT_YVU420: @@ -631,6 +661,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, case DRM_FORMAT_RGBX8888_A8: case DRM_FORMAT_BGRX8888_A8: alpha_eba = drm_plane_state_to_eba(state, 1); + num_bursts = 0; dev_dbg(ipu_plane->base.dev->dev, "phys = %lu %lu, x = %d, y = %d", eba, alpha_eba, state->src.x1 >> 16, state->src.y1 >> 16); @@ -656,6 +687,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, } ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba); ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba); + ipu_idmac_lock_enable(ipu_plane->ipu_ch, num_bursts); ipu_plane_enable(ipu_plane); } From 635f56c342cd195a8059f24296fe7fd795aaa33d Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 14 Jul 2017 18:12:41 +0300 Subject: [PATCH 0372/1795] drm/i915: Fix error checking/locking in perf/lookup_context() 1acfc104cdf8 missed to convert this one caller to be lockless. The side effect of that was that the error check in lookup_context() became incorrect. Convert now this caller too. 
Fixes: 1acfc104cdf ("drm/i915: Enable rcu-only context lookups") Cc: Chris Wilson Cc: Joonas Lahtinen Reviewed-by: Chris Wilson Reviewed-by: Lionel Landwerlin Signed-off-by: Imre Deak Link: http://patchwork.freedesktop.org/patch/msgid/20170714151242.517-1-imre.deak@intel.com --- drivers/gpu/drm/i915/i915_perf.c | 32 +++++--------------------------- 1 file changed, 5 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index d9f77a4d85db..96682fd86f82 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -2483,27 +2483,6 @@ static const struct file_operations fops = { }; -static struct i915_gem_context * -lookup_context(struct drm_i915_private *dev_priv, - struct drm_i915_file_private *file_priv, - u32 ctx_user_handle) -{ - struct i915_gem_context *ctx; - int ret; - - ret = i915_mutex_lock_interruptible(&dev_priv->drm); - if (ret) - return ERR_PTR(ret); - - ctx = i915_gem_context_lookup(file_priv, ctx_user_handle); - if (!IS_ERR(ctx)) - i915_gem_context_get(ctx); - - mutex_unlock(&dev_priv->drm.struct_mutex); - - return ctx; -} - /** * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD * @dev_priv: i915 device instance @@ -2545,12 +2524,11 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, u32 ctx_handle = props->ctx_handle; struct drm_i915_file_private *file_priv = file->driver_priv; - specific_ctx = lookup_context(dev_priv, file_priv, ctx_handle); - if (IS_ERR(specific_ctx)) { - ret = PTR_ERR(specific_ctx); - if (ret != -EINTR) - DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n", - ctx_handle); + specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle); + if (!specific_ctx) { + DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n", + ctx_handle); + ret = -ENOENT; goto err; } } From edd9003f7f9dddd28fdd768e6e7569d996c769cb Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 14 Jul 2017 18:12:42 +0300 Subject: [PATCH 0373/1795] drm/i915: Fix user ptr check size in eb_relocate_vma() Fix the sizeof(ptr) vs. sizeof(*ptr) typo. Fixes: 2889caa92321 ("drm/i915: Eliminate lots of iterations over the execobjects array") Cc: Chris Wilson Cc: Joonas Lahtinen Signed-off-by: Imre Deak Reviewed-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/20170714151242.517-2-imre.deak@intel.com --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 929f275e67aa..fe3e0d40034c 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1459,7 +1459,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma) * to read. However, if the array is not writable the user loses * the updated relocation values. */ - if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(urelocs)))) + if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs)))) return -EFAULT; do { From eb42ea6d0b8ed9ca8e73cc24fa801f0d8ab28905 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 21 Jun 2017 13:00:07 +0200 Subject: [PATCH 0374/1795] drm/crc: Handle opening and closing crc better When I was doing a grep . -r /sys/kernel/debug/dri/0 I noticed a WARN appearing when I aborted the grep with ^C. 
After investigating I've also noticed that the error handling was lacking and there are race conditions involving multiple calls to open/close simultaneously. Fix this by setting the opened flag first and using crc->entries to decide when crc can be collected. Also call unset crc source before cleaning up, this way there is no race with a future open(). This patch has been tested with all the tests in igt with CRC in their name. Signed-off-by: Maarten Lankhorst Link: http://patchwork.freedesktop.org/patch/msgid/20170621110007.11674-1-maarten.lankhorst@linux.intel.com Reviewed-by: Tomeu Vizoso [mlankhorst: Add description that this patch has been tested with IGT, based on tomeu's feedback] --- drivers/gpu/drm/drm_debugfs_crc.c | 48 ++++++++++++++++++++----------- 1 file changed, 32 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c index 1722d8f21449..d0ea4627a093 100644 --- a/drivers/gpu/drm/drm_debugfs_crc.c +++ b/drivers/gpu/drm/drm_debugfs_crc.c @@ -136,20 +136,37 @@ static int crtc_crc_data_count(struct drm_crtc_crc *crc) return CIRC_CNT(crc->head, crc->tail, DRM_CRC_ENTRIES_NR); } +static void crtc_crc_cleanup(struct drm_crtc_crc *crc) +{ + kfree(crc->entries); + crc->entries = NULL; + crc->head = 0; + crc->tail = 0; + crc->values_cnt = 0; + crc->opened = false; +} + static int crtc_crc_open(struct inode *inode, struct file *filep) { struct drm_crtc *crtc = inode->i_private; struct drm_crtc_crc *crc = &crtc->crc; struct drm_crtc_crc_entry *entries = NULL; size_t values_cnt; - int ret; + int ret = 0; - if (crc->opened) - return -EBUSY; + spin_lock_irq(&crc->lock); + if (!crc->opened) + crc->opened = true; + else + ret = -EBUSY; + spin_unlock_irq(&crc->lock); + + if (ret) + return ret; ret = crtc->funcs->set_crc_source(crtc, crc->source, &values_cnt); if (ret) - return ret; + goto err; if (WARN_ON(values_cnt > DRM_MAX_CRC_NR)) { ret = -EINVAL; @@ -170,7 +187,6 @@ static int crtc_crc_open(struct inode *inode, struct file *filep) spin_lock_irq(&crc->lock); crc->entries = entries; crc->values_cnt = values_cnt; - crc->opened = true; /* * Only return once we got a first frame, so userspace doesn't have to @@ -182,12 +198,17 @@ static int crtc_crc_open(struct inode *inode, struct file *filep) crc->lock); spin_unlock_irq(&crc->lock); - WARN_ON(ret); + if (ret) + goto err_disable; return 0; err_disable: crtc->funcs->set_crc_source(crtc, NULL, &values_cnt); +err: + spin_lock_irq(&crc->lock); + crtc_crc_cleanup(crc); + spin_unlock_irq(&crc->lock); return ret; } @@ -197,17 +218,12 @@ static int crtc_crc_release(struct inode *inode, struct file *filep) struct drm_crtc_crc *crc = &crtc->crc; size_t values_cnt; - spin_lock_irq(&crc->lock); - kfree(crc->entries); - crc->entries = NULL; - crc->head = 0; - crc->tail = 0; - crc->values_cnt = 0; - crc->opened = false; - spin_unlock_irq(&crc->lock); - crtc->funcs->set_crc_source(crtc, NULL, &values_cnt); + spin_lock_irq(&crc->lock); + crtc_crc_cleanup(crc); + spin_unlock_irq(&crc->lock); + return 0; } @@ -334,7 +350,7 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame, spin_lock(&crc->lock); /* Caller may not have noticed yet that userspace has stopped reading */ - if (!crc->opened) { + if (!crc->entries) { spin_unlock(&crc->lock); return -EINVAL; } From 8038e09be5a3ac061118bd80c7a505829920b50f Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Thu, 6 Jul 2017 15:03:15 +0200 Subject: [PATCH 0375/1795] drm/crc: Only open CRC on atomic drivers when the CRTC is active. 
Commit e8fa5671183c ("drm: crc: Wait for a frame before returning from open()") adds a wait for CRC frame, but with the CRTC off this will never be generated. For atomic drivers we know if a CRTC is active through crtc_state->active, so when inactive reject the open with -EIO. Just like with the previous patch changing debugfs opening semantics, this patch has been tested against igt. Signed-off-by: Maarten Lankhorst Fixes: e8fa5671183c ("drm: crc: Wait for a frame before returning from open()") Testcase: debugfs_test.read_all_entries Link: http://patchwork.freedesktop.org/patch/msgid/15f9d300-65d3-63aa-00e3-e83f5e4d5a7a@linux.intel.com Reviewed-by: Daniel Vetter --- drivers/gpu/drm/drm_debugfs_crc.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c index d0ea4627a093..f9e26dda56d6 100644 --- a/drivers/gpu/drm/drm_debugfs_crc.c +++ b/drivers/gpu/drm/drm_debugfs_crc.c @@ -154,6 +154,19 @@ static int crtc_crc_open(struct inode *inode, struct file *filep) size_t values_cnt; int ret = 0; + if (drm_drv_uses_atomic_modeset(crtc->dev)) { + ret = drm_modeset_lock_interruptible(&crtc->mutex, NULL); + if (ret) + return ret; + + if (!crtc->state->active) + ret = -EIO; + drm_modeset_unlock(&crtc->mutex); + + if (ret) + return ret; + } + spin_lock_irq(&crc->lock); if (!crc->opened) crc->opened = true; From bc240eec4b074f5dc2753f295e980e66b72c90fb Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Mon, 26 Jun 2017 13:50:41 -0600 Subject: [PATCH 0376/1795] ntb: use correct mw_count function in ntb_tool and ntb_transport After converting to the new API, both ntb_tool and ntb_transport are using ntb_mw_count to iterate through ntb_peer_get_addr when they should be using ntb_peer_mw_count. This probably isn't an issue with the Intel and AMD drivers but this will matter for any future driver with asymetric memory window counts. Signed-off-by: Logan Gunthorpe Acked-by: Allen Hubbe Signed-off-by: Jon Mason Fixes: 443b9a14ecbe ("NTB: Alter MW API to support multi-ports devices") --- drivers/ntb/ntb_transport.c | 2 +- drivers/ntb/test/ntb_tool.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 9a03c5871efe..b29558ddfe95 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -1059,7 +1059,7 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) int node; int rc, i; - mw_count = ntb_mw_count(ndev, PIDX); + mw_count = ntb_peer_mw_count(ndev); if (!ndev->ops->mw_set_trans) { dev_err(&ndev->dev, "Inbound MW based NTB API is required\n"); diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c index f002bf48a08d..a69815c45ce6 100644 --- a/drivers/ntb/test/ntb_tool.c +++ b/drivers/ntb/test/ntb_tool.c @@ -959,7 +959,7 @@ static int tool_probe(struct ntb_client *self, struct ntb_dev *ntb) tc->ntb = ntb; init_waitqueue_head(&tc->link_wq); - tc->mw_count = min(ntb_mw_count(tc->ntb, PIDX), MAX_MWS); + tc->mw_count = min(ntb_peer_mw_count(tc->ntb), MAX_MWS); for (i = 0; i < tc->mw_count; i++) { rc = tool_init_mw(tc, i); if (rc) From ad364f447e367a4b997cc75093600663caeedb68 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Fri, 14 Jul 2017 20:12:12 -0700 Subject: [PATCH 0377/1795] drm/vgem: add compat_ioctl support DRM drivers should supply a compat version if they're going to provide an ioctl implementation at all. This can confuse 32-bit user space on a 64-bit system. 
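As a minimal sketch of the pattern (illustrative only; the fops name and the exact set of hooks are placeholders, not vgem's actual table), a DRM driver's file_operations normally routes both ioctl entry points through the DRM core helpers:

  #include <linux/fs.h>
  #include <linux/module.h>
  #include <drm/drmP.h>

  static const struct file_operations example_drm_fops = {
          .owner          = THIS_MODULE,
          .open           = drm_open,
          .release        = drm_release,
          .unlocked_ioctl = drm_ioctl,           /* native entry point */
          .compat_ioctl   = drm_compat_ioctl,    /* 32-bit caller on a 64-bit kernel */
          .poll           = drm_poll,
          .read           = drm_read,
          .llseek         = noop_llseek,
  };

drm_compat_ioctl() passes size-invariant commands straight through to drm_ioctl() and translates the handful of legacy ones whose struct layout differs between the 32-bit and 64-bit ABIs. Without a .compat_ioctl hook, a 32-bit process on a 64-bit kernel typically gets -ENOTTY back from ioctl(), because the compat syscall path has nothing to call. In vgem's case the rest of the table already exists, so the fix below is the single .compat_ioctl line.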
Signed-off-by: Brian Norris Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20170715031212.108695-1-briannorris@chromium.org --- drivers/gpu/drm/vgem/vgem_drv.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index c938af8c40cf..12289673f457 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -278,6 +278,7 @@ static const struct file_operations vgem_driver_fops = { .poll = drm_poll, .read = drm_read, .unlocked_ioctl = drm_ioctl, + .compat_ioctl = drm_compat_ioctl, .release = drm_release, }; From 212fa2f84dbf36682ee6491f203471a27997043e Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Fri, 14 Jul 2017 13:41:16 -0300 Subject: [PATCH 0378/1795] changes.rst: Update Sphinx minimal requirements The kfigure module doesn't work with Sphinx version 1.2. So, update the minimal requirements accordingly. Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jonathan Corbet --- Documentation/process/changes.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst index adbb50ae5246..ceddf1d1e646 100644 --- a/Documentation/process/changes.rst +++ b/Documentation/process/changes.rst @@ -53,7 +53,7 @@ mcelog 0.6 mcelog --version iptables 1.4.2 iptables -V openssl & libcrypto 1.0.0 openssl version bc 1.06.95 bc --version -Sphinx\ [#f1]_ 1.2 sphinx-build --version +Sphinx\ [#f1]_ 1.3 sphinx-build --version ====================== =============== ======================================== .. [#f1] Sphinx is needed only to build the Kernel documentation @@ -310,8 +310,8 @@ Sphinx ------ The ReST markups currently used by the Documentation/ files are meant to be -built with ``Sphinx`` version 1.2 or upper. If you're desiring to build -PDF outputs, it is recommended to use version 1.4.6. +built with ``Sphinx`` version 1.3 or upper. If you're desiring to build +PDF outputs, it is recommended to use version 1.4.6 or upper. .. note:: From b8b07b5c8dfc1fdf2f93d7419e0cac1034abc937 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Fri, 14 Jul 2017 13:41:17 -0300 Subject: [PATCH 0379/1795] docs-rst: move Sphinx install instructions to sphinx.rst The toolchain used by Sphinx is somewhat complex, and installing it should be part of the doc-guide. Move it out of changes.rst. Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jonathan Corbet --- Documentation/doc-guide/sphinx.rst | 17 +++++++++++++++++ Documentation/process/changes.rst | 14 ++------------ 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/Documentation/doc-guide/sphinx.rst b/Documentation/doc-guide/sphinx.rst index 84e8e8a9cbdb..1ee109b19322 100644 --- a/Documentation/doc-guide/sphinx.rst +++ b/Documentation/doc-guide/sphinx.rst @@ -19,6 +19,23 @@ Finally, there are thousands of plain text documentation files scattered around ``Documentation``. Some of these will likely be converted to reStructuredText over time, but the bulk of them will remain in plain text. +.. _sphinx_install: + +Sphinx Install +============== + +The ReST markups currently used by the Documentation/ files are meant to be +built with ``Sphinx`` version 1.3 or upper. If you're desiring to build +PDF outputs, it is recommended to use version 1.4.6 or upper. + +.. note:: + + Please notice that, for PDF and LaTeX output, you'll also need ``XeLaTeX`` + version 3.14159265. 
Depending on the distribution, you may also need to + install a series of ``texlive`` packages that provide the minimal set of + functionalities required for ``XeLaTex`` to work. For PDF output you'll also + need ``convert(1)`` from ImageMagick (https://www.imagemagick.org). + Sphinx Build ============ diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst index ceddf1d1e646..560beaef5a7c 100644 --- a/Documentation/process/changes.rst +++ b/Documentation/process/changes.rst @@ -309,18 +309,8 @@ Kernel documentation Sphinx ------ -The ReST markups currently used by the Documentation/ files are meant to be -built with ``Sphinx`` version 1.3 or upper. If you're desiring to build -PDF outputs, it is recommended to use version 1.4.6 or upper. - -.. note:: - - Please notice that, for PDF and LaTeX output, you'll also need ``XeLaTeX`` - version 3.14159265. Depending on the distribution, you may also need to - install a series of ``texlive`` packages that provide the minimal set of - functionalities required for ``XeLaTex`` to work. For PDF output you'll also - need ``convert(1)`` from ImageMagick (https://www.imagemagick.org). - +Please see :ref:`sphinx_install` in ``Documentation/doc-guide/sphinx.rst`` +for details about Sphinx requirements. Getting updated software ======================== From 58ef4a42dd42580945985c533d4d5c3d6628ef92 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Fri, 14 Jul 2017 13:41:18 -0300 Subject: [PATCH 0380/1795] sphinx.rst: explain the usage of virtual environment As the Sphinx build seems very fragile, specially for PDF output, add a notice about how to use it on a virtual environment. Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jonathan Corbet --- Documentation/doc-guide/sphinx.rst | 32 ++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/Documentation/doc-guide/sphinx.rst b/Documentation/doc-guide/sphinx.rst index 1ee109b19322..3278262164da 100644 --- a/Documentation/doc-guide/sphinx.rst +++ b/Documentation/doc-guide/sphinx.rst @@ -28,6 +28,38 @@ The ReST markups currently used by the Documentation/ files are meant to be built with ``Sphinx`` version 1.3 or upper. If you're desiring to build PDF outputs, it is recommended to use version 1.4.6 or upper. +Most distributions are shipped with Sphinx, but its toolchain is fragile, +and it is not uncommon that upgrading it or some other Python packages +on your machine would cause the documentation build to break. + +A way to get rid of that is to use a different version than the one shipped +on your distributions. In order to do that, it is recommended to install +Sphinx inside a virtual environment, using ``virtualenv-3`` +or ``virtualenv``, depending on how your distribution packaged Python 3. + +.. note:: + + #) Sphinx versions below 1.5 don't work properly with Python's + docutils version 0.13.1 or upper. So, if you're willing to use + those versions, you should run ``pip install 'docutils==0.12'``. + + #) It is recommended to use the RTD theme for html output. Depending + on the Sphinx version, it should be installed in separate, + with ``pip install sphinx_rtd_theme``. + +In summary, if you want to install Sphinx version 1.4.9, you should do:: + + $ virtualenv sphinx_1.4 + $ . sphinx_1.4/bin/activate + (sphinx_1.4) $ pip install 'docutils==0.12' + (sphinx_1.4) $ pip install 'Sphinx==1.4.9' + (sphinx_1.4) $ pip install sphinx_rtd_theme + +After running ``. 
sphinx_1.4/bin/activate``, the prompt will change, +in order to indicate that you're using the new environment. If you +open a new shell, you need to rerun this command to enter again at +the virtual environment before building the documentation. + .. note:: Please notice that, for PDF and LaTeX output, you'll also need ``XeLaTeX`` From 29fd35bd02568f27b463657218889276215b1662 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Fri, 14 Jul 2017 13:41:19 -0300 Subject: [PATCH 0381/1795] sphinx.rst: fix unknown reference There's no "Sphinx C Domain" reference at the Kernel documentation. So, don't use references for it. Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jonathan Corbet --- Documentation/doc-guide/sphinx.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/doc-guide/sphinx.rst b/Documentation/doc-guide/sphinx.rst index 3278262164da..4c6cb454eaa0 100644 --- a/Documentation/doc-guide/sphinx.rst +++ b/Documentation/doc-guide/sphinx.rst @@ -167,7 +167,7 @@ Here are some specific guidelines for the kernel documentation: the C domain ------------ -The `Sphinx C Domain`_ (name c) is suited for documentation of C API. E.g. a +The **Sphinx C Domain** (name c) is suited for documentation of C API. E.g. a function prototype: .. code-block:: rst From d43e5ae9748f327c60014973e3e290a6f02577ea Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Fri, 14 Jul 2017 13:41:20 -0300 Subject: [PATCH 0382/1795] sphinx.rst: describe the install requirements for kfigure As we now have a document describing the install requirements for Sphinx, add there the need for GraphViz and ImageMagick. Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jonathan Corbet --- Documentation/doc-guide/sphinx.rst | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/Documentation/doc-guide/sphinx.rst b/Documentation/doc-guide/sphinx.rst index 4c6cb454eaa0..3a41483ed499 100644 --- a/Documentation/doc-guide/sphinx.rst +++ b/Documentation/doc-guide/sphinx.rst @@ -68,6 +68,18 @@ the virtual environment before building the documentation. functionalities required for ``XeLaTex`` to work. For PDF output you'll also need ``convert(1)`` from ImageMagick (https://www.imagemagick.org). +Image output +------------ + +The kernel documentation build system contains an extension that +handles images on both GraphViz and SVG formats (see +:ref:`sphinx_kfigure`). + +For it to work, you need to install both GraphViz and ImageMagick +packages. If those packages are not installed, the build system will +still build the documentation, but won't include any images at the +output. + Sphinx Build ============ @@ -278,6 +290,7 @@ Rendered as: - column 3 +.. _sphinx_kfigure: Figures & Images ================ From 6e322a17fbe574a1558abab10bb89205031ec921 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Fri, 14 Jul 2017 13:41:21 -0300 Subject: [PATCH 0383/1795] sphinx.rst: better organize the documentation about PDF build Instead of having it on just one note, add a separate section. This way, we could later improve it, providing a better guide about the needed steps for PDF builds. 
Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jonathan Corbet --- Documentation/doc-guide/sphinx.rst | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/Documentation/doc-guide/sphinx.rst b/Documentation/doc-guide/sphinx.rst index 3a41483ed499..5521213efa68 100644 --- a/Documentation/doc-guide/sphinx.rst +++ b/Documentation/doc-guide/sphinx.rst @@ -60,14 +60,6 @@ in order to indicate that you're using the new environment. If you open a new shell, you need to rerun this command to enter again at the virtual environment before building the documentation. -.. note:: - - Please notice that, for PDF and LaTeX output, you'll also need ``XeLaTeX`` - version 3.14159265. Depending on the distribution, you may also need to - install a series of ``texlive`` packages that provide the minimal set of - functionalities required for ``XeLaTex`` to work. For PDF output you'll also - need ``convert(1)`` from ImageMagick (https://www.imagemagick.org). - Image output ------------ @@ -80,6 +72,19 @@ packages. If those packages are not installed, the build system will still build the documentation, but won't include any images at the output. +PDF and LaTeX builds +-------------------- + +Such builds are currently supported only with Sphinx versions 1.4 and 1.5. + +Currently, it is not possible to do pdf builds with Sphinx version 1.6. + +For PDF and LaTeX output, you'll also need ``XeLaTeX`` version 3.14159265. + +Depending on the distribution, you may also need to install a series of +``texlive`` packages that provide the minimal set of functionalities +required for ``XeLaTex`` to work. + Sphinx Build ============ From 7673f5b14f6b28416ff3f4ed28bf97db7dc21546 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 17 Jul 2017 15:39:28 +0200 Subject: [PATCH 0384/1795] Documentation: arm: Replace use of virt_to_phys with __pa_symbol All low-level PM/SMP code using virt_to_phys() should actually use __pa_symbol() against kernel symbols. Update the documentation to move away from virt_to_phys(). Cfr. commit 6996cbb2372189f7 ("ARM: 8641/1: treewide: Replace uses of virt_to_phys with __pa_symbol") Signed-off-by: Geert Uytterhoeven Signed-off-by: Jonathan Corbet --- Documentation/arm/firmware.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/arm/firmware.txt b/Documentation/arm/firmware.txt index da6713adac8a..7f175dbb427e 100644 --- a/Documentation/arm/firmware.txt +++ b/Documentation/arm/firmware.txt @@ -60,7 +60,7 @@ Example of using a firmware operation: /* some platform code, e.g. SMP initialization */ - __raw_writel(virt_to_phys(exynos4_secondary_startup), + __raw_writel(__pa_symbol(exynos4_secondary_startup), CPU1_BOOT_REG); /* Call Exynos specific smc call */ From 916f677adab9b0d4b0a5028ba5113a16ddf11192 Mon Sep 17 00:00:00 2001 From: Markus Heiser Date: Sat, 17 Jun 2017 10:17:20 +0200 Subject: [PATCH 0385/1795] docs RDT theme: fix bottom margin of lists items List items with two ore more blocks are not well rendered. E.g. 
the gap between last block (l1-b2) of the first list item and the following list item (L2) is to small:: * L1 xxxxxxxxxx xxxxxxxxxxxxx l1-b2 xxxxxxx xxxxxxxxxxxxx * L2 xxxxxxxxxx xxxxxxxxxxxxx So that it can be read more liquidly, a distance was added to the last block (l1-b2):: * L1 xxxxxxxxxx xxxxxxxxxxxxx l1-b2 xxxxxxx xxxxxxxxxxxxx * L2 xxxxxxxxxx xxxxxxxxxxxxx Signed-off-by: Markus Heiser Signed-off-by: Jonathan Corbet --- Documentation/sphinx-static/theme_overrides.css | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Documentation/sphinx-static/theme_overrides.css b/Documentation/sphinx-static/theme_overrides.css index d5764a4de5a2..1c9a9ab0f48c 100644 --- a/Documentation/sphinx-static/theme_overrides.css +++ b/Documentation/sphinx-static/theme_overrides.css @@ -56,6 +56,12 @@ font-family: "Courier New", Courier, monospace } + /* fix bottom margin of lists items */ + + .rst-content .section ul li:last-child, .rst-content .section ul li p:last-child { + margin-bottom: 12px; + } + /* inline literal: drop the borderbox, padding and red color */ code, .rst-content tt, .rst-content code { From 4c0477899f2e4a865db070b0526987ca31e525b1 Mon Sep 17 00:00:00 2001 From: Markus Heiser Date: Sat, 17 Jun 2017 10:17:21 +0200 Subject: [PATCH 0386/1795] docs RTD theme: code-block with line nos - lines and line numbers don't line up. In a code-block with line numbers (option :lineno:) there is a misalignment of the rendered source code lines on the right side and the line numbers on the left side. https://github.com/rtfd/sphinx_rtd_theme/issues/419 Since this issue is reported to the RTD theme project, it might be fixed in the future (take this as a interim solution). Signed-off-by: Markus Heiser Signed-off-by: Jonathan Corbet --- Documentation/sphinx-static/theme_overrides.css | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/Documentation/sphinx-static/theme_overrides.css b/Documentation/sphinx-static/theme_overrides.css index 1c9a9ab0f48c..522b6d4c49d4 100644 --- a/Documentation/sphinx-static/theme_overrides.css +++ b/Documentation/sphinx-static/theme_overrides.css @@ -4,6 +4,17 @@ * */ +/* Interim: Code-blocks with line nos - lines and line numbers don't line up. + * see: https://github.com/rtfd/sphinx_rtd_theme/issues/419 + */ + +div[class^="highlight"] pre { + line-height: normal; +} +.rst-content .highlight > pre { + line-height: normal; +} + @media screen { /* content column From 127d4b376431cd8091ed98e9c284744838975ae9 Mon Sep 17 00:00:00 2001 From: Jonathan Corbet Date: Mon, 17 Jul 2017 13:55:58 -0600 Subject: [PATCH 0387/1795] docs: Get module_init() docs from module.h The docs build complains: ./include/linux/init.h:1: warning: no structured comments found The problem is that the comments in question were moved to module.h in commit 0fd972a7d91d (module: relocate module_init from init.h to module.h). Fix basics.rst to match. Signed-off-by: Jonathan Corbet --- Documentation/driver-api/basics.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/driver-api/basics.rst b/Documentation/driver-api/basics.rst index ab82250c7727..cbfb5a825077 100644 --- a/Documentation/driver-api/basics.rst +++ b/Documentation/driver-api/basics.rst @@ -4,7 +4,7 @@ Driver Basics Driver Entry and Exit points ---------------------------- -.. kernel-doc:: include/linux/init.h +.. 
kernel-doc:: include/linux/module.h :internal: Driver device table From b18b12ae608ae2a108e5872bef9ab8122e01904d Mon Sep 17 00:00:00 2001 From: Jonathan Corbet Date: Mon, 17 Jul 2017 14:04:50 -0600 Subject: [PATCH 0388/1795] docs: Do not include kerneldoc comments from kernel/sys.c ...because there are none there, and I cannot figure out what would ever have been of interest there. This eliminates this warning: ./kernel/sys.c:1: warning: no structured comments found from the build. Signed-off-by: Jonathan Corbet --- Documentation/driver-api/basics.rst | 3 --- 1 file changed, 3 deletions(-) diff --git a/Documentation/driver-api/basics.rst b/Documentation/driver-api/basics.rst index cbfb5a825077..73fa7d42bbba 100644 --- a/Documentation/driver-api/basics.rst +++ b/Documentation/driver-api/basics.rst @@ -103,9 +103,6 @@ Kernel utility functions .. kernel-doc:: kernel/panic.c :export: -.. kernel-doc:: kernel/sys.c - :export: - .. kernel-doc:: kernel/rcu/tree.c :export: From 9b158d860f7a87d4ff341aae684b32fc9f6e371f Mon Sep 17 00:00:00 2001 From: Jonathan Corbet Date: Mon, 17 Jul 2017 14:07:49 -0600 Subject: [PATCH 0389/1795] docs: Do not include from .../seqno-fence.c There are no kerneldoc comments in drivers/dma-buf/seqno-fence.c, and it appears there never have been. Stop looking for comments there to eliminate this warning: ./drivers/dma-buf/seqno-fence.c:1: warning: no structured comments found Signed-off-by: Jonathan Corbet --- Documentation/driver-api/dma-buf.rst | 3 --- 1 file changed, 3 deletions(-) diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst index 31671b469627..dc384f2f7f34 100644 --- a/Documentation/driver-api/dma-buf.rst +++ b/Documentation/driver-api/dma-buf.rst @@ -139,9 +139,6 @@ DMA Fences Seqno Hardware Fences ~~~~~~~~~~~~~~~~~~~~~ -.. kernel-doc:: drivers/dma-buf/seqno-fence.c - :export: - .. kernel-doc:: include/linux/seqno-fence.h :internal: From 405d4c5a14ba305444e0c290edb20105499a02e6 Mon Sep 17 00:00:00 2001 From: Jonathan Corbet Date: Mon, 17 Jul 2017 14:19:25 -0600 Subject: [PATCH 0390/1795] docs: Get the struct cmbdata kernel doc from the right file Back in 2012, commit 9807f75955ea (UAPI: (Scripted) Disintegrate arch/s390/include/asm) moved struct cmbdata (and its kerneldoc comments) to another file, but did not update the docs to match. The result is this warning: ./arch/s390/include/asm/cmb.h:1: warning: no structured comments found ...and no documentation for that structure. Update the docs to get the information from the right place. Signed-off-by: Jonathan Corbet --- Documentation/driver-api/s390-drivers.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/driver-api/s390-drivers.rst b/Documentation/driver-api/s390-drivers.rst index 7060da136095..ecf8851d3565 100644 --- a/Documentation/driver-api/s390-drivers.rst +++ b/Documentation/driver-api/s390-drivers.rst @@ -75,7 +75,7 @@ The channel-measurement facility provides a means to collect measurement data which is made available by the channel subsystem for each channel attached device. -.. kernel-doc:: arch/s390/include/asm/cmb.h +.. kernel-doc:: arch/s390/include/uapi/asm/cmb.h :internal: .. kernel-doc:: drivers/s390/cio/cmf.c From 5ee5432b045a7e0fb8b72c196ae3d7077c6111eb Mon Sep 17 00:00:00 2001 From: Jonathan Corbet Date: Mon, 17 Jul 2017 14:27:19 -0600 Subject: [PATCH 0391/1795] docs: Do not include from drivers/scsi/constants.c The only function of interest in that file was scsi_print_status(). 
That function was removed in commit 7ac7076344d9 (scsi: remove scsi_print_status()) but the docs were not changed to match, yielding this warning: ./drivers/scsi/constants.c:1: warning: no structured comments found There's nothing there anymore, so just remove that section from the docs. Signed-off-by: Jonathan Corbet --- Documentation/driver-api/scsi.rst | 8 -------- 1 file changed, 8 deletions(-) diff --git a/Documentation/driver-api/scsi.rst b/Documentation/driver-api/scsi.rst index 859fb672319f..5a2aa7a377d9 100644 --- a/Documentation/driver-api/scsi.rst +++ b/Documentation/driver-api/scsi.rst @@ -224,14 +224,6 @@ mid to lowlevel SCSI driver interface .. kernel-doc:: drivers/scsi/hosts.c :export: -drivers/scsi/constants.c -~~~~~~~~~~~~~~~~~~~~~~~~ - -mid to lowlevel SCSI driver interface - -.. kernel-doc:: drivers/scsi/constants.c - :export: - Transport classes ----------------- From 14994a9bb4f4f042da9f1195ec76677552801eee Mon Sep 17 00:00:00 2001 From: Zhouyi Zhou Date: Fri, 7 Jul 2017 15:11:46 +0800 Subject: [PATCH 0392/1795] docs: disable KASLR when debugging kernel commit 6807c84652b0 ("x86: Enable KASLR by default") enables KASLR by default on x86. While KASLR will confuse gdb which resolve kernel symbol address from symbol table of vmlinux. We should turn off KASLR for kernel debugging. Signed-off-by: Zhouyi Zhou Signed-off-by: Jonathan Corbet --- Documentation/dev-tools/kgdb.rst | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/Documentation/dev-tools/kgdb.rst b/Documentation/dev-tools/kgdb.rst index 75273203a35a..d38be58f872a 100644 --- a/Documentation/dev-tools/kgdb.rst +++ b/Documentation/dev-tools/kgdb.rst @@ -348,6 +348,15 @@ default behavior is always set to 0. - ``echo 1 > /sys/module/debug_core/parameters/kgdbreboot`` - Enter the debugger on reboot notify. +Kernel parameter: ``nokaslr`` +----------------------------- + +If the architecture that you are using enable KASLR by default, +you should consider turning it off. KASLR randomizes the +virtual address where the kernel image is mapped and confuse +gdb which resolve kernel symbol address from symbol table +of vmlinux. + Using kdb ========= @@ -358,7 +367,7 @@ This is a quick example of how to use kdb. 1. Configure kgdboc at boot using kernel parameters:: - console=ttyS0,115200 kgdboc=ttyS0,115200 + console=ttyS0,115200 kgdboc=ttyS0,115200 nokaslr OR From e604f1cb85367d2e5fd4cf253296d190996da81a Mon Sep 17 00:00:00 2001 From: Zhouyi Zhou Date: Fri, 7 Jul 2017 16:51:45 +0800 Subject: [PATCH 0393/1795] docs: disable KASLR when debugging kernel commit 6807c84652b0 ("x86: Enable KASLR by default") enables KASLR by default on x86. While KASLR will confuse gdb which resolve kernel symbol address from symbol table of vmlinux. We should turn off KASLR for kernel debugging. Signed-off-by: Zhouyi Zhou Reviewed-by: Kieran Bingham Acked-by: Jan Kiszka Signed-off-by: Jonathan Corbet --- Documentation/dev-tools/gdb-kernel-debugging.rst | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Documentation/dev-tools/gdb-kernel-debugging.rst b/Documentation/dev-tools/gdb-kernel-debugging.rst index 5e93c9bc6619..19df79286f00 100644 --- a/Documentation/dev-tools/gdb-kernel-debugging.rst +++ b/Documentation/dev-tools/gdb-kernel-debugging.rst @@ -31,11 +31,13 @@ Setup CONFIG_DEBUG_INFO_REDUCED off. If your architecture supports CONFIG_FRAME_POINTER, keep it enabled. -- Install that kernel on the guest. 
+- Install that kernel on the guest, turn off KASLR if necessary by adding + "nokaslr" to the kernel command line. Alternatively, QEMU allows to boot the kernel directly using -kernel, -append, -initrd command line switches. This is generally only useful if you do not depend on modules. See QEMU documentation for more details on - this mode. + this mode. In this case, you should build the kernel with + CONFIG_RANDOMIZE_BASE disabled if the architecture supports KASLR. - Enable the gdb stub of QEMU/KVM, either From cea3a330ee20e90011b10fc03944664e767e0dd2 Mon Sep 17 00:00:00 2001 From: Philippe CORNU Date: Mon, 17 Jul 2017 09:40:17 +0200 Subject: [PATCH 0394/1795] drm/stm: ltdc: Fix leak of px clk enable in some error paths The pixel clock gets enabled early during init, since it's required in order to read registers. This pixel clock must be disabled if errors during this init phase. Signed-off-by: Eric Anholt Acked-by: Philippe Cornu Signed-off-by: Archit Taneja Link: https://patchwork.freedesktop.org/patch/msgid/1500277223-29553-2-git-send-email-philippe.cornu@st.com --- drivers/gpu/drm/stm/ltdc.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index 533176015cbb..7f64d5aeb080 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ b/drivers/gpu/drm/stm/ltdc.c @@ -1045,13 +1045,15 @@ int ltdc_load(struct drm_device *ddev) if (of_address_to_resource(np, 0, &res)) { DRM_ERROR("Unable to get resource\n"); - return -ENODEV; + ret = -ENODEV; + goto err; } ldev->regs = devm_ioremap_resource(dev, &res); if (IS_ERR(ldev->regs)) { DRM_ERROR("Unable to get ltdc registers\n"); - return PTR_ERR(ldev->regs); + ret = PTR_ERR(ldev->regs); + goto err; } for (i = 0; i < MAX_IRQ; i++) { @@ -1064,7 +1066,7 @@ int ltdc_load(struct drm_device *ddev) dev_name(dev), ddev); if (ret) { DRM_ERROR("Failed to register LTDC interrupt\n"); - return ret; + goto err; } } @@ -1079,7 +1081,7 @@ int ltdc_load(struct drm_device *ddev) if (ret) { DRM_ERROR("hardware identifier (0x%08x) not supported!\n", ldev->caps.hw_version); - return ret; + goto err; } DRM_INFO("ltdc hw version 0x%08x - ready\n", ldev->caps.hw_version); From bdf31bcf3d84ef0d107bbfb9442297cdb07a98b0 Mon Sep 17 00:00:00 2001 From: Philippe CORNU Date: Mon, 17 Jul 2017 09:40:18 +0200 Subject: [PATCH 0395/1795] drm/stm: ltdc: Add panel-bridge support Add the panel-bridge support for both panels & bridges (used by DSI host & HDMI/LVDS bridges). Signed-off-by: Philippe CORNU Reviewed-by: Archit Taneja Signed-off-by: Archit Taneja Link: https://patchwork.freedesktop.org/patch/msgid/1500277223-29553-3-git-send-email-philippe.cornu@st.com --- drivers/gpu/drm/stm/Kconfig | 2 +- drivers/gpu/drm/stm/ltdc.c | 210 ++++++------------------------------ drivers/gpu/drm/stm/ltdc.h | 3 +- 3 files changed, 38 insertions(+), 177 deletions(-) diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig index 2c4817fb0890..4b88223f9aed 100644 --- a/drivers/gpu/drm/stm/Kconfig +++ b/drivers/gpu/drm/stm/Kconfig @@ -4,7 +4,7 @@ config DRM_STM select DRM_KMS_HELPER select DRM_GEM_CMA_HELPER select DRM_KMS_CMA_HELPER - select DRM_PANEL + select DRM_PANEL_BRIDGE select VIDEOMODE_HELPERS select FB_PROVIDE_GET_FB_UNMAPPED_AREA default y diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index 7f64d5aeb080..e46b427eacc7 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ b/drivers/gpu/drm/stm/ltdc.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include