/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell, the display controller supports Panel Self-Refresh on
 * display panels which have a remote frame buffer (RFB) implemented according
 * to the PSR spec in eDP 1.3. PSR allows the display to go to lower standby
 * states when the system is idle but the display is on, as it eliminates
 * display refresh requests to DDR memory completely as long as the frame
 * buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows
 * us to power down the link and memory controller. For DSI panels the same
 * idea is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */
2019-02-06 13:18:45 -08:00
# include <drm/drmP.h>
# include <drm/drm_atomic_helper.h>
2014-11-14 08:52:28 -08:00
# include "intel_drv.h"
# include "i915_drv.h"
2018-08-09 16:21:01 +02:00
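
/*
 * The enable_psr modparam is only honoured in I915_PSR_DEBUG_DEFAULT mode;
 * any other debug mode forces PSR on or off regardless of the modparam.
 */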
static bool psr_global_enabled(u32 debug)
{
	switch (debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DEFAULT:
		return i915_modparams.enable_psr;
	case I915_PSR_DEBUG_DISABLE:
		return false;
	default:
		return true;
	}
}

static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
			       const struct intel_crtc_state *crtc_state)
{
	/* Cannot enable DSC and PSR2 simultaneously */
	WARN_ON(crtc_state->dsc_params.compression_enable &&
		crtc_state->has_psr2);

	switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DISABLE:
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
		return crtc_state->has_psr2;
	}
}
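
/*
 * EDP_PSR_IIR/IMR pack the error, post-exit and pre-entry bits of every
 * transcoder into a single register; this returns the bit offset of the
 * group belonging to @cpu_transcoder.
 */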
static int edp_psr_shift(enum transcoder cpu_transcoder)
{
	switch (cpu_transcoder) {
	case TRANSCODER_A:
		return EDP_PSR_TRANSCODER_A_SHIFT;
	case TRANSCODER_B:
		return EDP_PSR_TRANSCODER_B_SHIFT;
	case TRANSCODER_C:
		return EDP_PSR_TRANSCODER_C_SHIFT;
	default:
		MISSING_CASE(cpu_transcoder);
		/* fallthrough */
	case TRANSCODER_EDP:
		return EDP_PSR_TRANSCODER_EDP_SHIFT;
	}
}
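
/*
 * Unmask PSR interrupts: AUX errors are always unmasked, while the
 * entry/exit debug interrupts are only unmasked when I915_PSR_DEBUG_IRQ
 * is set in @debug.
 */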
void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
{
	u32 debug_mask, mask;
	enum transcoder cpu_transcoder;
	u32 transcoders = BIT(TRANSCODER_EDP);

	if (INTEL_GEN(dev_priv) >= 8)
		transcoders |= BIT(TRANSCODER_A) |
			       BIT(TRANSCODER_B) |
			       BIT(TRANSCODER_C);

	debug_mask = 0;
	mask = 0;
	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
		int shift = edp_psr_shift(cpu_transcoder);

		mask |= EDP_PSR_ERROR(shift);
		debug_mask |= EDP_PSR_POST_EXIT(shift) |
			      EDP_PSR_PRE_ENTRY(shift);
	}

	if (debug & I915_PSR_DEBUG_IRQ)
		mask |= debug_mask;

	I915_WRITE(EDP_PSR_IMR, ~mask);
}

static void psr_event_print(u32 val, bool psr2_enabled)
{
	DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		DRM_DEBUG_KMS("\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		DRM_DEBUG_KMS("\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		DRM_DEBUG_KMS("\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		DRM_DEBUG_KMS("\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		DRM_DEBUG_KMS("\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		DRM_DEBUG_KMS("\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		DRM_DEBUG_KMS("\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		DRM_DEBUG_KMS("\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		DRM_DEBUG_KMS("\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		DRM_DEBUG_KMS("\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		DRM_DEBUG_KMS("\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
		DRM_DEBUG_KMS("\tPSR disabled\n");
}

void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
{
	u32 transcoders = BIT(TRANSCODER_EDP);
	enum transcoder cpu_transcoder;
	ktime_t time_ns = ktime_get();
	u32 mask = 0;

	if (INTEL_GEN(dev_priv) >= 8)
		transcoders |= BIT(TRANSCODER_A) |
			       BIT(TRANSCODER_B) |
			       BIT(TRANSCODER_C);

	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
		int shift = edp_psr_shift(cpu_transcoder);

		if (psr_iir & EDP_PSR_ERROR(shift)) {
			DRM_WARN("[transcoder %s] PSR aux error\n",
				 transcoder_name(cpu_transcoder));

			dev_priv->psr.irq_aux_error = true;

			/*
			 * If this interrupt is not masked it will keep
			 * firing so fast that it prevents the scheduled
			 * work from running.
			 * Also, after a PSR error we don't want to arm PSR
			 * again, so we don't care about unmasking the
			 * interrupt or clearing irq_aux_error.
			 */
			mask |= EDP_PSR_ERROR(shift);
		}

		if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
			dev_priv->psr.last_entry_attempt = time_ns;
			DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
				      transcoder_name(cpu_transcoder));
		}

		if (psr_iir & EDP_PSR_POST_EXIT(shift)) {
			dev_priv->psr.last_exit = time_ns;
			DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
				      transcoder_name(cpu_transcoder));

			if (INTEL_GEN(dev_priv) >= 9) {
				u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
				bool psr2_enabled = dev_priv->psr.psr2_enabled;

				I915_WRITE(PSR_EVENT(cpu_transcoder), val);
				psr_event_print(val, psr2_enabled);
			}
		}
	}

	if (mask) {
		mask |= I915_READ(EDP_PSR_IMR);
		I915_WRITE(EDP_PSR_IMR, mask);

		schedule_work(&dev_priv->psr.work);
	}
}

static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
	u8 dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;
	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}

static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	u8 alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}
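
/*
 * DP_SYNCHRONIZATION_LATENCY_IN_SINK reports how many frames the sink may
 * need to resynchronize; it is later used to size the idle frame count and
 * the PSR2 frames-before-SU value.
 */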
static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	u8 val = 8; /* assume the worst if we can't read the value */

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
	return val;
}

static u16 intel_dp_get_su_x_granularity(struct intel_dp *intel_dp)
{
	u16 val;
	ssize_t r;

	/*
	 * Returning the default X granularity if granularity not required or
	 * if DPCD read fails
	 */
	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
		return 4;

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
	if (r != 2)
		DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");

	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	if (r != 2 || val == 0)
		val = 4;

	return val;
}

void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (!intel_dp->psr_dpcd[0])
		return;
	DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
		      intel_dp->psr_dpcd[0]);

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
		DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}

	dev_priv->psr.sink_support = true;
	dev_priv->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);

	WARN_ON(dev_priv->psr.dp);
	dev_priv->psr.dp = intel_dp;

	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC, but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way the panel is capable of doing selective
		 * updates without an aux frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h without
		 * Y-coordinate requirement panels we would need to enable
		 * GTC first.
		 */
		dev_priv->psr.sink_psr2_support = y_req && alpm;
		DRM_DEBUG_KMS("PSR2 %ssupported\n",
			      dev_priv->psr.sink_psr2_support ? "" : "not ");

		if (dev_priv->psr.sink_psr2_support) {
			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			dev_priv->psr.su_x_granularity =
				intel_dp_get_su_x_granularity(intel_dp);
		}
	}
}
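
/*
 * HB2/HB3 below select the VSC SDP revision the sink should parse: 0x2/0x8
 * for plain PSR, 0x4/0xe for PSR2 with Y-coordinate, and 0x5/0x13 when
 * colorimetry data is carried as well.
 */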
static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct edp_vsc_psr psr_vsc;

	if (dev_priv->psr.psr2_enabled) {
		/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		if (dev_priv->psr.colorimetry_support) {
			psr_vsc.sdp_header.HB2 = 0x5;
			psr_vsc.sdp_header.HB3 = 0x13;
		} else {
			psr_vsc.sdp_header.HB2 = 0x4;
			psr_vsc.sdp_header.HB3 = 0xe;
		}
	} else {
		/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		psr_vsc.sdp_header.HB2 = 0x2;
		psr_vsc.sdp_header.HB3 = 0x8;
	}

	intel_dig_port->write_infoframe(&intel_dig_port->base,
					crtc_state,
					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}
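
/*
 * HSW/BDW hardware sends the pre-packed native AUX write below (DP_SET_POWER
 * <- D0) on its own when exiting PSR, which is why the message is programmed
 * into the EDP_PSR_AUX_DATA registers up front.
 */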
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 aux_clock_divider, aux_ctl;
	int i;
	static const u8 aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(EDP_PSR_AUX_DATA(i >> 2),
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= psr_aux_mask;
	I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
}

static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 dpcd_val = DP_PSR_ENABLE;

	/* Enable ALPM at sink for psr2 */
	if (dev_priv->psr.psr2_enabled) {
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE);
		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
	} else {
		if (dev_priv->psr.link_standby)
			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;

		if (INTEL_GEN(dev_priv) >= 8)
			dpcd_val |= DP_PSR_CRC_VERIFICATION;
	}

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
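
/*
 * TP1/TP2/TP3 are the training patterns sent while waking the link on PSR
 * exit; the VBT provides per-panel wakeup budgets that are rounded up to
 * the discrete register settings below.
 */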
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	/* sink_sync_latency of 8 means source has to wait for more than 8
	 * frames, we'll go with 9 frames for now
	 */
	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	if (INTEL_GEN(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
	I915_WRITE(EDP_PSR_CTL, val);
}

static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;

	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);

	if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
	    dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	I915_WRITE(EDP_PSR2_CTL, val);
}
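
/*
 * PSR2 selective updates are constrained by the hardware: the mode must fit
 * within the maximum SU resolution and the sink's X granularity, otherwise
 * we fall back to PSR1.
 */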
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0;

	if (!dev_priv->psr.sink_psr2_support)
		return false;

	/*
	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
	 * resolution requires DSC to be enabled, priority is given to DSC
	 * over PSR2.
	 */
	if (crtc_state->dsc_params.compression_enable) {
		DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
		return false;
	}

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
	} else if (IS_GEN(dev_priv, 9)) {
		psr_max_h = 3640;
		psr_max_v = 2304;
	}

	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
		DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			      crtc_hdisplay, crtc_vdisplay,
			      psr_max_h, psr_max_v);
		return false;
	}

	/*
	 * HW sends SU blocks of size four scan lines, which means the starting
	 * X coordinate and Y granularity requirements will always be met. We
	 * only need to validate that the SU block width is a multiple of
	 * the x granularity.
	 */
	if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
		DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
			      crtc_hdisplay, dev_priv->psr.su_x_granularity);
		return false;
	}

	if (crtc_state->crc_enabled) {
		DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
		return false;
	}

	return true;
}

void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int psr_setup_time;

	if (!CAN_PSR(dev_priv))
		return;

	if (intel_dp != dev_priv->psr.dp)
		return;

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms with DDI implementation of PSR have different
	 * PSR registers per transcoder and we only implement transcoder EDP
	 * ones. Since by display design transcoder EDP is tied to port A,
	 * we can safely escape based on port A.
	 */
	if (dig_port->base.port != PORT_A) {
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return;
	}

	if (dev_priv->psr.sink_not_reliable) {
		DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
		return;
	}

	if (IS_HASWELL(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced mode enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
		return;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (INTEL_GEN(dev_priv) >= 9)
		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
	WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	/* psr1 and psr2 are mutually exclusive. */
	if (dev_priv->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	dev_priv->psr.active = true;
}

static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv,
					 enum transcoder cpu_transcoder)
{
	static const i915_reg_t regs[] = {
		[TRANSCODER_A] = CHICKEN_TRANS_A,
		[TRANSCODER_B] = CHICKEN_TRANS_B,
		[TRANSCODER_C] = CHICKEN_TRANS_C,
		[TRANSCODER_EDP] = CHICKEN_TRANS_EDP,
	};

	WARN_ON(INTEL_GEN(dev_priv) < 9);

	if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) ||
		    !regs[cpu_transcoder].reg))
		cpu_transcoder = TRANSCODER_A;

	return regs[cpu_transcoder];
}

static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 mask;

	/* Only HSW and BDW have PSR AUX registers that need to be setup.
	 * SKL+ use hardcoded values for PSR AUX transactions.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

	if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
					   !IS_GEMINILAKE(dev_priv))) {
		i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
							cpu_transcoder);
		u32 chicken = I915_READ(reg);

		chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
			   PSR2_ADD_VERTICAL_LINE_COUNT;
		I915_WRITE(reg, chicken);
	}

	/*
	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
	 * mask LPSP to avoid dependency on other drivers that might block
	 * runtime_pm, besides preventing other hw tracking issues, now that
	 * we can rely on frontbuffer tracking.
	 */
	mask = EDP_PSR_DEBUG_MASK_MEMUP |
	       EDP_PSR_DEBUG_MASK_HPD |
	       EDP_PSR_DEBUG_MASK_LPSP |
	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;

	if (INTEL_GEN(dev_priv) < 11)
		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

	I915_WRITE(EDP_PSR_DEBUG, mask);
}

static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = dev_priv->psr.dp;

	WARN_ON(dev_priv->psr.enabled);

	dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
	dev_priv->psr.busy_frontbuffer_bits = 0;
	dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;

	DRM_DEBUG_KMS("Enabling PSR%s\n",
		      dev_priv->psr.psr2_enabled ? "2" : "1");
	intel_psr_setup_vsc(intel_dp, crtc_state);
	intel_psr_enable_sink(intel_dp);
	intel_psr_enable_source(intel_dp, crtc_state);
	dev_priv->psr.enabled = true;

	intel_psr_activate(intel_dp);
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	WARN_ON(dev_priv->drrs.dp);

	mutex_lock(&dev_priv->psr.lock);
	if (!psr_global_enabled(dev_priv->psr.debug)) {
		DRM_DEBUG_KMS("PSR disabled by flag\n");
		goto unlock;
	}

	intel_psr_enable_locked(dev_priv, crtc_state);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!dev_priv->psr.active) {
		if (INTEL_GEN(dev_priv) >= 9)
			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
		return;
	}

	if (dev_priv->psr.psr2_enabled) {
		val = I915_READ(EDP_PSR2_CTL);
		WARN_ON(!(val & EDP_PSR2_ENABLE));
		I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
	} else {
		val = I915_READ(EDP_PSR_CTL);
		WARN_ON(!(val & EDP_PSR_ENABLE));
		I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
	}
	dev_priv->psr.active = false;
}

static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t psr_status;
	u32 psr_status_mask;

	lockdep_assert_held(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		return;

	DRM_DEBUG_KMS("Disabling PSR%s\n",
		      dev_priv->psr.psr2_enabled ? "2" : "1");

	intel_psr_exit(dev_priv);

	if (dev_priv->psr.psr2_enabled) {
		psr_status = EDP_PSR2_STATUS;
		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		psr_status = EDP_PSR_STATUS;
		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
	}

	/* Wait till PSR is idle */
	if (intel_wait_for_register(dev_priv, psr_status, psr_status_mask, 0,
				    2000))
		DRM_ERROR("Timed out waiting PSR idle state\n");

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	dev_priv->psr.enabled = false;
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	mutex_lock(&dev_priv->psr.lock);

	intel_psr_disable_locked(intel_dp);

	mutex_unlock(&dev_priv->psr.lock);
	cancel_work_sync(&dev_priv->psr.work);
}

static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
{
	/*
	 * Display WA #0884: all
	 * This documented WA for bxt can be safely applied
	 * broadly so we can force HW tracking to exit PSR
	 * instead of disabling and re-enabling.
	 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
	 * but it makes more sense to write to the current active
	 * pipe.
	 */
	I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
}

/**
 * intel_psr_update - Update PSR state
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function will update the PSR state, disabling, enabling or switching
 * the PSR version when executing fastsets. For full modesets,
 * intel_psr_disable() and intel_psr_enable() should be called instead.
 */
void intel_psr_update(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	bool enable, psr2_enable;

	if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
		return;

	mutex_lock(&dev_priv->psr.lock);

	enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
	psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);

	if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
		if (crtc_state->crc_enabled && psr->enabled)
			psr_force_hw_tracking_exit(dev_priv);

		goto unlock;
	}

	if (psr->enabled)
		intel_psr_disable_locked(intel_dp);

	if (enable)
		intel_psr_enable_locked(dev_priv, crtc_state);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_wait_for_idle - wait for PSR1 to idle
 * @new_crtc_state: new CRTC state
 * @out_value: PSR status in case of failure
 *
 * This function is expected to be called from pipe_update_start() where it is
 * not expected to race with PSR enable or disable.
 *
 * Returns: 0 on success or -ETIMEDOUT if PSR status does not idle.
 */
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
			    u32 *out_value)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
		return 0;

	/* FIXME: Update this for PSR2 if we need to wait for idle */
	if (READ_ONCE(dev_priv->psr.psr2_enabled))
		return 0;

	/*
	 * From bspec: Panel Self Refresh (BDW+)
	 * Max. time for PSR to idle = inverse of the refresh rate + 6 ms of
	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
	 * defensive enough to cover everything.
	 */
	return __intel_wait_for_register(dev_priv, EDP_PSR_STATUS,
					 EDP_PSR_STATUS_STATE_MASK,
					 EDP_PSR_STATUS_STATE_IDLE, 2, 50,
					 out_value);
}
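
/*
 * Wait for PSR to idle with psr.lock dropped so that concurrent invalidates
 * and flushes can make progress; callers must re-check psr.enabled once the
 * lock is re-taken, which is why it is part of the return value.
 */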
static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg;
	u32 mask;
	int err;

	if (!dev_priv->psr.enabled)
		return false;

	if (dev_priv->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS;
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = EDP_PSR_STATUS;
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	mutex_unlock(&dev_priv->psr.lock);

	err = intel_wait_for_register(dev_priv, reg, mask, 0, 50);
	if (err)
		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");

	/* After the unlocked wait, verify that PSR is still wanted! */
	mutex_lock(&dev_priv->psr.lock);
	return err == 0 && dev_priv->psr.enabled;
}
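
/*
 * Flag the first active PSR-capable CRTC as mode-changed so the resulting
 * atomic commit re-evaluates the PSR state and a new debug mode takes
 * effect immediately.
 */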
static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_crtc *crtc;
	int err;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
	state->acquire_ctx = &ctx;

retry:
	drm_for_each_crtc(crtc, dev) {
		struct drm_crtc_state *crtc_state;
		struct intel_crtc_state *intel_crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			goto error;
		}

		intel_crtc_state = to_intel_crtc_state(crtc_state);

		if (crtc_state->active && intel_crtc_state->has_psr) {
			/* Mark mode as changed to trigger a pipe->update() */
			crtc_state->mode_changed = true;
			break;
		}
	}

	err = drm_atomic_commit(state);

error:
	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
		if (!err)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_atomic_state_put(state);

	return err;
}

int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
{
	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
	u32 old_mode;
	int ret;

	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
	    mode > I915_PSR_DEBUG_FORCE_PSR1) {
		DRM_DEBUG_KMS("Invalid debug mask %llx\n", val);
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&dev_priv->psr.lock);
	if (ret)
		return ret;

	old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
	dev_priv->psr.debug = val;
	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);

	mutex_unlock(&dev_priv->psr.lock);

	if (old_mode != mode)
		ret = intel_psr_fastset_force(dev_priv);

	return ret;
}

static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
{
	struct i915_psr *psr = &dev_priv->psr;

	intel_psr_disable_locked(psr->dp);
	psr->sink_not_reliable = true;
	/* let's make sure that sink is awake */
	drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
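
/*
 * Worker that re-activates PSR once the hardware has idled after a
 * frontbuffer flush, and tears PSR down first if an AUX error interrupt
 * fired in the meantime.
 */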
static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work);

	mutex_lock(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		goto unlock;

	if (READ_ONCE(dev_priv->psr.irq_aux_error))
		intel_psr_handle_irq(dev_priv);

	/*
	 * We have to make sure PSR is ready for re-enable,
	 * otherwise it stays disabled until the next full enable/disable
	 * cycle. PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(dev_priv))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
		goto unlock;

	intel_psr_activate(dev_priv->psr.dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits)
		psr_force_hw_tracking_exit(dev_priv);

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		schedule_work(&dev_priv->psr.work);

	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!HAS_PSR(dev_priv))
		return;

	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

	if (!dev_priv->psr.sink_support)
		return;

	if (i915_modparams.enable_psr == -1)
		if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
			i915_modparams.enable_psr = 0;

	/*
	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
	 * will still keep the error set even after the reset done in the
	 * irq_preinstall and irq_uninstall hooks.
	 * Enabling in this situation causes the screen to freeze the first
	 * time that PSR HW tries to activate, so let's keep PSR disabled
	 * to avoid any rendering problems.
	 */
	val = I915_READ(EDP_PSR_IIR);
	val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP));
	if (val) {
		DRM_DEBUG_KMS("PSR interruption error set\n");
		dev_priv->psr.sink_not_reliable = true;
		return;
	}

	/* Set link_standby x link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		dev_priv->psr.link_standby = false;
	else
		/* For new platforms let's respect VBT back again */
		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

	INIT_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);
}

void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	u8 val;
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled || psr->dp != intel_dp)
		goto exit;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
		DRM_ERROR("PSR_STATUS dpcd read failed\n");
		goto exit;
	}

	if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
		DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
		DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
		goto exit;
	}

	if (val & DP_PSR_RFB_STORAGE_ERROR)
		DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
	if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (val & DP_PSR_LINK_CRC_ERROR)
		DRM_ERROR("PSR Link CRC error, disabling PSR\n");

	if (val & ~errors)
		DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
			  val & ~errors);

	if (val & errors) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}

	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
exit:
	mutex_unlock(&psr->lock);
}

bool intel_psr_enabled(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	bool ret;

	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
		return false;

	mutex_lock(&dev_priv->psr.lock);
	ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
	mutex_unlock(&dev_priv->psr.lock);

	return ret;
}