/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */
#include <linux/pm_runtime.h>

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_trace.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
 */
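
/*
 * A minimal usage sketch (illustrative only, not driver code): callers grab a
 * wakeref cookie around any hardware access and hand it back symmetrically.
 * The example_touch_hw() wrapper and hw_access() helper below are hypothetical
 * names used purely for illustration; the with_intel_runtime_pm() macro in
 * intel_runtime_pm.h wraps the same pattern.
 *
 *	static void example_touch_hw(struct intel_runtime_pm *rpm)
 *	{
 *		intel_wakeref_t wakeref;
 *
 *		wakeref = intel_runtime_pm_get(rpm);
 *		hw_access();
 *		intel_runtime_pm_put(rpm, wakeref);
 *	}
 */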

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

#include <linux/sort.h>

#define STACKDEPTH 8

static noinline depot_stack_handle_t __save_depot_stack(void)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}

static void __print_depot_stack(depot_stack_handle_t stack,
				char *buf, int sz, int indent)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(stack, &entries);
	stack_trace_snprint(buf, sz, entries, nr_entries, indent);
}

static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	spin_lock_init(&rpm->debug.lock);
}

static noinline depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	depot_stack_handle_t stack, *stacks;
	unsigned long flags;

	if (!rpm->available)
		return -1;

	stack = __save_depot_stack();
	if (!stack)
		return -1;

	spin_lock_irqsave(&rpm->debug.lock, flags);

	if (!rpm->debug.count)
		rpm->debug.last_acquire = stack;

	stacks = krealloc(rpm->debug.owners,
			  (rpm->debug.count + 1) * sizeof(*stacks),
			  GFP_NOWAIT | __GFP_NOWARN);
	if (stacks) {
		stacks[rpm->debug.count++] = stack;
		rpm->debug.owners = stacks;
	} else {
		stack = -1;
	}

	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	return stack;
}

static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
					     depot_stack_handle_t stack)
{
	struct drm_i915_private *i915 = container_of(rpm,
						     struct drm_i915_private,
						     runtime_pm);
	unsigned long flags, n;
	bool found = false;

	if (unlikely(stack == -1))
		return;

	spin_lock_irqsave(&rpm->debug.lock, flags);
	for (n = rpm->debug.count; n--; ) {
		if (rpm->debug.owners[n] == stack) {
			memmove(rpm->debug.owners + n,
				rpm->debug.owners + n + 1,
				(--rpm->debug.count - n) * sizeof(stack));
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	if (drm_WARN(&i915->drm, !found,
		     "Unmatched wakeref (tracking %lu), count %u\n",
		     rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
		char *buf;

		buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
		if (!buf)
			return;

		__print_depot_stack(stack, buf, PAGE_SIZE, 2);
		DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);

		stack = READ_ONCE(rpm->debug.last_release);
		if (stack) {
			__print_depot_stack(stack, buf, PAGE_SIZE, 2);
			DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
		}

		kfree(buf);
	}
}

static int cmphandle(const void *_a, const void *_b)
{
	const depot_stack_handle_t * const a = _a, * const b = _b;

	if (*a < *b)
		return -1;
	else if (*a > *b)
		return 1;
	else
		return 0;
}

static void
__print_intel_runtime_pm_wakeref(struct drm_printer *p,
				 const struct intel_runtime_pm_debug *dbg)
{
	unsigned long i;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
	if (!buf)
		return;

	if (dbg->last_acquire) {
		__print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last acquired:\n%s", buf);
	}

	if (dbg->last_release) {
		__print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last released:\n%s", buf);
	}

	drm_printf(p, "Wakeref count: %lu\n", dbg->count);

	sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);

	for (i = 0; i < dbg->count; i++) {
		depot_stack_handle_t stack = dbg->owners[i];
		unsigned long rep;

		rep = 1;
		while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
			rep++, i++;
		__print_depot_stack(stack, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
	}

	kfree(buf);
}

static noinline void
__untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
		       struct intel_runtime_pm_debug *saved)
{
	*saved = *debug;

	debug->owners = NULL;
	debug->count = 0;
	debug->last_release = __save_depot_stack();
}

static void
dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
{
	if (debug->count) {
		struct drm_printer p = drm_debug_printer("i915");

		__print_intel_runtime_pm_wakeref(&p, debug);
	}

	kfree(debug->owners);
}

static noinline void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
	struct intel_runtime_pm_debug dbg = {};
	unsigned long flags;

	if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
					 &rpm->debug.lock,
					 flags))
		return;

	__untrack_all_wakerefs(&rpm->debug, &dbg);
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	dump_and_free_wakeref_tracking(&dbg);
}

static noinline void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
	struct intel_runtime_pm_debug dbg = {};
	unsigned long flags;

	spin_lock_irqsave(&rpm->debug.lock, flags);
	__untrack_all_wakerefs(&rpm->debug, &dbg);
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	dump_and_free_wakeref_tracking(&dbg);
}

void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
				    struct drm_printer *p)
{
	struct intel_runtime_pm_debug dbg = {};

	do {
		unsigned long alloc = dbg.count;
		depot_stack_handle_t *s;

		spin_lock_irq(&rpm->debug.lock);
		dbg.count = rpm->debug.count;
		if (dbg.count <= alloc) {
			memcpy(dbg.owners,
			       rpm->debug.owners,
			       dbg.count * sizeof(*s));
		}
		dbg.last_acquire = rpm->debug.last_acquire;
		dbg.last_release = rpm->debug.last_release;
		spin_unlock_irq(&rpm->debug.lock);
		if (dbg.count <= alloc)
			break;

		s = krealloc(dbg.owners,
			     dbg.count * sizeof(*s),
			     GFP_NOWAIT | __GFP_NOWARN);
		if (!s)
			goto out;

		dbg.owners = s;
	} while (1);

	__print_intel_runtime_pm_wakeref(p, &dbg);

out:
	kfree(dbg.owners);
}

#else

static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
}

static depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	return -1;
}

static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
					     intel_wakeref_t wref)
{
}

static void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
	atomic_dec(&rpm->wakeref_count);
}

static void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
}

#endif

static void
intel_runtime_pm_acquire(struct intel_runtime_pm *rpm, bool wakelock)
{
	if (wakelock) {
		atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
		assert_rpm_wakelock_held(rpm);
	} else {
		atomic_inc(&rpm->wakeref_count);
		assert_rpm_raw_wakeref_held(rpm);
	}
}

static void
intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock)
{
	if (wakelock) {
		assert_rpm_wakelock_held(rpm);
		atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
	} else {
		assert_rpm_raw_wakeref_held(rpm);
	}

	__intel_wakeref_dec_and_check_tracking(rpm);
}

static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
					      bool wakelock)
{
	struct drm_i915_private *i915 = container_of(rpm,
						     struct drm_i915_private,
						     runtime_pm);
	int ret;

	ret = pm_runtime_get_sync(rpm->kdev);
	drm_WARN_ONCE(&i915->drm, ret < 0,
		      "pm_runtime_get_sync() failed: %d\n", ret);

	intel_runtime_pm_acquire(rpm, wakelock);

	return track_intel_runtime_pm_wakeref(rpm);
}

/**
 * intel_runtime_pm_get_raw - grab a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for
 * asynchronous PM management from display code) and ensures that it is powered
 * up. Raw references are not considered during wakelock assert checks and are
 * therefore suitable for error capture and recovery code where deadlocks are
 * possible.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put_raw() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put_raw(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get(rpm, false);
}
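
/*
 * A sketch of the raw variant in use (hypothetical caller): raw references
 * keep the device awake but stay invisible to assert_rpm_wakelock_held(),
 * which is what error capture and recovery paths rely on.
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_runtime_pm_get_raw(rpm);
 *	... capture error state without tripping wakelock asserts ...
 *	intel_runtime_pm_put_raw(rpm, wakeref);
 */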

/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get(rpm, true);
}

/**
 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already in use and ensures that it is powered up. It is illegal to try
 * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
{
	if (IS_ENABLED(CONFIG_PM)) {
		/*
		 * In case runtime PM is disabled by the RPM core and we get
		 * an -EINVAL return value we are not supposed to call this
		 * function, since the power state is undefined. This applies
		 * at the moment to the late/early system suspend/resume
		 * handlers.
		 */
		if (pm_runtime_get_if_in_use(rpm->kdev) <= 0)
			return 0;
	}

	intel_runtime_pm_acquire(rpm, true);

	return track_intel_runtime_pm_wakeref(rpm);
}
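
/*
 * A sketch of the conditional pattern (hypothetical caller): the returned
 * cookie evaluates as false when the device was not already awake, in which
 * case touching the hardware is forbidden and the caller must bail out.
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_runtime_pm_get_if_in_use(rpm);
 *	if (!wakeref)
 *		return;
 *	... access hardware, device guaranteed awake here ...
 *	intel_runtime_pm_put(rpm, wakeref);
 */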

/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on. Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm)
{
	assert_rpm_wakelock_held(rpm);
	pm_runtime_get_noresume(rpm->kdev);

	intel_runtime_pm_acquire(rpm, true);

	return track_intel_runtime_pm_wakeref(rpm);
}
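
/*
 * A sketch of the noresume variant (hypothetical caller): only legal where the
 * device is already known to be awake, e.g. when pinning a reference for a
 * work item queued from the system suspend path.
 *
 *	wakeref = intel_runtime_pm_get_noresume(rpm);
 *	queue_work(wq, &work);
 *	...
 *	(the work item later does the delayed setup and releases the
 *	 reference with intel_runtime_pm_put(rpm, wakeref))
 */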

static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm,
				   intel_wakeref_t wref,
				   bool wakelock)
{
	struct device *kdev = rpm->kdev;

	untrack_intel_runtime_pm_wakeref(rpm, wref);

	intel_runtime_pm_release(rpm, wakelock);

	pm_runtime_mark_last_busy(kdev);
	pm_runtime_put_autosuspend(kdev);
}

/**
 * intel_runtime_pm_put_raw - release a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get_raw() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void
intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(rpm, wref, false);
}

/**
 * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 *
 * This function exists only for historical reasons and should be avoided in
 * new code, as the correctness of its use cannot be checked. Always use
 * intel_runtime_pm_put() instead.
 */
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm)
{
	__intel_runtime_pm_put(rpm, -1, true);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(rpm, wref, true);
}
#endif

/**
 * intel_runtime_pm_enable - enable runtime pm
 * @rpm: the intel_runtime_pm structure
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does currently not enable runtime pm for the
 * subordinate display power domains. That is done by
 * intel_power_domains_enable().
 */
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 = container_of(rpm,
						     struct drm_i915_private,
						     runtime_pm);
	struct device *kdev = rpm->kdev;

	/*
	 * Disable the system suspend direct complete optimization, which can
	 * leave the device suspended skipping the driver's suspend handlers
	 * if the device was already runtime suspended. This is needed due to
	 * the difference in our runtime and system suspend sequence and
	 * because the HDA driver may require us to enable the audio power
	 * domain during system suspend.
	 */
	dev_pm_set_driver_flags(kdev, DPM_FLAG_NO_DIRECT_COMPLETE);

	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
	pm_runtime_mark_last_busy(kdev);

	/*
	 * Take a permanent reference to disable the RPM functionality and drop
	 * it only when unloading the driver. Use the low level get/put helpers,
	 * so the driver's own RPM reference tracking asserts also work on
	 * platforms without RPM support.
	 */
	if (!rpm->available) {
		int ret;

		pm_runtime_dont_use_autosuspend(kdev);
		ret = pm_runtime_get_sync(kdev);
		drm_WARN(&i915->drm, ret < 0,
			 "pm_runtime_get_sync() failed: %d\n", ret);
	} else {
		pm_runtime_use_autosuspend(kdev);
	}

	/*
	 * The core calls the driver load handler with an RPM reference held.
	 * We drop that here and will reacquire it during unloading in
	 * intel_power_domains_fini().
	 */
	pm_runtime_put_autosuspend(kdev);
}

void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 = container_of(rpm,
						     struct drm_i915_private,
						     runtime_pm);
	struct device *kdev = rpm->kdev;

	/* Transfer rpm ownership back to core */
	drm_WARN(&i915->drm, pm_runtime_get_sync(kdev) < 0,
		 "Failed to pass rpm ownership back to core\n");

	pm_runtime_dont_use_autosuspend(kdev);

	if (!rpm->available)
		pm_runtime_put(kdev);
}

void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 = container_of(rpm,
						     struct drm_i915_private,
						     runtime_pm);
	int count = atomic_read(&rpm->wakeref_count);

	drm_WARN(&i915->drm, count,
		 "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
		 intel_rpm_raw_wakeref_count(count),
		 intel_rpm_wakelock_count(count));

	untrack_all_intel_runtime_pm_wakerefs(rpm);
}

void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 =
			container_of(rpm, struct drm_i915_private, runtime_pm);
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	struct device *kdev = &pdev->dev;

	rpm->kdev = kdev;
	rpm->available = HAS_RUNTIME_PM(i915);

	init_intel_runtime_pm_wakeref(rpm);
}
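
/*
 * A sketch of the expected lifecycle, pieced together from the helpers above
 * (the actual calls live in the driver load/unload code elsewhere):
 *
 *	intel_runtime_pm_init_early(rpm);	look up kdev, probe availability
 *	...rest of driver load...
 *	intel_runtime_pm_enable(rpm);		hand control to the RPM core
 *	...device operational: get()/put() as needed...
 *	intel_runtime_pm_disable(rpm);		take ownership back on unload
 *	intel_runtime_pm_driver_release(rpm);	final check for leaked wakerefs
 */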