/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "intel_runtime_pm.h"
#include "i915_gem.h"
static void rpm_get(struct intel_runtime_pm *rpm, struct intel_wakeref *wf)
{
        wf->wakeref = intel_runtime_pm_get(rpm);
}

static void rpm_put(struct intel_runtime_pm *rpm, struct intel_wakeref *wf)
{
        intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);

        intel_runtime_pm_put(rpm, wakeref);
        GEM_BUG_ON(!wakeref);
}
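
/*
 * Slow path for the first reference: under wf->mutex, if the count is still
 * zero, acquire the runtime-pm wakeref and run the caller's @fn callback
 * before publishing the reference by bumping wf->count.
 */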
int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
                              struct intel_wakeref *wf,
                              int (*fn)(struct intel_wakeref *wf))
{
        /*
         * Treat get/put as different subclasses, as we may need to run
         * the put callback from under the shrinker and do not want to
         * cross-contaminate that callback with any extra work performed
         * upon acquiring the wakeref.
         */
        mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);
        if (!atomic_read(&wf->count)) {
                int err;

                rpm_get(rpm, wf);

                err = fn(wf);
                if (unlikely(err)) {
                        rpm_put(rpm, wf);
                        mutex_unlock(&wf->mutex);
                        return err;
                }

                smp_mb__before_atomic(); /* release wf->count */
        }
        atomic_inc(&wf->count);
        mutex_unlock(&wf->mutex);

        return 0;
}
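
/*
 * Slow path for the final reference: called with wf->mutex held once
 * wf->count has reached zero, runs the caller's @fn callback and, on
 * success, releases the runtime-pm wakeref; on failure the reference is
 * restored so the wakeref stays active.
 */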
int __intel_wakeref_put_last(struct intel_runtime_pm *rpm,
                             struct intel_wakeref *wf,
                             int (*fn)(struct intel_wakeref *wf))
{
        int err;

        err = fn(wf);
        if (likely(!err))
                rpm_put(rpm, wf);
        else
                atomic_inc(&wf->count);
        mutex_unlock(&wf->mutex);

        return err;
}
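
/*
 * One-time setup: initialise the mutex with the caller's lockdep class and
 * start with no reference and no runtime-pm cookie.
 */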
void __intel_wakeref_init(struct intel_wakeref *wf, struct lock_class_key *key)
{
        __mutex_init(&wf->mutex, "wakeref", key);
        atomic_set(&wf->count, 0);
        wf->wakeref = 0;
}
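
/*
 * Timer callback for the auto wakeref: once the last auto reference expires,
 * clear the stored cookie and release the runtime-pm wakeref.
 */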
static void wakeref_auto_timeout(struct timer_list *t)
{
        struct intel_wakeref_auto *wf = from_timer(wf, t, timer);
        intel_wakeref_t wakeref;
        unsigned long flags;

        if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags))
                return;

        wakeref = fetch_and_zero(&wf->wakeref);
        spin_unlock_irqrestore(&wf->lock, flags);

        intel_runtime_pm_put(wf->rpm, wakeref);
}
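
/*
 * Prepare the auto-release wakeref: spinlock, expiry timer, cleared
 * count/cookie and the runtime-pm backpointer used by the timer callback.
 */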
void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
                             struct intel_runtime_pm *rpm)
{
        spin_lock_init(&wf->lock);
        timer_setup(&wf->timer, wakeref_auto_timeout, 0);
        refcount_set(&wf->count, 0);
        wf->wakeref = 0;
        wf->rpm = rpm;
}
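
/*
 * Arm (or extend) a self-releasing wakeref so the device stays awake until
 * at least @timeout jiffies from now; the timer callback drops it again.
 * A zero @timeout cancels the timer and releases any wakeref still held.
 */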
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
{
        unsigned long flags;

        if (!timeout) {
                if (del_timer_sync(&wf->timer))
                        wakeref_auto_timeout(&wf->timer);
                return;
        }

        /* Our mission is that we only extend an already active wakeref */
        assert_rpm_wakelock_held(wf->rpm);

        if (!refcount_inc_not_zero(&wf->count)) {
                spin_lock_irqsave(&wf->lock, flags);
                if (!refcount_inc_not_zero(&wf->count)) {
                        GEM_BUG_ON(wf->wakeref);
                        wf->wakeref = intel_runtime_pm_get_if_in_use(wf->rpm);
                        refcount_set(&wf->count, 1);
                }
                spin_unlock_irqrestore(&wf->lock, flags);
        }

        /*
         * If we extend a pending timer, we will only get a single timer
         * callback and so need to cancel the local inc by running the
         * elided callback to keep the wf->count balanced.
         */
        if (mod_timer(&wf->timer, jiffies + timeout))
                wakeref_auto_timeout(&wf->timer);
}
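
/* Teardown: cancel the timer and drop any wakeref the auto mechanism holds. */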
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
{
        intel_wakeref_auto(wf, 0);
        GEM_BUG_ON(wf->wakeref);
}