/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/wait_bit.h>

#include "intel_runtime_pm.h"
#include "intel_wakeref.h"
static void rpm_get ( struct intel_wakeref * wf )
2019-05-03 12:52:14 +01:00
{
2019-08-08 21:27:58 +01:00
wf - > wakeref = intel_runtime_pm_get ( wf - > rpm ) ;
2019-05-03 12:52:14 +01:00
}
2019-08-08 21:27:58 +01:00
static void rpm_put ( struct intel_wakeref * wf )
2019-05-03 12:52:14 +01:00
{
intel_wakeref_t wakeref = fetch_and_zero ( & wf - > wakeref ) ;
2019-08-08 21:27:58 +01:00
intel_runtime_pm_put ( wf - > rpm , wakeref ) ;
2019-06-21 19:38:01 +01:00
INTEL_WAKEREF_BUG_ON ( ! wakeref ) ;
2019-05-03 12:52:14 +01:00
}

int __intel_wakeref_get_first(struct intel_wakeref *wf)
{
	/*
	 * Treat get/put as different subclasses, as we may need to run
	 * the put callback from under the shrinker and do not want to
	 * cross-contaminate that callback with any extra work performed
	 * upon acquiring the wakeref.
	 */
	mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);
	if (!atomic_read(&wf->count)) {
		int err;

		rpm_get(wf);

		err = wf->ops->get(wf);
		if (unlikely(err)) {
			rpm_put(wf);
			mutex_unlock(&wf->mutex);
			return err;
		}

		smp_mb__before_atomic(); /* release wf->count */
	}
	atomic_inc(&wf->count);
	mutex_unlock(&wf->mutex);

	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	return 0;
}
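
/*
 * Caller-side sketch (assuming the inline fast path in intel_wakeref.h looks
 * roughly like the below): the common case is a plain atomic increment, and
 * only the 0 -> 1 transition drops into the slow path above to take the
 * mutex, acquire runtime pm and run ops->get().
 *
 *	if (unlikely(!atomic_inc_not_zero(&wf->count)))
 *		err = __intel_wakeref_get_first(wf);
 */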

static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_dec_and_test(&wf->count)))
		goto unlock;

	/* ops->put() must reschedule its own release on error/deferral */
	if (likely(!wf->ops->put(wf))) {
		rpm_put(wf);
		wake_up_var(&wf->wakeref);
	}

unlock:
	mutex_unlock(&wf->mutex);
}

void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
{
	INTEL_WAKEREF_BUG_ON(delayed_work_pending(&wf->work));

	/* Assume we are not in process context and so cannot sleep. */
	if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) {
		mod_delayed_work(system_wq, &wf->work,
				 FIELD_GET(INTEL_WAKEREF_PUT_DELAY, flags));
		return;
	}

	____intel_wakeref_put_last(wf);
}
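
/*
 * Caller-side sketch (assuming the inline put helper in intel_wakeref.h is
 * roughly of this shape): only the final reference reaches the function
 * above, every other put is a lockless decrement.
 *
 *	if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
 *		__intel_wakeref_put_last(wf, flags);
 *
 * Passing INTEL_WAKEREF_PUT_ASYNC (or failing the trylock) defers the
 * release to the delayed worker below instead of sleeping here.
 */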

/*
 * Worker for puts deferred by __intel_wakeref_put_last(): now that we are
 * in process context, drop the deferred reference and, if it was the last
 * one, take the mutex and release the underlying wakeref.
 */
static void __intel_wakeref_put_work(struct work_struct *wrk)
{
	struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work.work);

	if (atomic_add_unless(&wf->count, -1, 1))
		return;

	mutex_lock(&wf->mutex);
	____intel_wakeref_put_last(wf);
}

void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct intel_wakeref_lockclass *key)
{
	wf->rpm = rpm;
	wf->ops = ops;

	__mutex_init(&wf->mutex, "wakeref.mutex", &key->mutex);
	atomic_set(&wf->count, 0);
	wf->wakeref = 0;

	INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work);
	lockdep_init_map(&wf->work.work.lockdep_map,
			 "wakeref.work", &key->work, 0);
}
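
/*
 * Usage sketch, assuming the intel_wakeref_init() wrapper in intel_wakeref.h
 * supplies the static lock class on the caller's behalf (the "foo" names
 * below are purely illustrative):
 *
 *	static const struct intel_wakeref_ops foo_wakeref_ops = {
 *		.get = foo_wakeref_get,
 *		.put = foo_wakeref_put,
 *	};
 *
 *	intel_wakeref_init(&foo->wakeref, rpm, &foo_wakeref_ops);
 *
 * ops->get() is then called on the 0 -> 1 transition and ops->put() on the
 * final release, both under the wakeref mutex and with runtime pm awake.
 */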

int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
{
	int err;

	might_sleep();

	err = wait_var_event_killable(&wf->wakeref,
				      !intel_wakeref_is_active(wf));
	if (err)
		return err;

	intel_wakeref_unlock_wait(wf);
	return 0;
}
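
/*
 * Note: the wait is keyed on wf->wakeref, which ____intel_wakeref_put_last()
 * only signals after the underlying runtime pm reference has been released,
 * so "idle" here means the put side has fully completed, not merely that
 * wf->count has dropped to zero.
 */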

static void wakeref_auto_timeout(struct timer_list *t)
{
	struct intel_wakeref_auto *wf = from_timer(wf, t, timer);
	intel_wakeref_t wakeref;
	unsigned long flags;

	if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags))
		return;

	wakeref = fetch_and_zero(&wf->wakeref);
	spin_unlock_irqrestore(&wf->lock, flags);

	intel_runtime_pm_put(wf->rpm, wakeref);
}

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm)
{
	spin_lock_init(&wf->lock);
	timer_setup(&wf->timer, wakeref_auto_timeout, 0);
	refcount_set(&wf->count, 0);
	wf->wakeref = 0;
	wf->rpm = rpm;
}

void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
{
	unsigned long flags;

	if (!timeout) {
		if (del_timer_sync(&wf->timer))
			wakeref_auto_timeout(&wf->timer);
		return;
	}

	/* Our mission is that we only extend an already active wakeref */
	assert_rpm_wakelock_held(wf->rpm);

	if (!refcount_inc_not_zero(&wf->count)) {
		spin_lock_irqsave(&wf->lock, flags);
		if (!refcount_inc_not_zero(&wf->count)) {
			INTEL_WAKEREF_BUG_ON(wf->wakeref);
			wf->wakeref = intel_runtime_pm_get_if_in_use(wf->rpm);
			refcount_set(&wf->count, 1);
		}
		spin_unlock_irqrestore(&wf->lock, flags);
	}

	/*
	 * If we extend a pending timer, we will only get a single timer
	 * callback and so need to cancel the local inc by running the
	 * elided callback to keep the wf->count balanced.
	 */
	if (mod_timer(&wf->timer, jiffies + timeout))
		wakeref_auto_timeout(&wf->timer);
}
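
/*
 * Usage sketch (illustrative names): a caller that already holds an active
 * runtime pm wakeref, e.g. a fault handler, can keep the device awake for a
 * while after it returns by extending the auto wakeref:
 *
 *	intel_wakeref_auto(&foo->userfault_wakeref, msecs_to_jiffies(250));
 *
 * Passing a timeout of 0 cancels the timer and drops any reference the auto
 * wakeref is still holding, which is what intel_wakeref_auto_fini() relies
 * on below.
 */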

void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
{
	intel_wakeref_auto(wf, 0);
	INTEL_WAKEREF_BUG_ON(wf->wakeref);
}