/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#else
#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

struct intel_runtime_pm;
struct intel_wakeref;

typedef depot_stack_handle_t intel_wakeref_t;

struct intel_wakeref_ops {
	int (*get)(struct intel_wakeref *wf);
	int (*put)(struct intel_wakeref *wf);
};

struct intel_wakeref {
	atomic_t count;
	struct mutex mutex;

	intel_wakeref_t wakeref;

	struct intel_runtime_pm *rpm;
	const struct intel_wakeref_ops *ops;

	struct delayed_work work;
};

struct intel_wakeref_lockclass {
	struct lock_class_key mutex;
	struct lock_class_key work;
};

void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct intel_wakeref_lockclass *key);
#define intel_wakeref_init(wf, rpm, ops) do {				\
	static struct intel_wakeref_lockclass __key;			\
									\
	__intel_wakeref_init((wf), (rpm), (ops), &__key);		\
} while (0)

int __intel_wakeref_get_first(struct intel_wakeref *wf);
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);
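
/*
 * Illustrative sketch only, not part of this header: wiring a wakeref up to
 * a pair of get/put callbacks with intel_wakeref_init(). The names
 * example_get(), example_put(), example_ops and example_component are
 * hypothetical placeholders for a caller's own code; the callbacks return
 * 0 on success, matching struct intel_wakeref_ops.
 *
 *	static int example_get(struct intel_wakeref *wf)
 *	{
 *		return 0;
 *	}
 *
 *	static int example_put(struct intel_wakeref *wf)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct intel_wakeref_ops example_ops = {
 *		.get = example_get,
 *		.put = example_put,
 *	};
 *
 *	static void example_component_init(struct example_component *ec,
 *					   struct intel_runtime_pm *rpm)
 *	{
 *		intel_wakeref_init(&ec->wakeref, rpm, &example_ops);
 *	}
 */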

/**
 * intel_wakeref_get: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref. The first user to do so will acquire
 * the runtime pm wakeref and then call the @wf->ops->get() callback
 * underneath the wakeref mutex.
 *
 * Note that @wf->ops->get() is allowed to fail, in which case the runtime-pm
 * wakeref will be released and the acquisition unwound, and an error reported.
 *
 * Returns: 0 if the wakeref was acquired successfully, or a negative error
 * code otherwise.
 */
static inline int
intel_wakeref_get(struct intel_wakeref *wf)
{
	might_sleep();
	if (unlikely(!atomic_inc_not_zero(&wf->count)))
		return __intel_wakeref_get_first(wf);

	return 0;
}

/**
 * __intel_wakeref_get: Acquire the wakeref, again
 * @wf: the wakeref
 *
 * Increment the wakeref counter, only valid if it is already held by
 * the caller.
 *
 * See intel_wakeref_get().
 */
static inline void
__intel_wakeref_get(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	atomic_inc(&wf->count);
}

/**
 * intel_wakeref_get_if_active: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref, but only if the wakeref is already
 * active.
 *
 * Returns: true if the wakeref was acquired, false otherwise.
 */
static inline bool
intel_wakeref_get_if_active(struct intel_wakeref *wf)
{
	return atomic_inc_not_zero(&wf->count);
}
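
/*
 * Illustrative sketch only: opportunistic access that must not wake the
 * device. example_flush_hw() is a hypothetical placeholder; if the wakeref
 * is idle the work is simply skipped rather than forcing a wakeup.
 *
 *	static void example_flush_if_awake(struct intel_wakeref *wf)
 *	{
 *		if (!intel_wakeref_get_if_active(wf))
 *			return;
 *
 *		example_flush_hw();
 *		intel_wakeref_put(wf);
 *	}
 */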

enum {
	INTEL_WAKEREF_PUT_ASYNC_BIT = 0,
	__INTEL_WAKEREF_PUT_LAST_BIT__
};

static inline void
intel_wakeref_might_get(struct intel_wakeref *wf)
{
	might_lock(&wf->mutex);
}

#define INTEL_WAKEREF_PUT_ASYNC BIT(INTEL_WAKEREF_PUT_ASYNC_BIT)
#define INTEL_WAKEREF_PUT_DELAY \
	GENMASK(BITS_PER_LONG - 1, __INTEL_WAKEREF_PUT_LAST_BIT__)

/**
 * __intel_wakeref_put: Release the wakeref
 * @wf: the wakeref
 * @flags: control flags (INTEL_WAKEREF_PUT_ASYNC, INTEL_WAKEREF_PUT_DELAY)
 *
 * Release our hold on the wakeref. When there are no more users,
 * the runtime pm wakeref will be released after the @wf->ops->put() callback
 * is called underneath the wakeref mutex.
 *
 * Note that @wf->ops->put() is allowed to fail, in which case the runtime-pm
 * wakeref is retained and an error reported.
 */
static inline void
__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
		__intel_wakeref_put_last(wf, flags);
}

static inline void
intel_wakeref_put(struct intel_wakeref *wf)
{
	might_sleep();
	__intel_wakeref_put(wf, 0);
}

static inline void
intel_wakeref_put_async(struct intel_wakeref *wf)
{
	__intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
}

static inline void
intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay)
{
	__intel_wakeref_put(wf,
			    INTEL_WAKEREF_PUT_ASYNC |
			    FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
}
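
/*
 * Illustrative sketch only, contrasting the put variants:
 * intel_wakeref_put() may sleep (note its might_sleep() annotation) and so
 * is for process context; intel_wakeref_put_async() defers the final release
 * to a worker instead; intel_wakeref_put_delay() additionally postpones that
 * deferred release by the supplied delay. EXAMPLE_IDLE_DELAY is a
 * hypothetical constant chosen by the caller.
 *
 *	static void example_release_from_irq(struct intel_wakeref *wf)
 *	{
 *		intel_wakeref_put_async(wf);
 *	}
 *
 *	static void example_release_lazily(struct intel_wakeref *wf)
 *	{
 *		intel_wakeref_put_delay(wf, EXAMPLE_IDLE_DELAY);
 *	}
 */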

static inline void
intel_wakeref_might_put(struct intel_wakeref *wf)
{
	might_lock(&wf->mutex);
}

/**
 * intel_wakeref_lock: Lock the wakeref (mutex)
 * @wf: the wakeref
 *
 * Locks the wakeref to prevent it being acquired or released. New users
 * can still adjust the counter, but the wakeref itself (and callback)
 * cannot be acquired or released.
 */
static inline void
intel_wakeref_lock(struct intel_wakeref *wf)
	__acquires(wf->mutex)
{
	mutex_lock(&wf->mutex);
}

/**
 * intel_wakeref_unlock: Unlock the wakeref
 * @wf: the wakeref
 *
 * Releases a previously acquired intel_wakeref_lock().
 */
static inline void
intel_wakeref_unlock(struct intel_wakeref *wf)
	__releases(wf->mutex)
{
	mutex_unlock(&wf->mutex);
}

/**
 * intel_wakeref_unlock_wait: Wait until the active callback is complete
 * @wf: the wakeref
 *
 * Waits until the active callback (running under the @wf->mutex or on
 * another CPU) is complete.
 */
static inline void
intel_wakeref_unlock_wait(struct intel_wakeref *wf)
{
	mutex_lock(&wf->mutex);
	mutex_unlock(&wf->mutex);
	flush_delayed_work(&wf->work);
}

/**
 * intel_wakeref_is_active: Query whether the wakeref is currently held
 * @wf: the wakeref
 *
 * Returns: true if the wakeref is currently held.
 */
static inline bool
intel_wakeref_is_active(const struct intel_wakeref *wf)
{
	return READ_ONCE(wf->wakeref);
}
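
/*
 * Illustrative sketch only: holding the wakeref mutex to get a stable answer
 * from intel_wakeref_is_active() while deciding whether to do idle-only work.
 * example_park_cleanup() is a hypothetical placeholder for the caller's own
 * code.
 *
 *	static void example_cleanup_if_idle(struct intel_wakeref *wf)
 *	{
 *		intel_wakeref_lock(wf);
 *		if (!intel_wakeref_is_active(wf))
 *			example_park_cleanup();
 *		intel_wakeref_unlock(wf);
 *	}
 */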

/**
 * __intel_wakeref_defer_park: Defer the current park callback
 * @wf: the wakeref
 */
static inline void
__intel_wakeref_defer_park(struct intel_wakeref *wf)
{
	lockdep_assert_held(&wf->mutex);
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
	atomic_set_release(&wf->count, 1);
}

/**
 * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
 * @wf: the wakeref
 *
 * Wait for the earlier asynchronous release of the wakeref. Note
 * this will wait for any third party as well, so make sure you only wait
 * when you have control over the wakeref and trust no one else is acquiring
 * it.
 *
 * Returns: 0 on success, error code if killed.
 */
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);
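
/*
 * Illustrative sketch only: draining the wakeref during teardown after
 * flushing our own asynchronous put, assuming the caller held a long-term
 * reference and, as noted above, that no third party can re-acquire the
 * wakeref. example_teardown() is a hypothetical placeholder.
 *
 *	static int example_teardown(struct intel_wakeref *wf)
 *	{
 *		intel_wakeref_put_async(wf);
 *		return intel_wakeref_wait_for_idle(wf);
 *	}
 */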

struct intel_wakeref_auto {
	struct intel_runtime_pm *rpm;
	struct timer_list timer;
	intel_wakeref_t wakeref;
	spinlock_t lock;
	refcount_t count;
};

/**
 * intel_wakeref_auto: Delay the runtime-pm autosuspend
 * @wf: the wakeref
 * @timeout: relative timeout in jiffies
 *
 * The runtime-pm core uses a suspend delay after the last wakeref
 * is released before triggering runtime suspend of the device. That
 * delay is configurable via sysfs with little regard to the device
 * characteristics. Instead, we want to tune the autosuspend based on our
 * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied
 * timeout.
 *
 * Pass @timeout = 0 to cancel a previous autosuspend by executing the
 * suspend immediately.
 */
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm);
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);
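
/*
 * Illustrative sketch only: extending the autosuspend window after a
 * user-visible access. The 250ms grace period is an arbitrary value picked
 * for this example (converted to jiffies, per the @timeout documentation
 * above); pass 0 to cancel the extension.
 *
 *	static void example_track_user_access(struct intel_wakeref_auto *wf)
 *	{
 *		intel_wakeref_auto(wf, msecs_to_jiffies(250));
 *	}
 */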

#endif /* INTEL_WAKEREF_H */