/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#else
#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

struct intel_runtime_pm;
struct intel_wakeref;

typedef depot_stack_handle_t intel_wakeref_t;

struct intel_wakeref_ops {
	int (*get)(struct intel_wakeref *wf);
	int (*put)(struct intel_wakeref *wf);

	unsigned long flags;
#define INTEL_WAKEREF_PUT_ASYNC BIT(0)
};

struct intel_wakeref {
	atomic_t count;
	struct mutex mutex;

	intel_wakeref_t wakeref;

	struct intel_runtime_pm *rpm;
	const struct intel_wakeref_ops *ops;

	struct work_struct work;
};

void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct lock_class_key *key);
#define intel_wakeref_init(wf, rpm, ops) do {			\
	static struct lock_class_key __key;			\
								\
	__intel_wakeref_init((wf), (rpm), (ops), &__key);	\
} while (0)

int __intel_wakeref_get_first(struct intel_wakeref *wf);
void __intel_wakeref_put_last(struct intel_wakeref *wf);
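
/*
 * Illustrative sketch only, not part of this interface: wiring up an
 * intel_wakeref with a pair of callbacks. The names my_get(), my_put(),
 * my_ops, my_engine and my_rpm are assumptions made for the example.
 *
 *	static int my_get(struct intel_wakeref *wf) { return 0; }
 *	static int my_put(struct intel_wakeref *wf) { return 0; }
 *
 *	static const struct intel_wakeref_ops my_ops = {
 *		.get = my_get,
 *		.put = my_put,
 *	};
 *
 *	intel_wakeref_init(&my_engine->wakeref, my_rpm, &my_ops);
 *
 * my_get()/my_put() would normally power the hardware up and park it again;
 * they run under the wakeref mutex on the first get and last put.
 */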

/**
 * intel_wakeref_get: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref. The first user to do so will acquire
 * the runtime pm wakeref and then call the ops->get() callback underneath
 * the wakeref mutex.
 *
 * Note that ops->get() is allowed to fail, in which case the runtime-pm
 * wakeref will be released and the acquisition unwound, and an error
 * reported.
 *
 * Returns: 0 if the wakeref was acquired successfully, or a negative error
 * code otherwise.
 */
static inline int
intel_wakeref_get(struct intel_wakeref *wf)
{
	if (unlikely(!atomic_inc_not_zero(&wf->count)))
		return __intel_wakeref_get_first(wf);

	return 0;
}

/**
 * intel_wakeref_get_if_active: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref, but only if the wakeref is already
 * active.
 *
 * Returns: true if the wakeref was acquired, false otherwise.
 */
static inline bool
intel_wakeref_get_if_active(struct intel_wakeref *wf)
{
	return atomic_inc_not_zero(&wf->count);
}

/**
 * intel_wakeref_put: Release the wakeref
 * @wf: the wakeref
 *
 * Release our hold on the wakeref. When there are no more users,
 * the runtime pm wakeref will be released after the ops->put() callback is
 * called underneath the wakeref mutex.
 *
 * Note that ops->put() is allowed to fail, in which case the runtime-pm
 * wakeref is retained.
 */
static inline void
intel_wakeref_put(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
		__intel_wakeref_put_last(wf);
}
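
/*
 * Illustrative sketch only: the usual acquire/use/release pattern. my_engine
 * (an object embedding an intel_wakeref) and do_work() are placeholders for
 * the example. The first intel_wakeref_get() takes the runtime-pm wakeref
 * and runs ops->get(); the final intel_wakeref_put() runs ops->put() and
 * drops the runtime-pm wakeref.
 *
 *	int err;
 *
 *	err = intel_wakeref_get(&my_engine->wakeref);
 *	if (err)
 *		return err;
 *
 *	do_work(my_engine);
 *
 *	intel_wakeref_put(&my_engine->wakeref);
 *
 * Use intel_wakeref_get_if_active() instead of intel_wakeref_get() when the
 * work is only worth doing if the hardware is already awake.
 */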

/**
 * intel_wakeref_lock: Lock the wakeref (mutex)
 * @wf: the wakeref
 *
 * Locks the wakeref to prevent it being acquired or released. New users
 * can still adjust the counter, but the wakeref itself (and callback)
 * cannot be acquired or released.
 */
static inline void
intel_wakeref_lock(struct intel_wakeref *wf)
	__acquires(wf->mutex)
{
	mutex_lock(&wf->mutex);
}

/**
 * intel_wakeref_unlock: Unlock the wakeref
 * @wf: the wakeref
 *
 * Releases a previously acquired intel_wakeref_lock().
 */
static inline void
intel_wakeref_unlock(struct intel_wakeref *wf)
	__releases(wf->mutex)
{
	mutex_unlock(&wf->mutex);
}

/**
 * intel_wakeref_is_active: Query whether the wakeref is currently held
 * @wf: the wakeref
 *
 * Returns: true if the wakeref is currently held.
 */
static inline bool
intel_wakeref_is_active(const struct intel_wakeref *wf)
{
	return READ_ONCE(wf->wakeref);
}
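
/*
 * Illustrative sketch only: holding the wakeref mutex stabilises the
 * active/idle state while it is inspected, e.g. to flush state only when
 * the hardware is awake. my_engine and flush_hw_state() are hypothetical.
 *
 *	intel_wakeref_lock(&my_engine->wakeref);
 *	if (intel_wakeref_is_active(&my_engine->wakeref))
 *		flush_hw_state(my_engine);
 *	intel_wakeref_unlock(&my_engine->wakeref);
 */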

/**
 * __intel_wakeref_defer_park: Defer the current park callback
 * @wf: the wakeref
 */
static inline void
__intel_wakeref_defer_park(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
	atomic_set_release(&wf->count, 1);
}

/**
 * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
 * @wf: the wakeref
 *
 * Wait for the earlier asynchronous release of the wakeref. Note
 * this will wait for any third party as well, so make sure you only wait
 * when you have control over the wakeref and trust no one else is acquiring
 * it.
 *
 * Return: 0 on success, error code if killed.
 */
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);

struct intel_wakeref_auto {
	struct intel_runtime_pm *rpm;
	struct timer_list timer;
	intel_wakeref_t wakeref;
	spinlock_t lock;
	refcount_t count;
};

/**
 * intel_wakeref_auto: Delay the runtime-pm autosuspend
 * @wf: the wakeref
 * @timeout: relative timeout in jiffies
 *
 * The runtime-pm core uses a suspend delay after the last wakeref
 * is released before triggering runtime suspend of the device. That
 * delay is configurable via sysfs with little regard to the device
 * characteristics. Instead, we want to tune the autosuspend based on our
 * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied
 * timeout.
 *
 * Pass @timeout = 0 to cancel a previous autosuspend by executing the
 * suspend immediately.
 */
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm);
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);
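
/*
 * Illustrative sketch only: extending the autosuspend window after user
 * activity. my_i915, its user_wakeref field and its runtime_pm pointer are
 * assumptions made for the example.
 *
 *	intel_wakeref_auto_init(&my_i915->user_wakeref, &my_i915->runtime_pm);
 *
 * On each user access, keep the device awake for another 250ms:
 *
 *	intel_wakeref_auto(&my_i915->user_wakeref, msecs_to_jiffies(250));
 *
 * On teardown, cancel any pending grace period and clean up:
 *
 *	intel_wakeref_auto(&my_i915->user_wakeref, 0);
 *	intel_wakeref_auto_fini(&my_i915->user_wakeref);
 */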

#endif /* INTEL_WAKEREF_H */