/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/device.h>

#include "tick-internal.h"
/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);
/* Protection for unbind operations */
static DEFINE_MUTEX(clockevents_mutex);

struct ce_unbind {
	struct clock_event_device *ce;
	int res;
};

static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
			bool ismax)
{
	u64 clc = (u64) latch << evt->shift;
	u64 rnd;

	if (unlikely(!evt->mult)) {
		evt->mult = 1;
		WARN_ON(1);
	}
	rnd = (u64) evt->mult - 1;

	/*
	 * Upper bound sanity check. If the backwards conversion is
	 * not equal to latch, we know that the above shift overflowed.
	 */
	if ((clc >> evt->shift) != (u64) latch)
		clc = ~0ULL;

	/*
	 * Scaled math oddities:
	 *
	 * For mult <= (1 << shift) we can safely add mult - 1 to
	 * prevent integer rounding loss. So the backwards conversion
	 * from nsec to device ticks will be correct.
	 *
	 * For mult > (1 << shift), i.e. device frequency is > 1GHz we
	 * need to be careful. Adding mult - 1 will result in a value
	 * which when converted back to device ticks can be larger
	 * than latch by up to (mult - 1) >> shift. For the min_delta
	 * calculation we still want to apply this in order to stay
	 * above the minimum device ticks limit. For the upper limit
	 * we would end up with a latch value larger than the upper
	 * limit of the device, so we omit the add to stay below the
	 * device upper boundary.
	 *
	 * Also omit the add if it would overflow the u64 boundary.
	 */
	if ((~0ULL - clc > rnd) &&
	    (!ismax || evt->mult <= (1ULL << evt->shift)))
		clc += rnd;

	do_div(clc, evt->mult);

	/* Deltas less than 1usec are pointless noise */
	return clc > 1000 ? clc : 1000;
}

/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch:	value to convert
 * @evt:	pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
	return cev_delta2ns(latch, evt, false);
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);

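/*
 * Worked example (frequency and shift values assumed for illustration
 * only): a 1 MHz device configured with shift = 32 ends up with
 * mult ~= (1000000 << 32) / NSEC_PER_SEC ~= 4294967. A latch of 100
 * device ticks then converts to (100 << 32) / 4294967 ~= 100000 ns,
 * i.e. 100 usec, which matches 100 ticks of a 1 MHz clock.
 */
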
static int __clockevents_switch_state(struct clock_event_device *dev,
				      enum clock_event_state state)
{
	/* Transition with legacy set_mode() callback */
	if (dev->set_mode) {
		/* Legacy callback doesn't support new modes */
		if (state > CLOCK_EVT_STATE_ONESHOT)
			return -ENOSYS;
		/*
		 * 'clock_event_state' and 'clock_event_mode' have 1-to-1
		 * mapping until *_ONESHOT, and so a simple cast will work.
		 */
		dev->set_mode((enum clock_event_mode)state, dev);
		dev->mode = (enum clock_event_mode)state;
		return 0;
	}

	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
		return 0;

	/* Transition with new state-specific callbacks */
	switch (state) {
	case CLOCK_EVT_STATE_DETACHED:
		/* The clockevent device is getting replaced. Shut it down. */

	case CLOCK_EVT_STATE_SHUTDOWN:
		return dev->set_state_shutdown(dev);

	case CLOCK_EVT_STATE_PERIODIC:
		/* Core internal bug */
		if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))
			return -ENOSYS;
		return dev->set_state_periodic(dev);

	case CLOCK_EVT_STATE_ONESHOT:
		/* Core internal bug */
		if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
			return -ENOSYS;
		return dev->set_state_oneshot(dev);

	case CLOCK_EVT_STATE_ONESHOT_STOPPED:
		/* Core internal bug */
		if (WARN_ONCE(!clockevent_state_oneshot(dev),
			      "Current state: %d\n",
			      clockevent_get_state(dev)))
			return -EINVAL;

		if (dev->set_state_oneshot_stopped)
			return dev->set_state_oneshot_stopped(dev);
		else
			return -ENOSYS;

	default:
		return -ENOSYS;
	}
}

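/*
 * For reference (mapping implied by the cast above): the state values up
 * to and including ONESHOT line up one-to-one with the legacy modes, i.e.
 * DETACHED/UNUSED, SHUTDOWN/SHUTDOWN, PERIODIC/PERIODIC and
 * ONESHOT/ONESHOT. The newer ONESHOT_STOPPED state has no legacy
 * equivalent, which is why legacy set_mode() devices are rejected for it.
 */
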
/**
 * clockevents_switch_state - set the operating state of a clock event device
 * @dev:	device to modify
 * @state:	new state
 *
 * Must be called with interrupts disabled!
 */
void clockevents_switch_state(struct clock_event_device *dev,
			      enum clock_event_state state)
{
	if (clockevent_get_state(dev) != state) {
		if (__clockevents_switch_state(dev, state))
			return;

		clockevent_set_state(dev, state);

		/*
		 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
		 * on it, so fix it up and emit a warning:
		 */
		if (clockevent_state_oneshot(dev)) {
			if (unlikely(!dev->mult)) {
				dev->mult = 1;
				WARN_ON(1);
			}
		}
	}
}

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev:	device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
	dev->next_event.tv64 = KTIME_MAX;
}

/**
 * clockevents_tick_resume - Resume the tick device before using it again
 * @dev:	device to resume
 */
int clockevents_tick_resume(struct clock_event_device *dev)
{
	int ret = 0;

	if (dev->set_mode) {
		dev->set_mode(CLOCK_EVT_MODE_RESUME, dev);
		dev->mode = CLOCK_EVT_MODE_RESUME;
	} else if (dev->tick_resume) {
		ret = dev->tick_resume(dev);
	}

	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST

/* Limit min_delta to a jiffie */
#define MIN_DELTA_LIMIT		(NSEC_PER_SEC / HZ)

/**
 * clockevents_increase_min_delta - raise minimum delta of a clock event device
 * @dev:	device to increase the minimum delta
 *
 * Returns 0 on success, -ETIME when the minimum delta reached the limit.
 */
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
	/* Nothing to do if we already reached the limit */
	if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
		printk_deferred(KERN_WARNING
				"CE: Reprogramming failure. Giving up\n");
		dev->next_event.tv64 = KTIME_MAX;
		return -ETIME;
	}

	if (dev->min_delta_ns < 5000)
		dev->min_delta_ns = 5000;
	else
		dev->min_delta_ns += dev->min_delta_ns >> 1;

	if (dev->min_delta_ns > MIN_DELTA_LIMIT)
		dev->min_delta_ns = MIN_DELTA_LIMIT;

	printk_deferred(KERN_WARNING
			"CE: %s increased min_delta_ns to %llu nsec\n",
			dev->name ? dev->name : "?",
			(unsigned long long) dev->min_delta_ns);
	return 0;
}

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;
	int i;

	for (i = 0;;) {
		delta = dev->min_delta_ns;
		dev->next_event = ktime_add_ns(ktime_get(), delta);

		if (clockevent_state_shutdown(dev))
			return 0;

		dev->retries++;
		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
		if (dev->set_next_event((unsigned long) clc, dev) == 0)
			return 0;

		if (++i > 2) {
			/*
			 * We tried 3 times to program the device with the
			 * given min_delta_ns. Try to increase the minimum
			 * delta, if that fails as well get out of here.
			 */
			if (clockevents_increase_min_delta(dev))
				return -ETIME;
			i = 0;
		}
	}
}
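
/*
 * Illustrative growth sequence (device-specific numbers assumed): after
 * repeated programming failures min_delta_ns is first raised to 5000 ns
 * and then multiplied by roughly 1.5 on each further failure burst, e.g.
 * 5000 -> 7500 -> 11250 -> 16875 -> ..., until it hits MIN_DELTA_LIMIT
 * (one jiffy, NSEC_PER_SEC / HZ), at which point reprogramming gives up
 * with -ETIME.
 */
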
#else  /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;

	delta = dev->min_delta_ns;
	dev->next_event = ktime_add_ns(ktime_get(), delta);

	if (clockevent_state_shutdown(dev))
		return 0;

	dev->retries++;
	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	return dev->set_next_event((unsigned long) clc, dev);
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev:	device to program
 * @expires:	absolute expiry time (monotonic clock)
 * @force:	program minimum delay if expires can not be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
			      bool force)
{
	unsigned long long clc;
	int64_t delta;
	int rc;

	if (unlikely(expires.tv64 < 0)) {
		WARN_ON_ONCE(1);
		return -ETIME;
	}

	dev->next_event = expires;

	if (clockevent_state_shutdown(dev))
		return 0;

	/* We must be in ONESHOT state here */
	WARN_ONCE(!clockevent_state_oneshot(dev), "Current state: %d\n",
		  clockevent_get_state(dev));

	/* Shortcut for clockevent devices that can deal with ktime. */
	if (dev->features & CLOCK_EVT_FEAT_KTIME)
		return dev->set_next_ktime(expires, dev);

	delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
	if (delta <= 0)
		return force ? clockevents_program_min_delta(dev) : -ETIME;

	delta = min(delta, (int64_t) dev->max_delta_ns);
	delta = max(delta, (int64_t) dev->min_delta_ns);

	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	rc = dev->set_next_event((unsigned long) clc, dev);

	return (rc && force) ? clockevents_program_min_delta(dev) : rc;
}

/*
 * Called after a notify add to make devices available which were
 * released from the notifier call.
 */
static void clockevents_notify_released(void)
{
	struct clock_event_device *dev;

	while (!list_empty(&clockevents_released)) {
		dev = list_entry(clockevents_released.next,
				 struct clock_event_device, list);
		list_del(&dev->list);
		list_add(&dev->list, &clockevent_devices);
		tick_check_new_device(dev);
	}
}

/*
 * Try to install a replacement clock event device
 */
static int clockevents_replace(struct clock_event_device *ced)
{
	struct clock_event_device *dev, *newdev = NULL;

	list_for_each_entry(dev, &clockevent_devices, list) {
		if (dev == ced || !clockevent_state_detached(dev))
			continue;

		if (!tick_check_replacement(newdev, dev))
			continue;

		if (!try_module_get(dev->owner))
			continue;

		if (newdev)
			module_put(newdev->owner);
		newdev = dev;
	}
	if (newdev) {
		tick_install_replacement(newdev);
		list_del_init(&ced->list);
	}
	return newdev ? 0 : -EBUSY;
}

/*
 * Called with clockevents_mutex and clockevents_lock held
 */
static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
{
	/* Fast track. Device is unused */
	if (clockevent_state_detached(ced)) {
		list_del_init(&ced->list);
		return 0;
	}

	return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
}

/*
 * SMP function call to unbind a device
 */
static void __clockevents_unbind(void *arg)
{
	struct ce_unbind *cu = arg;
	int res;

	raw_spin_lock(&clockevents_lock);
	res = __clockevents_try_unbind(cu->ce, smp_processor_id());
	if (res == -EAGAIN)
		res = clockevents_replace(cu->ce);
	cu->res = res;
	raw_spin_unlock(&clockevents_lock);
}

/*
 * Issues smp function call to unbind a per cpu device. Called with
 * clockevents_mutex held.
 */
static int clockevents_unbind(struct clock_event_device *ced, int cpu)
{
	struct ce_unbind cu = { .ce = ced, .res = -ENODEV };

	smp_call_function_single(cpu, __clockevents_unbind, &cu, 1);
	return cu.res;
}

/*
 * Unbind a clockevents device.
 */
int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
{
	int ret;

	mutex_lock(&clockevents_mutex);
	ret = clockevents_unbind(ced, cpu);
	mutex_unlock(&clockevents_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(clockevents_unbind_device);

/* Sanity check of state transition callbacks */
static int clockevents_sanity_check(struct clock_event_device *dev)
{
	/* Legacy set_mode() callback */
	if (dev->set_mode) {
		/* We shouldn't be supporting new modes now */
		WARN_ON(dev->set_state_periodic || dev->set_state_oneshot ||
			dev->set_state_shutdown || dev->tick_resume ||
			dev->set_state_oneshot_stopped);

		BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
		return 0;
	}

	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
		return 0;

	/* New state-specific callbacks */
	if (!dev->set_state_shutdown)
		return -EINVAL;

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !dev->set_state_periodic)
		return -EINVAL;

	if ((dev->features & CLOCK_EVT_FEAT_ONESHOT) &&
	    !dev->set_state_oneshot)
		return -EINVAL;

	return 0;
}
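
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * new-style oneshot-capable device passes the check above by providing at
 * least set_state_shutdown and set_state_oneshot, e.g.:
 *
 *	static struct clock_event_device my_ce = {
 *		.name			= "my_timer",
 *		.features		= CLOCK_EVT_FEAT_ONESHOT,
 *		.rating			= 300,
 *		.set_state_shutdown	= my_shutdown,
 *		.set_state_oneshot	= my_set_oneshot,
 *		.set_next_event		= my_set_next_event,
 *	};
 */
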
/**
 * clockevents_register_device - register a clock event device
 * @dev:	device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
	unsigned long flags;

	BUG_ON(clockevents_sanity_check(dev));

	/* Initialize state to DETACHED */
	clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);

	if (!dev->cpumask) {
		WARN_ON(num_possible_cpus() > 1);
		dev->cpumask = cpumask_of(smp_processor_id());
	}

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	list_add(&dev->list, &clockevent_devices);
	tick_check_new_device(dev);
	clockevents_notify_released();

	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

void clockevents_config(struct clock_event_device *dev, u32 freq)
{
	u64 sec;

	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return;

	/*
	 * Calculate the maximum number of seconds we can sleep. Limit
	 * to 10 minutes for hardware which can program more than
	 * 32bit ticks so we still get reasonable conversion values.
	 */
	sec = dev->max_delta_ticks;
	do_div(sec, freq);
	if (!sec)
		sec = 1;
	else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
		sec = 600;

	clockevents_calc_mult_shift(dev, freq, sec);
	dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
	dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
}
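
/*
 * Illustrative numbers (assumed, not from a specific driver): a timer with
 * a 32-bit comparator running at 100 MHz has max_delta_ticks = 0xffffffff,
 * so sec = 0xffffffff / 100000000 ~= 42 and mult/shift are computed for a
 * 42 second conversion range. A device with a wider-than-32-bit range at
 * the same frequency would instead be clamped to the 600 second limit.
 */
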
/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev:	device to register
 * @freq:	The clock frequency
 * @min_delta:	The minimum clock ticks to program in oneshot mode
 * @max_delta:	The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
				     u32 freq, unsigned long min_delta,
				     unsigned long max_delta)
{
	dev->min_delta_ticks = min_delta;
	dev->max_delta_ticks = max_delta;
	clockevents_config(dev, freq);
	clockevents_register_device(dev);
}
EXPORT_SYMBOL_GPL(clockevents_config_and_register);

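/*
 * Typical driver-side use (hypothetical device and values): a 24 MHz timer
 * whose comparator accepts 32-bit deltas could register itself with
 *
 *	clockevents_config_and_register(&my_ce, 24000000, 1, 0xffffffff);
 *
 * which fills in min/max_delta_ticks, derives mult/shift and the
 * nanosecond bounds via clockevents_config(), and then hands the device
 * to the tick core.
 */
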
int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	clockevents_config(dev, freq);

	if (clockevent_state_oneshot(dev))
		return clockevents_program_event(dev, dev->next_event, false);

	if (clockevent_state_periodic(dev))
		return __clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);

	return 0;
}

/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev:	device to modify
 * @freq:	new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events. If called for the broadcast device the core takes
 * care of serialization.
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = tick_broadcast_update_freq(dev, freq);
	if (ret == -ENODEV)
		ret = __clockevents_update_freq(dev, freq);
	local_irq_restore(flags);
	return ret;
}

/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old:	device to release (can be NULL)
 * @new:	device to request (can be NULL)
 *
 * Called from various tick functions with clockevents_lock held and
 * interrupts disabled.
 */
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new)
{
	/*
	 * Caller releases a clock event device. We queue it into the
	 * released list and do a notify add later.
	 */
	if (old) {
		module_put(old->owner);
		clockevents_switch_state(old, CLOCK_EVT_STATE_DETACHED);
		list_del(&old->list);
		list_add(&old->list, &clockevents_released);
	}

	if (new) {
		BUG_ON(!clockevent_state_detached(new));
		clockevents_shutdown(new);
	}
}

/**
 * clockevents_suspend - suspend clock devices
 */
void clockevents_suspend(void)
{
	struct clock_event_device *dev;

	list_for_each_entry_reverse(dev, &clockevent_devices, list)
		if (dev->suspend && !clockevent_state_detached(dev))
			dev->suspend(dev);
}

/**
 * clockevents_resume - resume clock devices
 */
void clockevents_resume(void)
{
	struct clock_event_device *dev;

	list_for_each_entry(dev, &clockevent_devices, list)
		if (dev->resume && !clockevent_state_detached(dev))
			dev->resume(dev);
}

#ifdef CONFIG_HOTPLUG_CPU
/**
 * tick_cleanup_dead_cpu - Cleanup the tick and clockevents of a dead cpu
 */
void tick_cleanup_dead_cpu(int cpu)
{
	struct clock_event_device *dev, *tmp;
	unsigned long flags;

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	tick_shutdown_broadcast_oneshot(cpu);
	tick_shutdown_broadcast(cpu);
	tick_shutdown(cpu);
	/*
	 * Unregister the clock event devices which were
	 * released from the users in the notify chain.
	 */
	list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
		list_del(&dev->list);
	/*
	 * Now check whether the CPU has left unused per cpu devices
	 */
	list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
		if (cpumask_test_cpu(cpu, dev->cpumask) &&
		    cpumask_weight(dev->cpumask) == 1 &&
		    !tick_is_broadcast_device(dev)) {
			BUG_ON(!clockevent_state_detached(dev));
			list_del(&dev->list);
		}
	}
	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
#endif

#ifdef CONFIG_SYSFS
struct bus_type clockevents_subsys = {
	.name		= "clockevents",
	.dev_name	= "clockevent",
};

static DEFINE_PER_CPU(struct device, tick_percpu_dev);
static struct tick_device *tick_get_tick_dev(struct device *dev);

static ssize_t sysfs_show_current_tick_dev(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct tick_device *td;
	ssize_t count = 0;

	raw_spin_lock_irq(&clockevents_lock);
	td = tick_get_tick_dev(dev);
	if (td && td->evtdev)
		count = snprintf(buf, PAGE_SIZE, "%s\n", td->evtdev->name);
	raw_spin_unlock_irq(&clockevents_lock);
	return count;
}
static DEVICE_ATTR(current_device, 0444, sysfs_show_current_tick_dev, NULL);

/* We don't support the abomination of removable broadcast devices */
static ssize_t sysfs_unbind_tick_dev(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	char name[CS_NAME_LEN];
	ssize_t ret = sysfs_get_uname(buf, name, count);
	struct clock_event_device *ce;

	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clockevents_mutex);
	raw_spin_lock_irq(&clockevents_lock);
	list_for_each_entry(ce, &clockevent_devices, list) {
		if (!strcmp(ce->name, name)) {
			ret = __clockevents_try_unbind(ce, dev->id);
			break;
		}
	}
	raw_spin_unlock_irq(&clockevents_lock);
	/*
	 * We hold clockevents_mutex, so ce can't go away
	 */
	if (ret == -EAGAIN)
		ret = clockevents_unbind(ce, dev->id);
	mutex_unlock(&clockevents_mutex);
	return ret ? ret : count;
}
static DEVICE_ATTR(unbind_device, 0200, NULL, sysfs_unbind_tick_dev);

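/*
 * Example (sysfs path layout assumed from the subsystem and device names
 * above, device name purely illustrative): writing a clock event device
 * name to the per-cpu unbind attribute, e.g.
 *
 *	echo my_timer > /sys/devices/system/clockevents/clockevent0/unbind_device
 *
 * unbinds that device from CPU 0 and installs a replacement if one is
 * available.
 */
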
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static struct device tick_bc_dev = {
	.init_name	= "broadcast",
	.id		= 0,
	.bus		= &clockevents_subsys,
};

static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return dev == &tick_bc_dev ? tick_get_broadcast_device() :
		&per_cpu(tick_cpu_device, dev->id);
}

static __init int tick_broadcast_init_sysfs(void)
{
	int err = device_register(&tick_bc_dev);

	if (!err)
		err = device_create_file(&tick_bc_dev, &dev_attr_current_device);
	return err;
}
#else
static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return &per_cpu(tick_cpu_device, dev->id);
}
static inline int tick_broadcast_init_sysfs(void) { return 0; }
#endif

static int __init tick_init_sysfs(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct device *dev = &per_cpu(tick_percpu_dev, cpu);
		int err;

		dev->id = cpu;
		dev->bus = &clockevents_subsys;
		err = device_register(dev);
		if (!err)
			err = device_create_file(dev, &dev_attr_current_device);
		if (!err)
			err = device_create_file(dev, &dev_attr_unbind_device);
		if (err)
			return err;
	}
	return tick_broadcast_init_sysfs();
}

static int __init clockevents_init_sysfs(void)
{
	int err = subsys_system_register(&clockevents_subsys, NULL);

	if (!err)
		err = tick_init_sysfs();
	return err;
}
device_initcall(clockevents_init_sysfs);
#endif /* SYSFS */