hrtimers: Prepare for cancel on clock was set timers
Make clock_was_set() unconditional and rename hres_timers_resume() to hrtimers_resume(). This is a preparatory patch for hrtimers which are cancelled when the realtime clock is set.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 942c3c5c32
commit b12a03ce48
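For orientation, the net effect of the patch can be condensed as follows; this is a simplified sketch distilled from the hunks below, not additional kernel code. clock_was_set() and hrtimers_resume() are now declared and built unconditionally, and the per-CPU handler they share bails out early when high resolution mode is inactive, so low resolution configurations only pay for the cross-CPU call.

    /* Condensed sketch of the resulting flow (details elided, see the diff below) */

    /* Runs on every CPU with interrupts disabled (via on_each_cpu()) */
    static void retrigger_next_event(void *arg)
    {
            if (!hrtimer_hres_active())
                    return;         /* low res mode: no clock base offsets to adjust */

            /* high res mode: recompute the CLOCK_REALTIME and CLOCK_BOOTTIME
             * offsets and reprogram the next expiring timer, as in the full
             * version added to kernel/hrtimer.c below */
    }

    /* No longer an empty inline stub on !CONFIG_HIGH_RES_TIMERS builds */
    void clock_was_set(void)
    {
            /* Retrigger the CPU local events everywhere */
            on_each_cpu(retrigger_next_event, NULL, 1);
    }

    /* Renamed from hres_timers_resume(); runs on the local CPU with IRQs off */
    void hrtimers_resume(void)
    {
            retrigger_next_event(NULL);
    }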
include/linux/hrtimer.h

@@ -148,9 +148,7 @@ struct hrtimer_clock_base {
 	ktime_t			resolution;
 	ktime_t			(*get_time)(void);
 	ktime_t			softirq_time;
-#ifdef CONFIG_HIGH_RES_TIMERS
 	ktime_t			offset;
-#endif
 };
 
 enum hrtimer_base_type {
@@ -256,8 +254,6 @@ static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
 #ifdef CONFIG_HIGH_RES_TIMERS
 struct clock_event_device;
 
-extern void clock_was_set(void);
-extern void hres_timers_resume(void);
 extern void hrtimer_interrupt(struct clock_event_device *dev);
 
 /*
@@ -291,16 +287,8 @@ extern void hrtimer_peek_ahead_timers(void);
 # define MONOTONIC_RES_NSEC	LOW_RES_NSEC
 # define KTIME_MONOTONIC_RES	KTIME_LOW_RES
 
-/*
- * clock_was_set() is a NOP for non- high-resolution systems. The
- * time-sorted order guarantees that a timer does not expire early and
- * is expired in the next softirq when the clock was advanced.
- */
-static inline void clock_was_set(void) { }
 static inline void hrtimer_peek_ahead_timers(void) { }
 
-static inline void hres_timers_resume(void) { }
-
 /*
  * In non high resolution mode the time reference is taken from
  * the base softirq time variable.
@@ -316,11 +304,13 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
 }
 #endif
 
+extern void clock_was_set(void);
+extern void hrtimers_resume(void);
+
 extern ktime_t ktime_get(void);
 extern ktime_t ktime_get_real(void);
 extern ktime_t ktime_get_boottime(void);
 
-
 DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
 
 
kernel/hrtimer.c (125 changed lines)
@@ -621,66 +621,6 @@ static int hrtimer_reprogram(struct hrtimer *timer,
 	return res;
 }
 
-
-/*
- * Retrigger next event is called after clock was set
- *
- * Called with interrupts disabled via on_each_cpu()
- */
-static void retrigger_next_event(void *arg)
-{
-	struct hrtimer_cpu_base *base;
-	struct timespec realtime_offset, wtm, sleep;
-
-	if (!hrtimer_hres_active())
-		return;
-
-	get_xtime_and_monotonic_and_sleep_offset(&realtime_offset, &wtm,
-							&sleep);
-	set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
-
-	base = &__get_cpu_var(hrtimer_bases);
-
-	/* Adjust CLOCK_REALTIME offset */
-	raw_spin_lock(&base->lock);
-	base->clock_base[HRTIMER_BASE_REALTIME].offset =
-		timespec_to_ktime(realtime_offset);
-	base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
-		timespec_to_ktime(sleep);
-
-	hrtimer_force_reprogram(base, 0);
-	raw_spin_unlock(&base->lock);
-}
-
-/*
- * Clock realtime was set
- *
- * Change the offset of the realtime clock vs. the monotonic
- * clock.
- *
- * We might have to reprogram the high resolution timer interrupt. On
- * SMP we call the architecture specific code to retrigger _all_ high
- * resolution timer interrupts. On UP we just disable interrupts and
- * call the high resolution interrupt code.
- */
-void clock_was_set(void)
-{
-	/* Retrigger the CPU local events everywhere */
-	on_each_cpu(retrigger_next_event, NULL, 1);
-}
-
-/*
- * During resume we might have to reprogram the high resolution timer
- * interrupt (on the local CPU):
- */
-void hres_timers_resume(void)
-{
-	WARN_ONCE(!irqs_disabled(),
-		  KERN_INFO "hres_timers_resume() called with IRQs enabled!");
-
-	retrigger_next_event(NULL);
-}
-
 /*
  * Initialize the high resolution related parts of cpu_base
  */
@@ -714,12 +654,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 	return 0;
 }
 
+static void retrigger_next_event(void *arg);
+
 /*
  * Switch to high resolution mode
  */
 static int hrtimer_switch_to_hres(void)
 {
-	int cpu = smp_processor_id();
+	int i, cpu = smp_processor_id();
 	struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
 	unsigned long flags;
 
@@ -735,9 +677,8 @@ static int hrtimer_switch_to_hres(void)
 		return 0;
 	}
 	base->hres_active = 1;
-	base->clock_base[HRTIMER_BASE_REALTIME].resolution = KTIME_HIGH_RES;
-	base->clock_base[HRTIMER_BASE_MONOTONIC].resolution = KTIME_HIGH_RES;
-	base->clock_base[HRTIMER_BASE_BOOTTIME].resolution = KTIME_HIGH_RES;
+	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
+		base->clock_base[i].resolution = KTIME_HIGH_RES;
 
 	tick_setup_sched_timer();
 
@@ -764,6 +705,62 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
+/*
+ * Retrigger next event is called after clock was set
+ *
+ * Called with interrupts disabled via on_each_cpu()
+ */
+static void retrigger_next_event(void *arg)
+{
+	struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
+	struct timespec realtime_offset, xtim, wtm, sleep;
+
+	if (!hrtimer_hres_active())
+		return;
+
+	get_xtime_and_monotonic_and_sleep_offset(&xtim, &wtm, &sleep);
+	set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
+
+	/* Adjust CLOCK_REALTIME offset */
+	raw_spin_lock(&base->lock);
+	base->clock_base[HRTIMER_BASE_REALTIME].offset =
+		timespec_to_ktime(realtime_offset);
+	base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
+		timespec_to_ktime(sleep);
+
+	hrtimer_force_reprogram(base, 0);
+	raw_spin_unlock(&base->lock);
+}
+
+/*
+ * Clock realtime was set
+ *
+ * Change the offset of the realtime clock vs. the monotonic
+ * clock.
+ *
+ * We might have to reprogram the high resolution timer interrupt. On
+ * SMP we call the architecture specific code to retrigger _all_ high
+ * resolution timer interrupts. On UP we just disable interrupts and
+ * call the high resolution interrupt code.
+ */
+void clock_was_set(void)
+{
+	/* Retrigger the CPU local events everywhere */
+	on_each_cpu(retrigger_next_event, NULL, 1);
+}
+
+/*
+ * During resume we might have to reprogram the high resolution timer
+ * interrupt (on the local CPU):
+ */
+void hrtimers_resume(void)
+{
+	WARN_ONCE(!irqs_disabled(),
+		  KERN_INFO "hrtimers_resume() called with IRQs enabled!");
+
+	retrigger_next_event(NULL);
+}
+
 static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
 {
 #ifdef CONFIG_TIMER_STATS
kernel/time/timekeeping.c

@@ -680,7 +680,7 @@ static void timekeeping_resume(void)
 	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);
 
 	/* Resume hrtimers */
-	hres_timers_resume();
+	hrtimers_resume();
 }
 
 static int timekeeping_suspend(void)