@@ -118,6 +118,26 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
 	tk->offs_boot = ktime_add(tk->offs_boot, delta);
 }
 
+/*
+ * tk_clock_read - atomic clocksource read() helper
+ *
+ * This helper is necessary to use in the read paths because, while the
+ * seqlock ensures we don't return a bad value while structures are updated,
+ * it doesn't protect from potential crashes. There is the possibility that
+ * the tkr's clocksource may change between the read reference, and the
+ * clock reference passed to the read function. This can cause crashes if
+ * the wrong clocksource is passed to the wrong read function.
+ * This isn't necessary to use when holding the timekeeper_lock or doing
+ * a read of the fast-timekeeper tkrs (which is protected by its own locking
+ * and update logic).
+ */
+static inline u64 tk_clock_read(struct tk_read_base *tkr)
+{
+	struct clocksource *clock = READ_ONCE(tkr->clock);
+
+	return clock->read(clock);
+}
+
 #ifdef CONFIG_DEBUG_TIMEKEEPING
 #define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
 
@@ -175,7 +195,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
 	 */
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
-		now = tkr->read(tkr->clock);
+		now = tk_clock_read(tkr);
 		last = tkr->cycle_last;
 		mask = tkr->mask;
 		max = tkr->clock->max_cycles;
@@ -209,7 +229,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
 	u64 cycle_now, delta;
 
 	/* read clocksource */
-	cycle_now = tkr->read(tkr->clock);
+	cycle_now = tk_clock_read(tkr);
 
 	/* calculate the delta since the last update_wall_time */
 	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
@@ -238,12 +258,10 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 	++tk->cs_was_changed_seq;
 	old_clock = tk->tkr_mono.clock;
 	tk->tkr_mono.clock = clock;
-	tk->tkr_mono.read = clock->read;
 	tk->tkr_mono.mask = clock->mask;
-	tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);
+	tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
 
 	tk->tkr_raw.clock = clock;
-	tk->tkr_raw.read = clock->read;
 	tk->tkr_raw.mask = clock->mask;
 	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
 
@@ -262,7 +280,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 	/* Go back from cycles -> shifted ns */
 	tk->xtime_interval = interval * clock->mult;
 	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
-	tk->raw_interval = (interval * clock->mult) >> clock->shift;
+	tk->raw_interval = interval * clock->mult;
 
 	 /* if changing clocks, convert xtime_nsec shift units */
 	if (old_clock) {
@@ -404,7 +422,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 
 		now += timekeeping_delta_to_ns(tkr,
 				clocksource_delta(
-					tkr->read(tkr->clock),
+					tk_clock_read(tkr),
 					tkr->cycle_last,
 					tkr->mask));
 	} while (read_seqcount_retry(&tkf->seq, seq));
@@ -461,6 +479,10 @@ static u64 dummy_clock_read(struct clocksource *cs)
 	return cycles_at_suspend;
 }
 
+static struct clocksource dummy_clock = {
+	.read = dummy_clock_read,
+};
+
 /**
  * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
  * @tk: Timekeeper to snapshot.
@@ -477,13 +499,13 @@ static void halt_fast_timekeeper(struct timekeeper *tk)
 	struct tk_read_base *tkr = &tk->tkr_mono;
 
 	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
-	cycles_at_suspend = tkr->read(tkr->clock);
-	tkr_dummy.read = dummy_clock_read;
+	cycles_at_suspend = tk_clock_read(tkr);
+	tkr_dummy.clock = &dummy_clock;
 	update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
 
 	tkr = &tk->tkr_raw;
 	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
-	tkr_dummy.read = dummy_clock_read;
+	tkr_dummy.clock = &dummy_clock;
 	update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
 }
 
@@ -649,11 +671,10 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
  */
 static void timekeeping_forward_now(struct timekeeper *tk)
 {
-	struct clocksource *clock = tk->tkr_mono.clock;
 	u64 cycle_now, delta;
 	u64 nsec;
 
-	cycle_now = tk->tkr_mono.read(clock);
+	cycle_now = tk_clock_read(&tk->tkr_mono);
 	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
 	tk->tkr_mono.cycle_last = cycle_now;
 	tk->tkr_raw.cycle_last = cycle_now;
@@ -929,8 +950,7 @@ void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
 
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
-
-		now = tk->tkr_mono.read(tk->tkr_mono.clock);
+		now = tk_clock_read(&tk->tkr_mono);
 		systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
 		systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
 		base_real = ktime_add(tk->tkr_mono.base,
@@ -1108,7 +1128,7 @@ int get_device_system_crosststamp(int (*get_time_fn)
 		 * Check whether the system counter value provided by the
 		 * device driver is on the current timekeeping interval.
 		 */
-		now = tk->tkr_mono.read(tk->tkr_mono.clock);
+		now = tk_clock_read(&tk->tkr_mono);
 		interval_start = tk->tkr_mono.cycle_last;
 		if (!cycle_between(interval_start, cycles, now)) {
 			clock_was_set_seq = tk->clock_was_set_seq;
@@ -1629,7 +1649,7 @@ void timekeeping_resume(void)
 	 * The less preferred source will only be tried if there is no better
 	 * usable source. The rtc part is handled separately in rtc core code.
 	 */
-	cycle_now = tk->tkr_mono.read(clock);
+	cycle_now = tk_clock_read(&tk->tkr_mono);
 	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
 	    cycle_now > tk->tkr_mono.cycle_last) {
 		u64 nsec, cyc_delta;
@@ -1976,7 +1996,7 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
 				    u32 shift, unsigned int *clock_set)
 {
 	u64 interval = tk->cycle_interval << shift;
-	u64 raw_nsecs;
+	u64 snsec_per_sec;
 
 	/* If the offset is smaller than a shifted interval, do nothing */
 	if (offset < interval)
@@ -1991,14 +2011,15 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
 	*clock_set |= accumulate_nsecs_to_secs(tk);
 
 	/* Accumulate raw time */
-	raw_nsecs = (u64)tk->raw_interval << shift;
-	raw_nsecs += tk->raw_time.tv_nsec;
-	if (raw_nsecs >= NSEC_PER_SEC) {
-		u64 raw_secs = raw_nsecs;
-		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
-		tk->raw_time.tv_sec += raw_secs;
+	tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
+	tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
+	snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
+	while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
+		tk->tkr_raw.xtime_nsec -= snsec_per_sec;
+		tk->raw_time.tv_sec++;
 	}
-	tk->raw_time.tv_nsec = raw_nsecs;
+	tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift;
+	tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
 
 	/* Accumulate error between NTP and clock interval */
 	tk->ntp_error += tk->ntp_tick << shift;
@@ -2030,7 +2051,7 @@ void update_wall_time(void)
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
 	offset = real_tk->cycle_interval;
 #else
-	offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
+	offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
 				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
 #endif
 
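
The pattern this patch introduces in tk_clock_read() is to take a single READ_ONCE() snapshot of tkr->clock and use that one pointer both as the object whose read() hook is called and as the argument passed to it, so a concurrent clocksource switch can no longer pair one clock's read function with another clock's state. The following is a minimal user-space sketch of that idea, not part of the patch and not kernel API: every identifier is a hypothetical stand-in, and READ_ONCE() is approximated with a volatile cast.

/*
 * Illustrative user-space sketch of the tk_clock_read() pattern.
 * Build with: cc -Wall -o sketch sketch.c
 */
#include <stdio.h>

struct sketch_clocksource {
	unsigned long long (*read)(struct sketch_clocksource *cs);
	unsigned long long counter;
};

struct sketch_tk_read_base {
	struct sketch_clocksource *clock;	/* may be swapped at runtime */
};

static unsigned long long sketch_read(struct sketch_clocksource *cs)
{
	return ++cs->counter;
}

static struct sketch_clocksource clock_a = { .read = sketch_read };
static struct sketch_clocksource clock_b = { .read = sketch_read };

/* Rough analogue of READ_ONCE(): force a single load of the pointer. */
#define SKETCH_READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static unsigned long long sketch_tk_clock_read(struct sketch_tk_read_base *tkr)
{
	/* One snapshot; the same object is dereferenced and passed as the argument. */
	struct sketch_clocksource *clock = SKETCH_READ_ONCE(tkr->clock);

	return clock->read(clock);
}

int main(void)
{
	struct sketch_tk_read_base tkr = { .clock = &clock_a };

	printf("first read:  %llu\n", sketch_tk_clock_read(&tkr));
	tkr.clock = &clock_b;		/* simulate a clocksource change */
	printf("second read: %llu\n", sketch_tk_clock_read(&tkr));
	return 0;
}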