vdso: Consolidate nanoseconds calculation
Consolidate nanoseconds calculation to simplify and reduce code duplication.

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20240325064023.2997-3-adrian.hunter@intel.com
commit 5b26ef660a
parent c8e3a8b6f2
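For orientation before the diff: every vDSO reader loop used to open-code four steps (read the base nanoseconds, read cycle_last, add the scaled delta, shift down), which this change folds into a single vdso_calc_ns() helper. A minimal user-space sketch, using made-up mask/mult/shift values rather than the kernel's, showing that the two shapes compute the same result:

#include <stdint.h>
#include <stdio.h>

/* Example-only conversion parameters (stand-ins for vd->mask/mult/shift). */
static const uint64_t mask  = UINT64_MAX;
static const uint32_t mult  = 3;
static const uint32_t shift = 1;

/* Old shape: separate delta and shift helpers, stitched together by callers. */
static uint64_t vdso_calc_delta(uint64_t cycles, uint64_t last)
{
        return ((cycles - last) & mask) * mult;
}

static uint64_t vdso_shift_ns(uint64_t ns, uint32_t sh)
{
        return ns >> sh;
}

/* New shape: one helper owns the whole cycles-to-nanoseconds conversion. */
static uint64_t vdso_calc_ns(uint64_t cycles, uint64_t cycle_last, uint64_t base)
{
        uint64_t delta = (cycles - cycle_last) & mask;

        return ((delta * mult) + base) >> shift;
}

int main(void)
{
        uint64_t cycles = 1234, cycle_last = 1000, nsec_base = 42;

        /* The four-step sequence the callers used to open-code ... */
        uint64_t ns_old = nsec_base;
        ns_old += vdso_calc_delta(cycles, cycle_last);
        ns_old = vdso_shift_ns(ns_old, shift);

        /* ... collapses into one call; both print 372. */
        uint64_t ns_new = vdso_calc_ns(cycles, cycle_last, nsec_base);

        printf("old=%llu new=%llu\n", (unsigned long long)ns_old,
               (unsigned long long)ns_new);
        return 0;
}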
--- a/arch/x86/include/asm/vdso/gettimeofday.h
+++ b/arch/x86/include/asm/vdso/gettimeofday.h
@@ -300,7 +300,7 @@ static inline bool arch_vdso_cycles_ok(u64 cycles)
 #define vdso_cycles_ok arch_vdso_cycles_ok
 
 /*
- * x86 specific delta calculation.
+ * x86 specific calculation of nanoseconds for the current cycle count
  *
  * The regular implementation assumes that clocksource reads are globally
  * monotonic. The TSC can be slightly off across sockets which can cause
@@ -308,8 +308,8 @@ static inline bool arch_vdso_cycles_ok(u64 cycles)
  * jump.
  *
  * Therefore it needs to be verified that @cycles are greater than
- * @last. If not then use @last, which is the base time of the current
- * conversion period.
+ * @vd->cycles_last. If not then use @vd->cycles_last, which is the base
+ * time of the current conversion period.
  *
  * This variant also uses a custom mask because while the clocksource mask of
  * all the VDSO capable clocksources on x86 is U64_MAX, the above code uses
@@ -317,25 +317,24 @@ static inline bool arch_vdso_cycles_ok(u64 cycles)
  * declares everything with the MSB/Sign-bit set as invalid. Therefore the
  * effective mask is S64_MAX.
  */
-static __always_inline
-u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
+static __always_inline u64 vdso_calc_ns(const struct vdso_data *vd, u64 cycles, u64 base)
 {
        /*
         * Due to the MSB/Sign-bit being used as invalid marker (see
         * arch_vdso_cycles_valid() above), the effective mask is S64_MAX.
         */
-       u64 delta = (cycles - last) & S64_MAX;
+       u64 delta = (cycles - vd->cycle_last) & S64_MAX;
 
        /*
         * Due to the above mentioned TSC wobbles, filter out negative motion.
         * Per the above masking, the effective sign bit is now bit 62.
         */
        if (unlikely(delta & (1ULL << 62)))
-               return 0;
+               return base >> vd->shift;
 
-       return delta * mult;
+       return ((delta * vd->mult) + base) >> vd->shift;
 }
-#define vdso_calc_delta vdso_calc_delta
+#define vdso_calc_ns vdso_calc_ns
 
 #endif /* !__ASSEMBLY__ */
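The negative-motion filter in the comment above is easiest to see with numbers. A standalone sketch modeling the x86 vdso_calc_ns() outside the kernel, with example-only mult/shift values (the real ones come from the kernel's timekeeping code):

#include <stdint.h>
#include <stdio.h>

#define S64_MAX INT64_MAX

/* Hypothetical stand-ins for vd->mult and vd->shift; example values only,
 * roughly corresponding to a ~3 GHz TSC. */
static const uint32_t mult  = 5592405;
static const uint32_t shift = 24;

static uint64_t calc_ns(uint64_t cycles, uint64_t cycle_last, uint64_t base)
{
        /* The MSB is the invalid marker, so the effective mask is S64_MAX. */
        uint64_t delta = (cycles - cycle_last) & S64_MAX;

        /* After masking, bit 62 is the effective sign bit: a "negative"
         * delta means this CPU's TSC read was behind cycle_last
         * (cross-socket wobble), so fall back to the base time of the
         * current conversion period. */
        if (delta & (1ULL << 62))
                return base >> shift;

        return ((delta * mult) + base) >> shift;
}

int main(void)
{
        /* Normal forward motion: cycles ahead of cycle_last -> 33 ns. */
        printf("%llu\n", (unsigned long long)calc_ns(1000100, 1000000, 0));
        /* Wobble: cycles slightly behind cycle_last -> clamps to base (1). */
        printf("%llu\n", (unsigned long long)calc_ns(999990, 1000000, 1 << 24));
        return 0;
}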
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -5,23 +5,12 @@
 #include <vdso/datapage.h>
 #include <vdso/helpers.h>
 
-#ifndef vdso_calc_delta
+#ifndef vdso_calc_ns
 
 #ifdef VDSO_DELTA_NOMASK
-# define VDSO_DELTA_MASK(mask)  U64_MAX
+# define VDSO_DELTA_MASK(vd)    U64_MAX
 #else
-# define VDSO_DELTA_MASK(mask)  (mask)
-#endif
-
-/*
- * Default implementation which works for all sane clocksources. That
- * obviously excludes x86/TSC.
- */
-static __always_inline
-u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
-{
-       return ((cycles - last) & VDSO_DELTA_MASK(mask)) * mult;
-}
+# define VDSO_DELTA_MASK(vd)    (vd->mask)
 #endif
 
 #ifndef vdso_shift_ns
@@ -31,6 +20,18 @@ static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
 }
 #endif
 
+/*
+ * Default implementation which works for all sane clocksources. That
+ * obviously excludes x86/TSC.
+ */
+static __always_inline u64 vdso_calc_ns(const struct vdso_data *vd, u64 cycles, u64 base)
+{
+       u64 delta = (cycles - vd->cycle_last) & VDSO_DELTA_MASK(vd);
+
+       return vdso_shift_ns((delta * vd->mult) + base, vd->shift);
+}
+#endif /* vdso_calc_ns */
+
 #ifndef __arch_vdso_hres_capable
 static inline bool __arch_vdso_hres_capable(void)
 {
@@ -56,10 +57,10 @@ static inline bool vdso_cycles_ok(u64 cycles)
 static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
                                           struct __kernel_timespec *ts)
 {
-       const struct vdso_data *vd;
        const struct timens_offset *offs = &vdns->offset[clk];
        const struct vdso_timestamp *vdso_ts;
-       u64 cycles, last, ns;
+       const struct vdso_data *vd;
+       u64 cycles, ns;
        u32 seq;
        s64 sec;
 
@@ -80,10 +81,7 @@ static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_
                cycles = __arch_get_hw_counter(vd->clock_mode, vd);
                if (unlikely(!vdso_cycles_ok(cycles)))
                        return -1;
-               ns = vdso_ts->nsec;
-               last = vd->cycle_last;
-               ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
-               ns = vdso_shift_ns(ns, vd->shift);
+               ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec);
                sec = vdso_ts->sec;
        } while (unlikely(vdso_read_retry(vd, seq)));
 
@@ -118,7 +116,7 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
                                    struct __kernel_timespec *ts)
 {
        const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
-       u64 cycles, last, sec, ns;
+       u64 cycles, sec, ns;
        u32 seq;
 
        /* Allows to compile the high resolution parts out */
@@ -151,10 +149,7 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
                cycles = __arch_get_hw_counter(vd->clock_mode, vd);
                if (unlikely(!vdso_cycles_ok(cycles)))
                        return -1;
-               ns = vdso_ts->nsec;
-               last = vd->cycle_last;
-               ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
-               ns = vdso_shift_ns(ns, vd->shift);
+               ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec);
                sec = vdso_ts->sec;
        } while (unlikely(vdso_read_retry(vd, seq)));
 
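The generic path relies on VDSO_DELTA_MASK() to make the delta robust against counter wrap-around, which is why it works for all sane clocksources but not the TSC. A small user-space sketch with a hypothetical 32-bit clocksource (mask, mult and shift are example values, not kernel ones):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical 32-bit clocksource: the mask trims the wrapped subtraction. */
static const uint64_t mask  = 0xFFFFFFFFULL;
static const uint32_t mult  = 1000; /* example: 1 MHz -> 1000 ns per cycle */
static const uint32_t shift = 0;    /* example only */

/* Models the generic vdso_calc_ns() above: delta, mult and shift in one place. */
static uint64_t calc_ns(uint64_t cycles, uint64_t cycle_last, uint64_t base)
{
        uint64_t delta = (cycles - cycle_last) & mask;

        return ((delta * mult) + base) >> shift;
}

int main(void)
{
        /* The counter wrapped from near the 32-bit limit back to a small
         * value: masking turns the huge u64 difference into the true
         * 300-cycle delta, so this prints 300 * 1000 + 500 = 300500. */
        uint64_t ns = calc_ns(100, 0xFFFFFF38ULL, 500);

        printf("%llu\n", (unsigned long long)ns);
        return 0;
}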