// SPDX-License-Identifier: GPL-2.0
/*
 * Generic userspace implementations of gettimeofday() and similar.
 */
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#ifndef vdso_calc_delta
/*
 * Default implementation which works for all sane clocksources. That
 * obviously excludes x86/TSC.
 */
static __always_inline
u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
{
	return ((cycles - last) & mask) * mult;
}
#endif
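/*
 * Architectures can replace the generic hooks in this file by providing
 * their own inline of the same name and defining the matching macro (e.g.
 * "#define vdso_calc_delta vdso_calc_delta") in asm/vdso/gettimeofday.h
 * before this file is included. A minimal sketch, with a hypothetical body
 * for a counter that cannot wrap:
 *
 *	static __always_inline
 *	u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
 *	{
 *		return (cycles - last) * mult;
 *	}
 *	#define vdso_calc_delta vdso_calc_delta
 */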
#ifndef vdso_shift_ns
static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
{
	return ns >> shift;
}
#endif
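/*
 * The shift is a separate hook because the plain 64-bit right shift can
 * compile poorly; the override was added for 32-bit powerpc, where GCC
 * assumes the shift is likely >= 32 even though vd->shift is always <= 32.
 */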
#ifndef __arch_vdso_hres_capable
static inline bool __arch_vdso_hres_capable(void)
{
	return true;
}
#endif
#ifndef vdso_clocksource_ok
static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
{
	return vd->clock_mode != VDSO_CLOCKMODE_NONE;
}
#endif
#ifndef vdso_cycles_ok
static inline bool vdso_cycles_ok(u64 cycles)
{
	return true;
}
#endif
#ifdef CONFIG_TIME_NS
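/*
 * Time namespace variant of the high resolution read: map the special
 * timens VVAR page back to the real VDSO data, read the clock under the
 * seqcount and apply the per-clock namespace offset.
 */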
static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
					  struct __kernel_timespec *ts)
{
	const struct vdso_data *vd;
	const struct timens_offset *offs = &vdns->offset[clk];
	const struct vdso_timestamp *vdso_ts;
	u64 cycles, last, ns;
	u32 seq;
	s64 sec;

	vd = vdns - (clk == CLOCK_MONOTONIC_RAW ? CS_RAW : CS_HRES_COARSE);
	vd = __arch_get_timens_vdso_data(vd);
	if (clk != CLOCK_MONOTONIC_RAW)
		vd = &vd[CS_HRES_COARSE];
	else
		vd = &vd[CS_RAW];
	vdso_ts = &vd->basetime[clk];

	do {
		seq = vdso_read_begin(vd);

		if (unlikely(!vdso_clocksource_ok(vd)))
			return -1;

		cycles = __arch_get_hw_counter(vd->clock_mode, vd);
		if (unlikely(!vdso_cycles_ok(cycles)))
			return -1;
		ns = vdso_ts->nsec;
		last = vd->cycle_last;
		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
		ns = vdso_shift_ns(ns, vd->shift);
		sec = vdso_ts->sec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/* Add the namespace offset */
	sec += offs->sec;
	ns += offs->nsec;

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}
#else
static __always_inline
const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
{
	return NULL;
}

static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
					  struct __kernel_timespec *ts)
{
	return -EINVAL;
}
#endif
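/*
 * High resolution read: sample the architecture's hardware counter and add
 * the scaled delta since the last timekeeper update to the VDSO base time.
 */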
static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
				   struct __kernel_timespec *ts)
{
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	u64 cycles, last, sec, ns;
	u32 seq;

	/* Allows the high resolution parts to be compiled out */
	if (!__arch_vdso_hres_capable())
		return -1;

	do {
		/*
		 * Open coded to handle VDSO_CLOCKMODE_TIMENS. Time namespace
		 * enabled tasks have a special VVAR page installed which
		 * has vd->seq set to 1 and vd->clock_mode set to
		 * VDSO_CLOCKMODE_TIMENS. For non time namespace affected tasks
		 * this does not affect performance because if vd->seq is
		 * odd, i.e. a concurrent update is in progress, the extra
		 * check for vd->clock_mode is just a few extra
		 * instructions while spin waiting for vd->seq to become
		 * even again.
		 */
		while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) {
			if (IS_ENABLED(CONFIG_TIME_NS) &&
			    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
				return do_hres_timens(vd, clk, ts);
			cpu_relax();
		}
		smp_rmb();

		if (unlikely(!vdso_clocksource_ok(vd)))
			return -1;

		cycles = __arch_get_hw_counter(vd->clock_mode, vd);
		if (unlikely(!vdso_cycles_ok(cycles)))
			return -1;
		ns = vdso_ts->nsec;
		last = vd->cycle_last;
		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
		ns = vdso_shift_ns(ns, vd->shift);
		sec = vdso_ts->sec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}
#ifdef CONFIG_TIME_NS
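/*
 * Time namespace variant of the coarse read: fetch the coarse base time
 * from the real VDSO data and apply the per-clock namespace offset.
 */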
static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
					    struct __kernel_timespec *ts)
{
	const struct vdso_data *vd = __arch_get_timens_vdso_data(vdns);
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	const struct timens_offset *offs = &vdns->offset[clk];
	u64 nsec;
	s64 sec;
	s32 seq;

	do {
		seq = vdso_read_begin(vd);
		sec = vdso_ts->sec;
		nsec = vdso_ts->nsec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/* Add the namespace offset */
	sec += offs->sec;
	nsec += offs->nsec;

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;
	return 0;
}
#else
static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
					    struct __kernel_timespec *ts)
{
	return -1;
}
#endif
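/*
 * Coarse read: return the base timestamps updated by the timekeeper at tick
 * granularity; no hardware counter access is required.
 */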
static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
				     struct __kernel_timespec *ts)
{
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	u32 seq;

	do {
		/*
		 * Open coded to handle VDSO_CLOCKMODE_TIMENS. See comment in
		 * do_hres().
		 */
		while ((seq = READ_ONCE(vd->seq)) & 1) {
			if (IS_ENABLED(CONFIG_TIME_NS) &&
			    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
				return do_coarse_timens(vd, clk, ts);
			cpu_relax();
		}
		smp_rmb();

		ts->tv_sec = vdso_ts->sec;
		ts->tv_nsec = vdso_ts->nsec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	return 0;
}
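/*
 * Map the clockid to the HRES, COARSE or RAW data set and hand off to the
 * matching read function; a non-zero return tells the caller to use the
 * syscall fallback.
 */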
static __always_inline int
__cvdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock,
			     struct __kernel_timespec *ts)
{
	u32 msk;

	/* Check for negative values or invalid clocks */
	if (unlikely((u32) clock >= MAX_CLOCKS))
		return -1;

	/*
	 * Convert the clockid to a bitmask and use it to check which
	 * clocks are handled in the VDSO directly.
	 */
	msk = 1U << clock;
	if (likely(msk & VDSO_HRES))
		vd = &vd[CS_HRES_COARSE];
	else if (msk & VDSO_COARSE)
		return do_coarse(&vd[CS_HRES_COARSE], clock, ts);
	else if (msk & VDSO_RAW)
		vd = &vd[CS_RAW];
	else
		return -1;

	return do_hres(vd, clock, ts);
}
static __maybe_unused int
__cvdso_clock_gettime_data(const struct vdso_data *vd, clockid_t clock,
			   struct __kernel_timespec *ts)
{
	int ret = __cvdso_clock_gettime_common(vd, clock, ts);

	if (unlikely(ret))
		return clock_gettime_fallback(clock, ts);
	return 0;
}
static __maybe_unused int
__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
{
	return __cvdso_clock_gettime_data(__arch_get_vdso_data(), clock, ts);
}
#ifdef BUILD_VDSO32
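/*
 * 32-bit time variant: read into a 64-bit __kernel_timespec and convert to
 * the legacy old_timespec32 layout on success.
 */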
static __maybe_unused int
__cvdso_clock_gettime32_data(const struct vdso_data *vd, clockid_t clock,
			     struct old_timespec32 *res)
{
	struct __kernel_timespec ts;
	int ret;

	ret = __cvdso_clock_gettime_common(vd, clock, &ts);

	if (unlikely(ret))
		return clock_gettime32_fallback(clock, res);

	/* For ret == 0 */
	res->tv_sec = ts.tv_sec;
	res->tv_nsec = ts.tv_nsec;

	return ret;
}
static __maybe_unused int
__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
{
	return __cvdso_clock_gettime32_data(__arch_get_vdso_data(), clock, res);
}
#endif /* BUILD_VDSO32 */
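/*
 * gettimeofday() is built on the CLOCK_REALTIME high resolution read; the
 * obsolete timezone fields are copied straight from the VDSO data.
 */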
static __maybe_unused int
__cvdso_gettimeofday_data(const struct vdso_data *vd,
			  struct __kernel_old_timeval *tv, struct timezone *tz)
{
	if (likely(tv != NULL)) {
		struct __kernel_timespec ts;

		if (do_hres(&vd[CS_HRES_COARSE], CLOCK_REALTIME, &ts))
			return gettimeofday_fallback(tv, tz);

		tv->tv_sec = ts.tv_sec;
		tv->tv_usec = (u32)ts.tv_nsec / NSEC_PER_USEC;
	}

	if (unlikely(tz != NULL)) {
		if (IS_ENABLED(CONFIG_TIME_NS) &&
		    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
			vd = __arch_get_timens_vdso_data(vd);

		tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
		tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime;
	}

	return 0;
}
static __maybe_unused int
__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
	return __cvdso_gettimeofday_data(__arch_get_vdso_data(), tv, tz);
}
#ifdef VDSO_HAS_TIME
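/* time() only needs the CLOCK_REALTIME seconds value from the VDSO data. */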
static __maybe_unused __kernel_old_time_t
__cvdso_time_data(const struct vdso_data *vd, __kernel_old_time_t *time)
{
	__kernel_old_time_t t;

	if (IS_ENABLED(CONFIG_TIME_NS) &&
	    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
		vd = __arch_get_timens_vdso_data(vd);

	t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);

	if (time)
		*time = t;

	return t;
}
static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time)
{
	return __cvdso_time_data(__arch_get_vdso_data(), time);
}
#endif /* VDSO_HAS_TIME */

#ifdef VDSO_HAS_CLOCK_GETRES
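/*
 * clock_getres(): high resolution and raw clocks report the hrtimer
 * resolution published by the timekeeper; coarse clocks report LOW_RES_NSEC.
 */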
static __maybe_unused
int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
				struct __kernel_timespec *res)
{
	u32 msk;
	u64 ns;

	/* Check for negative values or invalid clocks */
	if (unlikely((u32) clock >= MAX_CLOCKS))
		return -1;

	if (IS_ENABLED(CONFIG_TIME_NS) &&
	    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
		vd = __arch_get_timens_vdso_data(vd);

	/*
	 * Convert the clockid to a bitmask and use it to check which
	 * clocks are handled in the VDSO directly.
	 */
	msk = 1U << clock;
	if (msk & (VDSO_HRES | VDSO_RAW)) {
		/*
		 * Preserves the behaviour of posix_get_hrtimer_res().
		 */
		ns = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
	} else if (msk & VDSO_COARSE) {
		/*
		 * Preserves the behaviour of posix_get_coarse_res().
		 */
		ns = LOW_RES_NSEC;
	} else {
		return -1;
	}

	if (likely(res)) {
		res->tv_sec = 0;
		res->tv_nsec = ns;
	}

	return 0;
}
static __maybe_unused
int __cvdso_clock_getres_data(const struct vdso_data *vd, clockid_t clock,
			      struct __kernel_timespec *res)
{
	int ret = __cvdso_clock_getres_common(vd, clock, res);

	if (unlikely(ret))
		return clock_getres_fallback(clock, res);
	return 0;
}
static __maybe_unused
int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
{
	return __cvdso_clock_getres_data(__arch_get_vdso_data(), clock, res);
}
#ifdef BUILD_VDSO32
static __maybe_unused int
__cvdso_clock_getres_time32_data(const struct vdso_data *vd, clockid_t clock,
				 struct old_timespec32 *res)
{
	struct __kernel_timespec ts;
	int ret;

	ret = __cvdso_clock_getres_common(vd, clock, &ts);

	if (unlikely(ret))
		return clock_getres32_fallback(clock, res);

	if (likely(res)) {
		res->tv_sec = ts.tv_sec;
		res->tv_nsec = ts.tv_nsec;
	}
	return ret;
}
static __maybe_unused int
__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
{
	return __cvdso_clock_getres_time32_data(__arch_get_vdso_data(),
						clock, res);
}
#endif /* BUILD_VDSO32 */
#endif /* VDSO_HAS_CLOCK_GETRES */