// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 ARM Ltd.
 *
 * Generic implementation of update_vsyscall and update_vsyscall_tz.
 *
 * Based on the x86 specific implementation.
 */

#include <linux/hrtimer.h>
#include <linux/timekeeper_internal.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include "timekeeping_internal.h"

static inline void update_vdso_data(struct vdso_data *vdata,
				    struct timekeeper *tk)
{
	struct vdso_timestamp *vdso_ts;
	u64 nsec, sec;

	vdata[CS_HRES_COARSE].cycle_last	= tk->tkr_mono.cycle_last;
	vdata[CS_HRES_COARSE].mask		= tk->tkr_mono.mask;
	vdata[CS_HRES_COARSE].mult		= tk->tkr_mono.mult;
	vdata[CS_HRES_COARSE].shift		= tk->tkr_mono.shift;
	vdata[CS_RAW].cycle_last		= tk->tkr_raw.cycle_last;
	vdata[CS_RAW].mask			= tk->tkr_raw.mask;
	vdata[CS_RAW].mult			= tk->tkr_raw.mult;
	vdata[CS_RAW].shift			= tk->tkr_raw.shift;
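
	/*
	 * These are the parameters the vDSO fast path uses to turn a raw
	 * clocksource readout into nanoseconds on its own:
	 *
	 *	delta_ns = (((cycles - cycle_last) & mask) * mult) >> shift
	 */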

	/* CLOCK_MONOTONIC */
	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC];
	vdso_ts->sec	= tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
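
	/*
	 * tkr_mono.xtime_nsec holds nanoseconds left-shifted by
	 * tkr_mono.shift, so the plain tv_nsec offset must be shifted up
	 * before it is added; any overflow beyond one shifted second is
	 * then folded into the seconds part.
	 */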
	nsec = tk->tkr_mono.xtime_nsec;
	nsec += ((u64)tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);
	while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
		nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
		vdso_ts->sec++;
	}
	vdso_ts->nsec	= nsec;

	/* Copy MONOTONIC time for BOOTTIME */
	sec	 = vdso_ts->sec;
	/* Add the boot offset */
	sec	+= tk->monotonic_to_boot.tv_sec;
	nsec	+= (u64)tk->monotonic_to_boot.tv_nsec << tk->tkr_mono.shift;

	/* CLOCK_BOOTTIME */
	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME];
	vdso_ts->sec	= sec;
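
	/*
	 * The boot offset may have pushed nsec past one shifted second
	 * again, so normalize it into the seconds part once more.
	 */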
	while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
		nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
		vdso_ts->sec++;
	}
	vdso_ts->nsec	= nsec;

	/* CLOCK_MONOTONIC_RAW */
	vdso_ts		= &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW];
	vdso_ts->sec	= tk->raw_sec;
	vdso_ts->nsec	= tk->tkr_raw.xtime_nsec;

	/* CLOCK_TAI */
	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI];
	vdso_ts->sec	= tk->xtime_sec + (s64)tk->tai_offset;
	vdso_ts->nsec	= tk->tkr_mono.xtime_nsec;
}

void update_vsyscall(struct timekeeper *tk)
{
	struct vdso_data *vdata = __arch_get_k_vdso_data();
	struct vdso_timestamp *vdso_ts;
	s32 clock_mode;
	u64 nsec;

	/* copy vsyscall data */
	vdso_write_begin(vdata);

	clock_mode = tk->tkr_mono.clock->vdso_clock_mode;
	vdata[CS_HRES_COARSE].clock_mode	= clock_mode;
	vdata[CS_RAW].clock_mode		= clock_mode;
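
	/*
	 * Userspace checks this mode on every read: anything other than
	 * VDSO_CLOCKMODE_NONE means the clocksource can be read from the
	 * vDSO; VDSO_CLOCKMODE_NONE makes it fall back to the syscall.
	 */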

	/* CLOCK_REALTIME also required for time() */
	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
	vdso_ts->sec	= tk->xtime_sec;
	vdso_ts->nsec	= tk->tkr_mono.xtime_nsec;
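
	/*
	 * Note: the high resolution timestamps above keep nsec shifted;
	 * the vDSO shifts it back down after adding the cycle delta. The
	 * COARSE variants below are read as-is and therefore store plain
	 * nanoseconds.
	 */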

	/* CLOCK_REALTIME_COARSE */
	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME_COARSE];
	vdso_ts->sec	= tk->xtime_sec;
	vdso_ts->nsec	= tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;

	/* CLOCK_MONOTONIC_COARSE */
	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC_COARSE];
	vdso_ts->sec	= tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
	nsec		= tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
	nsec		= nsec + tk->wall_to_monotonic.tv_nsec;
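
	/*
	 * nsec may now hold more than a full second; __iter_div_u64_rem()
	 * moves the whole seconds into sec and leaves the remainder in
	 * vdso_ts->nsec.
	 */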
	vdso_ts->sec	+= __iter_div_u64_rem(nsec, NSEC_PER_SEC, &vdso_ts->nsec);

	/*
	 * Read without the seqlock held by clock_getres().
	 * Note: No need to have a second copy.
	 */
	WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);

	/*
	 * If the current clocksource is not VDSO capable, then spare the
	 * update of the high resolution parts.
	 */
	if (clock_mode != VDSO_CLOCKMODE_NONE)
		update_vdso_data(vdata, tk);

	__arch_update_vsyscall(vdata, tk);

	vdso_write_end(vdata);
	__arch_sync_vdso_data(vdata);
}

void update_vsyscall_tz(void)
{
	struct vdso_data *vdata = __arch_get_k_vdso_data();

	vdata[CS_HRES_COARSE].tz_minuteswest = sys_tz.tz_minuteswest;
	vdata[CS_HRES_COARSE].tz_dsttime = sys_tz.tz_dsttime;

	__arch_sync_vdso_data(vdata);
}

/**
 * vdso_update_begin - Start of a VDSO update section
 *
 * Allows architecture code to safely update the architecture specific VDSO
 * data. Disables interrupts, acquires timekeeper lock to serialize against
 * concurrent updates from timekeeping and invalidates the VDSO data
 * sequence counter to prevent concurrent readers from accessing
 * inconsistent data.
 *
 * Returns: Saved interrupt flags which need to be handed in to
 * vdso_update_end().
 */
unsigned long vdso_update_begin(void)
{
	struct vdso_data *vdata = __arch_get_k_vdso_data();
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	vdso_write_begin(vdata);
	return flags;
}

/**
 * vdso_update_end - End of a VDSO update section
 * @flags:	Interrupt flags as returned from vdso_update_begin()
 *
 * Pairs with vdso_update_begin(). Marks vdso data consistent, invokes data
 * synchronization if the architecture requires it, drops timekeeper lock
 * and restores interrupt flags.
 */
void vdso_update_end(unsigned long flags)
{
	struct vdso_data *vdata = __arch_get_k_vdso_data();

	vdso_write_end(vdata);
	__arch_sync_vdso_data(vdata);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
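
/*
 * Illustrative sketch (not part of this file): architecture code that
 * maintains extra fields in its VDSO data page would bracket the update
 * with the helpers above. "arch_private" is a hypothetical field used
 * only for illustration.
 *
 *	struct vdso_data *vdata = __arch_get_k_vdso_data();
 *	unsigned long flags;
 *
 *	flags = vdso_update_begin();
 *	vdata->arch_private = compute_new_value();
 *	vdso_update_end(flags);
 *
 * Readers in the vDSO retry while the sequence count bumped by
 * vdso_write_begin() is odd, so they never observe a half-written update.
 */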