/*
 *  include/asm-s390/timex.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *
 *  Derived from "include/asm-i386/timex.h"
 *    Copyright (C) 1992, Linus Torvalds
 */
# ifndef _ASM_S390_TIMEX_H
# define _ASM_S390_TIMEX_H
/* The value of the TOD clock for 1.1.1970. */
# define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
/* Inline functions for clock register access. */
static inline int set_clock ( __u64 time )
{
int cc ;
asm volatile (
" sck 0(%2) \n "
" ipm %0 \n "
" srl %0,28 \n "
: " =d " ( cc ) : " m " ( time ) , " a " ( & time ) : " cc " ) ;
return cc ;
}
static inline int store_clock ( __u64 * time )
{
int cc ;
asm volatile (
" stck 0(%2) \n "
" ipm %0 \n "
" srl %0,28 \n "
: " =d " ( cc ) , " =m " ( * time ) : " a " ( time ) : " cc " ) ;
return cc ;
}
static inline void set_clock_comparator ( __u64 time )
{
asm volatile ( " sckc 0(%1) " : : " m " (time), " a " (&time)) ;
}
static inline void store_clock_comparator ( __u64 * time )
{
asm volatile ( " stckc 0(%1) " : " = m " (*time) : " a " (time)) ;
}
# define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
typedef unsigned long long cycles_t ;
static inline unsigned long long get_clock ( void )
{
unsigned long long clk ;
2006-09-28 16:56:43 +02:00
# if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
asm volatile ( " stck %0 " : " =Q " ( clk ) : : " cc " ) ;
# else /* __GNUC__ */
asm volatile ( " stck 0(%1) " : " = m " (clk) : " a " (&clk) : " cc " ) ;
# endif /* __GNUC__ */
2005-04-16 15:20:36 -07:00
return clk ;
}
static inline unsigned long long get_clock_xt ( void )
2007-02-05 21:18:22 +01:00
{
2008-04-17 07:46:16 +02:00
unsigned char clk [ 16 ] ;
2007-02-05 21:18:22 +01:00
# if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
2008-04-17 07:46:16 +02:00
asm volatile ( " stcke %0 " : " =Q " ( clk ) : : " cc " ) ;
2007-02-05 21:18:22 +01:00
# else /* __GNUC__ */
2008-04-17 07:46:16 +02:00
asm volatile ( " stcke 0(%1) " : " =m " ( clk )
: " a " ( clk ) : " cc " ) ;
2007-02-05 21:18:22 +01:00
# endif /* __GNUC__ */
2008-04-17 07:46:16 +02:00
return * ( ( unsigned long long * ) & clk [ 1 ] ) ;
2007-02-05 21:18:22 +01:00
}
static inline cycles_t get_cycles ( void )
{
return ( cycles_t ) get_clock ( ) > > 2 ;
}
int get_sync_clock ( unsigned long long * clock ) ;
void init_cpu_timer ( void ) ;
unsigned long long monotonic_clock ( void ) ;
void tod_to_timeval ( __u64 , struct timespec * ) ;
static inline
void stck_to_timespec ( unsigned long long stck , struct timespec * ts )
{
tod_to_timeval ( stck - TOD_UNIX_EPOCH , ts ) ;
}
extern u64 sched_clock_base_cc ;
/**
* get_clock_monotonic - returns current time in clock rate units
*
* The caller must ensure that preemption is disabled .
* The clock and sched_clock_base get changed via stop_machine .
* Therefore preemption must be disabled when calling this
* function , otherwise the returned value is not guaranteed to
* be monotonic .
*/
static inline unsigned long long get_clock_monotonic ( void )
{
return get_clock_xt ( ) - sched_clock_base_cc ;
}
# endif