// SPDX-License-Identifier: GPL-2.0
/*
 *    Virtual cpu timer based timer functions.
 *
 *    Copyright IBM Corp. 2004, 2012
 *    Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/sched/cputime.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/types.h>
#include <linux/time.h>

#include <asm/vtimer.h>
#include <asm/vtime.h>
#include <asm/cpu_mf.h>
#include <asm/smp.h>

#include "entry.h"

static void virt_timer_expire(void);

static LIST_HEAD(virt_timer_list);
static DEFINE_SPINLOCK(virt_timer_lock);
static atomic64_t virt_timer_current;
static atomic64_t virt_timer_elapsed;

DEFINE_PER_CPU(u64, mt_cycles[8]);
static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
static DEFINE_PER_CPU(u64, mt_scaling_jiffies);

static inline u64 get_vtimer(void)
{
	u64 timer;

	asm volatile("stpt %0" : "=Q" (timer));
	return timer;
}

static inline void set_vtimer(u64 expires)
{
	u64 timer;

	asm volatile(
		"	stpt	%0\n"	/* Store current cpu timer value */
		"	spt	%1"	/* Set new value imm. afterwards */
		: "=Q" (timer) : "Q" (expires));
	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
	S390_lowcore.last_update_timer = expires;
}
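
/*
 * virt_timer_forward - account elapsed cpu timer time in
 * virt_timer_elapsed and report whether the earliest pending virtual
 * timer (virt_timer_current) is now due, i.e. whether the caller
 * should run virt_timer_expire().
 */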
static inline int virt_timer_forward(u64 elapsed)
{
	BUG_ON(!irqs_disabled());
	if (list_empty(&virt_timer_list))
		return 0;
	elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
	return elapsed >= atomic64_read(&virt_timer_current);
}

static void update_mt_scaling(void)
{
	u64 cycles_new[8], *cycles_old;
	u64 delta, fac, mult, div;
	int i;

	stcctm(MT_DIAG, smp_cpu_mtid + 1, cycles_new);
	cycles_old = this_cpu_ptr(mt_cycles);
	fac = 1;
	mult = div = 0;
	for (i = 0; i <= smp_cpu_mtid; i++) {
		delta = cycles_new[i] - cycles_old[i];
		div += delta;
		mult *= i + 1;
		mult += delta * fac;
		fac *= i + 1;
	}
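	/*
	 * A sketch of the arithmetic above (derived from the loop, not an
	 * authoritative statement): with per-thread cycle deltas d_0..d_n,
	 * after the "div *= fac" below the ratio reduces to
	 *
	 *	mult / div = (d_0/1 + d_1/2 + ... + d_n/(n+1)) / (d_0 + ... + d_n)
	 *
	 * so cycles seen while i+1 hardware threads share the core are
	 * weighted by 1/(i+1), and scale_vtime() can convert thread time
	 * into an estimate of full-core capacity actually consumed.
	 */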
	div *= fac;
	if (div > 0) {
		/* Update scaling factor */
		__this_cpu_write(mt_scaling_mult, mult);
		__this_cpu_write(mt_scaling_div, div);
		memcpy(cycles_old, cycles_new,
		       sizeof(u64) * (smp_cpu_mtid + 1));
	}
	__this_cpu_write(mt_scaling_jiffies, jiffies_64);
}

static inline u64 update_tsk_timer(unsigned long *tsk_vtime, u64 new)
{
	u64 delta;

	delta = new - *tsk_vtime;
	*tsk_vtime = new;
	return delta;
}

static inline u64 scale_vtime(u64 vtime)
{
	u64 mult = __this_cpu_read(mt_scaling_mult);
	u64 div = __this_cpu_read(mt_scaling_div);

	if (smp_cpu_mtid)
		return vtime * mult / div;
	return vtime;
}

static void account_system_index_scaled(struct task_struct *p, u64 cputime,
					enum cpu_usage_stat index)
{
	p->stimescaled += cputime_to_nsecs(scale_vtime(cputime));
	account_system_index_time(p, cputime_to_nsecs(cputime), index);
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
static int do_account_vtime(struct task_struct *tsk)
{
	u64 timer, clock, user, guest, system, hardirq, softirq;

	timer = S390_lowcore.last_update_timer;
	clock = S390_lowcore.last_update_clock;
	asm volatile(
		"	stpt	%0\n"	/* Store current cpu timer value */
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
		"	stckf	%1"	/* Store current tod clock value */
#else
		"	stck	%1"	/* Store current tod clock value */
#endif
		: "=Q" (S390_lowcore.last_update_timer),
		  "=Q" (S390_lowcore.last_update_clock));
	clock = S390_lowcore.last_update_clock - clock;
	timer -= S390_lowcore.last_update_timer;

	if (hardirq_count())
		S390_lowcore.hardirq_timer += timer;
	else
		S390_lowcore.system_timer += timer;

	/* Update MT utilization calculation */
	if (smp_cpu_mtid &&
	    time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
		update_mt_scaling();

	/* Calculate cputime delta */
	user = update_tsk_timer(&tsk->thread.user_timer,
				READ_ONCE(S390_lowcore.user_timer));
	guest = update_tsk_timer(&tsk->thread.guest_timer,
				 READ_ONCE(S390_lowcore.guest_timer));
	system = update_tsk_timer(&tsk->thread.system_timer,
				  READ_ONCE(S390_lowcore.system_timer));
	hardirq = update_tsk_timer(&tsk->thread.hardirq_timer,
				   READ_ONCE(S390_lowcore.hardirq_timer));
	softirq = update_tsk_timer(&tsk->thread.softirq_timer,
				   READ_ONCE(S390_lowcore.softirq_timer));
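	/*
	 * The remainder of the TOD clock delta that none of the cputime
	 * buckets accounts for was spent outside this (virtual) cpu and
	 * is treated as steal time.
	 */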
	S390_lowcore.steal_timer +=
		clock - user - guest - system - hardirq - softirq;

	/* Push account value */
	if (user) {
		account_user_time(tsk, cputime_to_nsecs(user));
		tsk->utimescaled += cputime_to_nsecs(scale_vtime(user));
	}

	if (guest) {
		account_guest_time(tsk, cputime_to_nsecs(guest));
		tsk->utimescaled += cputime_to_nsecs(scale_vtime(guest));
	}

	if (system)
		account_system_index_scaled(tsk, system, CPUTIME_SYSTEM);
	if (hardirq)
		account_system_index_scaled(tsk, hardirq, CPUTIME_IRQ);
	if (softirq)
		account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ);

	return virt_timer_forward(user + guest + system + hardirq + softirq);
}

void vtime_task_switch(struct task_struct *prev)
{
	do_account_vtime(prev);
	prev->thread.user_timer = S390_lowcore.user_timer;
	prev->thread.guest_timer = S390_lowcore.guest_timer;
	prev->thread.system_timer = S390_lowcore.system_timer;
	prev->thread.hardirq_timer = S390_lowcore.hardirq_timer;
	prev->thread.softirq_timer = S390_lowcore.softirq_timer;
	S390_lowcore.user_timer = current->thread.user_timer;
	S390_lowcore.guest_timer = current->thread.guest_timer;
	S390_lowcore.system_timer = current->thread.system_timer;
	S390_lowcore.hardirq_timer = current->thread.hardirq_timer;
	S390_lowcore.softirq_timer = current->thread.softirq_timer;
}

/*
 * On s390, accounting pending user time also implies
 * accounting system time in order to correctly compute
 * the stolen time accounting.
 */
void vtime_flush(struct task_struct *tsk)
{
	u64 steal, avg_steal;

	if (do_account_vtime(tsk))
		virt_timer_expire();

	steal = S390_lowcore.steal_timer;
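	/*
	 * Halving the previous average before adding the new steal time
	 * makes avg_steal_timer a simple exponentially decaying average,
	 * so short steal spikes fade out over successive flushes.
	 */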
	avg_steal = S390_lowcore.avg_steal_timer / 2;
	if ((s64) steal > 0) {
		S390_lowcore.steal_timer = 0;
		account_steal_time(steal);
		avg_steal += steal;
	}
	S390_lowcore.avg_steal_timer = avg_steal;
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
void vtime_account_irq_enter(struct task_struct *tsk)
{
	u64 timer;

	timer = S390_lowcore.last_update_timer;
	S390_lowcore.last_update_timer = get_vtimer();
	timer -= S390_lowcore.last_update_timer;

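	/*
	 * Charge the elapsed cpu timer delta: with PF_VCPU set and no
	 * interrupt context it was spent running a guest; otherwise it
	 * goes to hardirq, softirq or plain system time, in that order.
	 */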
	if ((tsk->flags & PF_VCPU) && (irq_count() == 0))
		S390_lowcore.guest_timer += timer;
	else if (hardirq_count())
		S390_lowcore.hardirq_timer += timer;
	else if (in_serving_softirq())
		S390_lowcore.softirq_timer += timer;
	else
		S390_lowcore.system_timer += timer;

	virt_timer_forward(timer);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);

void vtime_account_system(struct task_struct *tsk)
__attribute__((alias("vtime_account_irq_enter")));
EXPORT_SYMBOL_GPL(vtime_account_system);

/*
 * Sorted add to a list. List is linear searched until first bigger
 * element is found.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
	struct vtimer_list *tmp;

	list_for_each_entry(tmp, head, entry) {
		if (tmp->expires > timer->expires) {
			list_add_tail(&timer->entry, &tmp->entry);
			return;
		}
	}
	list_add_tail(&timer->entry, head);
}

/*
 * Handler for expired virtual CPU timer.
 */
static void virt_timer_expire(void)
{
	struct vtimer_list *timer, *tmp;
	unsigned long elapsed;
	LIST_HEAD(cb_list);

	/* walk timer list, fire all expired timers */
	spin_lock(&virt_timer_lock);
	elapsed = atomic64_read(&virt_timer_elapsed);
	list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) {
		if (timer->expires < elapsed)
			/* move expired timer to the callback queue */
			list_move_tail(&timer->entry, &cb_list);
		else
			timer->expires -= elapsed;
	}
	if (!list_empty(&virt_timer_list)) {
		timer = list_first_entry(&virt_timer_list,
					 struct vtimer_list, entry);
		atomic64_set(&virt_timer_current, timer->expires);
	}
	atomic64_sub(elapsed, &virt_timer_elapsed);
	spin_unlock(&virt_timer_lock);
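
	/*
	 * The callbacks below run without virt_timer_lock held; a timer
	 * function may therefore re-arm itself, e.g. via add_virt_timer().
	 */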
	/* Do callbacks and recharge periodic timers */
	list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
		list_del_init(&timer->entry);
		timer->function(timer->data);
		if (timer->interval) {
			/* Recharge interval timer */
			timer->expires = timer->interval +
				atomic64_read(&virt_timer_elapsed);
			spin_lock(&virt_timer_lock);
			list_add_sorted(timer, &virt_timer_list);
			spin_unlock(&virt_timer_lock);
		}
	}
}

void init_virt_timer(struct vtimer_list *timer)
{
	timer->function = NULL;
	INIT_LIST_HEAD(&timer->entry);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int vtimer_pending(struct vtimer_list *timer)
{
	return !list_empty(&timer->entry);
}

static void internal_add_vtimer(struct vtimer_list *timer)
{
	if (list_empty(&virt_timer_list)) {
		/* First timer, just program it. */
		atomic64_set(&virt_timer_current, timer->expires);
		atomic64_set(&virt_timer_elapsed, 0);
		list_add(&timer->entry, &virt_timer_list);
	} else {
		/* Update timer against current base. */
		timer->expires += atomic64_read(&virt_timer_elapsed);
		if (likely((s64) timer->expires <
			   (s64) atomic64_read(&virt_timer_current)))
			/* The new timer expires before the current timer. */
			atomic64_set(&virt_timer_current, timer->expires);
		/* Insert new timer into the list. */
		list_add_sorted(timer, &virt_timer_list);
	}
}

static void __add_vtimer(struct vtimer_list *timer, int periodic)
{
	unsigned long flags;

	timer->interval = periodic ? timer->expires : 0;
	spin_lock_irqsave(&virt_timer_lock, flags);
	internal_add_vtimer(timer);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(struct vtimer_list *timer)
{
	__add_vtimer(timer, 0);
}
EXPORT_SYMBOL(add_virt_timer);

/*
 * add_virt_timer_periodic - add an interval virtual CPU timer
 */
void add_virt_timer_periodic(struct vtimer_list *timer)
{
	__add_vtimer(timer, 1);
}
EXPORT_SYMBOL(add_virt_timer_periodic);
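
/*
 * A minimal usage sketch for the vtimer API (hypothetical caller; the
 * names my_vtimer/my_vtimer_fn and the chosen expiry are illustrative
 * assumptions, not part of this file). expires/interval are given in
 * CPU-timer units in TOD clock format, i.e. 4096 units per microsecond:
 *
 *	static struct vtimer_list my_vtimer;
 *
 *	static void my_vtimer_fn(unsigned long data)
 *	{
 *		// called once the cpu consumed the requested virtual time
 *	}
 *
 *	init_virt_timer(&my_vtimer);
 *	my_vtimer.function = my_vtimer_fn;
 *	my_vtimer.data = 0;
 *	my_vtimer.expires = 1000 * 4096;	// ~1 ms of cpu time
 *	add_virt_timer_periodic(&my_vtimer);
 */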

static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic)
{
	unsigned long flags;
	int rc;

	BUG_ON(!timer->function);

	if (timer->expires == expires && vtimer_pending(timer))
		return 1;
	spin_lock_irqsave(&virt_timer_lock, flags);
	rc = vtimer_pending(timer);
	if (rc)
		list_del_init(&timer->entry);
	timer->interval = periodic ? expires : 0;
	timer->expires = expires;
	internal_add_vtimer(timer);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
	return rc;
}

/*
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer(struct vtimer_list *timer, u64 expires)
{
	return __mod_vtimer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_virt_timer);

/*
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires)
{
	return __mod_vtimer(timer, expires, 1);
}
EXPORT_SYMBOL(mod_virt_timer_periodic);

/*
 * Delete a virtual timer.
 *
 * returns whether the deleted timer was pending (1) or not (0)
 */
int del_virt_timer(struct vtimer_list *timer)
{
	unsigned long flags;

	if (!vtimer_pending(timer))
		return 0;
	spin_lock_irqsave(&virt_timer_lock, flags);
	list_del_init(&timer->entry);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
	return 1;
}
EXPORT_SYMBOL(del_virt_timer);

/*
 * Start the virtual CPU timer on the current CPU.
 */
void vtime_init(void)
{
	/* set initial cpu timer */
	set_vtimer(VTIMER_MAX_SLICE);
	/* Setup initial MT scaling values */
	if (smp_cpu_mtid) {
		__this_cpu_write(mt_scaling_jiffies, jiffies);
		__this_cpu_write(mt_scaling_mult, 1);
		__this_cpu_write(mt_scaling_div, 1);
		stcctm(MT_DIAG, smp_cpu_mtid + 1, this_cpu_ptr(mt_cycles));
	}
}