Merge branch 'timers/posixtimers' into timers/tracing

Merge reason: timer tracepoint patches depend on both branches

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit f71bb0ac5e
@@ -30,6 +30,7 @@ typedef u64 cputime_t;
 typedef u64 cputime64_t;
 
 #define cputime_zero ((cputime_t)0)
+#define cputime_one_jiffy jiffies_to_cputime(1)
 #define cputime_max ((~((cputime_t)0) >> 1) - 1)
 #define cputime_add(__a, __b) ((__a) + (__b))
 #define cputime_sub(__a, __b) ((__a) - (__b))
@@ -18,6 +18,9 @@
 
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
 #include <asm-generic/cputime.h>
+#ifdef __KERNEL__
+static inline void setup_cputime_one_jiffy(void) { }
+#endif
 #else
 
 #include <linux/types.h>
@@ -48,6 +51,11 @@ typedef u64 cputime64_t;
 
 #ifdef __KERNEL__
 
+/*
+ * One jiffy in timebase units computed during initialization
+ */
+extern cputime_t cputime_one_jiffy;
+
 /*
  * Convert cputime <-> jiffies
  */
@@ -89,6 +97,11 @@ static inline cputime_t jiffies_to_cputime(const unsigned long jif)
         return ct;
 }
 
+static inline void setup_cputime_one_jiffy(void)
+{
+        cputime_one_jiffy = jiffies_to_cputime(1);
+}
+
 static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
 {
         cputime_t ct;
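A note on the motivation, then a standalone sketch: on powerpc jiffies_to_cputime() is a real conversion whose factors only exist after calc_cputime_factors() has run, so the value of one jiffy is now computed a single time in setup_cputime_one_jiffy() and cached in cputime_one_jiffy instead of being recomputed on every tick. The sketch below only illustrates that precompute-once pattern; the names, HZ value and timebase frequency are made up for the example and are not kernel interfaces.

/* Standalone sketch (not kernel code) of the caching idea behind
 * cputime_one_jiffy: an expensive per-unit conversion is done once
 * at init instead of on every tick. All names here are illustrative. */
#include <stdio.h>
#include <stdint.h>

#define HZ 250
static uint64_t timebase_hz = 512000000;  /* assumed calibration result */
static uint64_t ticks_per_jiffy;          /* plays the role of cputime_one_jiffy */

static uint64_t jiffies_to_ticks(uint64_t jif)
{
        return jif * (timebase_hz / HZ);  /* division on every call in the slow path */
}

static void setup_ticks_per_jiffy(void)   /* mirrors setup_cputime_one_jiffy() */
{
        ticks_per_jiffy = jiffies_to_ticks(1);
}

int main(void)
{
        setup_ticks_per_jiffy();          /* called once, after "calibration" */
        uint64_t accounted = 0;
        for (int tick = 0; tick < 3; tick++)
                accounted += ticks_per_jiffy;  /* hot path: no division */
        printf("%llu ticks accounted\n", (unsigned long long)accounted);
        return 0;
}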
@@ -193,6 +193,8 @@ EXPORT_SYMBOL(__cputime_clockt_factor);
 DEFINE_PER_CPU(unsigned long, cputime_last_delta);
 DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);
 
+cputime_t cputime_one_jiffy;
+
 static void calc_cputime_factors(void)
 {
         struct div_result res;
@@ -500,6 +502,7 @@ static int __init iSeries_tb_recal(void)
                                 tb_to_xs = divres.result_low;
                                 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
                                 vdso_data->tb_to_xs = tb_to_xs;
+                                setup_cputime_one_jiffy();
                         }
                         else {
                                 printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
@@ -950,6 +953,7 @@ void __init time_init(void)
         tb_ticks_per_usec = ppc_tb_freq / 1000000;
         tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
         calc_cputime_factors();
+        setup_cputime_one_jiffy();
 
         /*
          * Calculate the length of each tick in ns.  It will not be
@@ -42,6 +42,7 @@ __div(unsigned long long n, unsigned int base)
 #endif /* __s390x__ */
 
 #define cputime_zero (0ULL)
+#define cputime_one_jiffy jiffies_to_cputime(1)
 #define cputime_max ((~0UL >> 1) - 1)
 #define cputime_add(__a, __b) ((__a) + (__b))
 #define cputime_sub(__a, __b) ((__a) - (__b))
@@ -7,6 +7,7 @@
 typedef unsigned long cputime_t;
 
 #define cputime_zero (0UL)
+#define cputime_one_jiffy jiffies_to_cputime(1)
 #define cputime_max ((~0UL >> 1) - 1)
 #define cputime_add(__a, __b) ((__a) + (__b))
 #define cputime_sub(__a, __b) ((__a) - (__b))
@@ -470,6 +470,13 @@ struct pacct_struct {
         unsigned long ac_minflt, ac_majflt;
 };
 
+struct cpu_itimer {
+        cputime_t expires;
+        cputime_t incr;
+        u32 error;
+        u32 incr_error;
+};
+
 /**
  * struct task_cputime - collected CPU time counts
  * @utime: time spent in user mode, in &cputime_t units
@@ -564,9 +571,12 @@ struct signal_struct {
         struct pid *leader_pid;
         ktime_t it_real_incr;
 
-        /* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
-        cputime_t it_prof_expires, it_virt_expires;
-        cputime_t it_prof_incr, it_virt_incr;
+        /*
+         * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
+         * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
+         * values are defined to 0 and 1 respectively
+         */
+        struct cpu_itimer it[2];
 
         /*
          * Thread group totals for process CPU timers.
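The two hunks above replace the four separate it_{prof,virt}_{expires,incr} fields with a two-element array indexed by CPUCLOCK_PROF (0) and CPUCLOCK_VIRT (1); that is what lets the later itimer.c and posix-cpu-timers.c changes share a single code path for both clocks. A standalone sketch of the layout, using simplified stand-in types rather than the real kernel definitions:

/* Standalone sketch of the array-indexed layout; the constants and types
 * are simplified stand-ins for the kernel's CPUCLOCK_* values and cputime_t. */
#include <stdio.h>

enum { CPUCLOCK_PROF = 0, CPUCLOCK_VIRT = 1 };
typedef unsigned long cputime_t;

struct cpu_itimer {
        cputime_t expires;
        cputime_t incr;
        unsigned int error;
        unsigned int incr_error;
};

struct signal_state {                 /* stand-in for struct signal_struct */
        struct cpu_itimer it[2];
};

/* One helper now serves both timers instead of duplicated PROF/VIRT code. */
static void print_itimer(const struct signal_state *sig, int clock_id)
{
        const struct cpu_itimer *it = &sig->it[clock_id];
        printf("clock %d: expires=%lu incr=%lu\n", clock_id, it->expires, it->incr);
}

int main(void)
{
        struct signal_state sig = { .it = { [CPUCLOCK_PROF] = { 100, 10 },
                                            [CPUCLOCK_VIRT] = {  50,  5 } } };
        print_itimer(&sig, CPUCLOCK_PROF);
        print_itimer(&sig, CPUCLOCK_VIRT);
        return 0;
}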
@@ -62,6 +62,7 @@
 #include <linux/fs_struct.h>
 #include <linux/magic.h>
 #include <linux/perf_counter.h>
+#include <linux/posix-timers.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -790,10 +791,10 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
         thread_group_cputime_init(sig);
 
         /* Expiration times and increments. */
-        sig->it_virt_expires = cputime_zero;
-        sig->it_virt_incr = cputime_zero;
-        sig->it_prof_expires = cputime_zero;
-        sig->it_prof_incr = cputime_zero;
+        sig->it[CPUCLOCK_PROF].expires = cputime_zero;
+        sig->it[CPUCLOCK_PROF].incr = cputime_zero;
+        sig->it[CPUCLOCK_VIRT].expires = cputime_zero;
+        sig->it[CPUCLOCK_VIRT].incr = cputime_zero;
 
         /* Cached expiration times. */
         sig->cputime_expires.prof_exp = cputime_zero;
kernel/itimer.c
@@ -41,10 +41,43 @@ static struct timeval itimer_get_remtime(struct hrtimer *timer)
         return ktime_to_timeval(rem);
 }
 
+static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
+                           struct itimerval *const value)
+{
+        cputime_t cval, cinterval;
+        struct cpu_itimer *it = &tsk->signal->it[clock_id];
+
+        spin_lock_irq(&tsk->sighand->siglock);
+
+        cval = it->expires;
+        cinterval = it->incr;
+        if (!cputime_eq(cval, cputime_zero)) {
+                struct task_cputime cputime;
+                cputime_t t;
+
+                thread_group_cputimer(tsk, &cputime);
+                if (clock_id == CPUCLOCK_PROF)
+                        t = cputime_add(cputime.utime, cputime.stime);
+                else
+                        /* CPUCLOCK_VIRT */
+                        t = cputime.utime;
+
+                if (cputime_le(cval, t))
+                        /* about to fire */
+                        cval = cputime_one_jiffy;
+                else
+                        cval = cputime_sub(cval, t);
+        }
+
+        spin_unlock_irq(&tsk->sighand->siglock);
+
+        cputime_to_timeval(cval, &value->it_value);
+        cputime_to_timeval(cinterval, &value->it_interval);
+}
+
 int do_getitimer(int which, struct itimerval *value)
 {
         struct task_struct *tsk = current;
-        cputime_t cinterval, cval;
 
         switch (which) {
         case ITIMER_REAL:
@@ -55,44 +88,10 @@ int do_getitimer(int which, struct itimerval *value)
                 spin_unlock_irq(&tsk->sighand->siglock);
                 break;
         case ITIMER_VIRTUAL:
-                spin_lock_irq(&tsk->sighand->siglock);
-                cval = tsk->signal->it_virt_expires;
-                cinterval = tsk->signal->it_virt_incr;
-                if (!cputime_eq(cval, cputime_zero)) {
-                        struct task_cputime cputime;
-                        cputime_t utime;
-
-                        thread_group_cputimer(tsk, &cputime);
-                        utime = cputime.utime;
-                        if (cputime_le(cval, utime)) { /* about to fire */
-                                cval = jiffies_to_cputime(1);
-                        } else {
-                                cval = cputime_sub(cval, utime);
-                        }
-                }
-                spin_unlock_irq(&tsk->sighand->siglock);
-                cputime_to_timeval(cval, &value->it_value);
-                cputime_to_timeval(cinterval, &value->it_interval);
+                get_cpu_itimer(tsk, CPUCLOCK_VIRT, value);
                 break;
         case ITIMER_PROF:
-                spin_lock_irq(&tsk->sighand->siglock);
-                cval = tsk->signal->it_prof_expires;
-                cinterval = tsk->signal->it_prof_incr;
-                if (!cputime_eq(cval, cputime_zero)) {
-                        struct task_cputime times;
-                        cputime_t ptime;
-
-                        thread_group_cputimer(tsk, &times);
-                        ptime = cputime_add(times.utime, times.stime);
-                        if (cputime_le(cval, ptime)) { /* about to fire */
-                                cval = jiffies_to_cputime(1);
-                        } else {
-                                cval = cputime_sub(cval, ptime);
-                        }
-                }
-                spin_unlock_irq(&tsk->sighand->siglock);
-                cputime_to_timeval(cval, &value->it_value);
-                cputime_to_timeval(cinterval, &value->it_interval);
+                get_cpu_itimer(tsk, CPUCLOCK_PROF, value);
                 break;
         default:
                 return(-EINVAL);
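From userspace the interface is unchanged: getitimer() still reports the remaining time and the reload interval for ITIMER_VIRTUAL and ITIMER_PROF; the consolidation only removes the duplicated in-kernel code behind the two cases. A plain POSIX usage example, independent of the internal helpers above:

/* Userspace view of the refactored path: getitimer() semantics are unchanged.
 * Standard POSIX calls only; nothing here depends on the kernel-internal cleanup. */
#include <stdio.h>
#include <sys/time.h>

int main(void)
{
        struct itimerval set = {
                .it_value    = { .tv_sec = 1, .tv_usec = 0 },      /* first expiry */
                .it_interval = { .tv_sec = 0, .tv_usec = 500000 }  /* reload period */
        };
        struct itimerval got;

        if (setitimer(ITIMER_PROF, &set, NULL) != 0) {
                perror("setitimer");
                return 1;
        }
        if (getitimer(ITIMER_PROF, &got) != 0) {
                perror("getitimer");
                return 1;
        }
        printf("remaining %ld.%06ld s, interval %ld.%06ld s\n",
               (long)got.it_value.tv_sec, (long)got.it_value.tv_usec,
               (long)got.it_interval.tv_sec, (long)got.it_interval.tv_usec);
        return 0;
}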
@@ -128,6 +127,54 @@ enum hrtimer_restart it_real_fn(struct hrtimer *timer)
         return HRTIMER_NORESTART;
 }
 
+static inline u32 cputime_sub_ns(cputime_t ct, s64 real_ns)
+{
+        struct timespec ts;
+        s64 cpu_ns;
+
+        cputime_to_timespec(ct, &ts);
+        cpu_ns = timespec_to_ns(&ts);
+
+        return (cpu_ns <= real_ns) ? 0 : cpu_ns - real_ns;
+}
+
+static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
+                           const struct itimerval *const value,
+                           struct itimerval *const ovalue)
+{
+        cputime_t cval, nval, cinterval, ninterval;
+        s64 ns_ninterval, ns_nval;
+        struct cpu_itimer *it = &tsk->signal->it[clock_id];
+
+        nval = timeval_to_cputime(&value->it_value);
+        ns_nval = timeval_to_ns(&value->it_value);
+        ninterval = timeval_to_cputime(&value->it_interval);
+        ns_ninterval = timeval_to_ns(&value->it_interval);
+
+        it->incr_error = cputime_sub_ns(ninterval, ns_ninterval);
+        it->error = cputime_sub_ns(nval, ns_nval);
+
+        spin_lock_irq(&tsk->sighand->siglock);
+
+        cval = it->expires;
+        cinterval = it->incr;
+        if (!cputime_eq(cval, cputime_zero) ||
+            !cputime_eq(nval, cputime_zero)) {
+                if (cputime_gt(nval, cputime_zero))
+                        nval = cputime_add(nval, cputime_one_jiffy);
+                set_process_cpu_timer(tsk, clock_id, &nval, &cval);
+        }
+        it->expires = nval;
+        it->incr = ninterval;
+
+        spin_unlock_irq(&tsk->sighand->siglock);
+
+        if (ovalue) {
+                cputime_to_timeval(cval, &ovalue->it_value);
+                cputime_to_timeval(cinterval, &ovalue->it_interval);
+        }
+}
+
 /*
  * Returns true if the timeval is in canonical form
  */
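The new error/incr_error bookkeeping exists because converting a timeval to cputime coarsens the requested interval to whole ticks, so every period of a periodic itimer runs slightly long; cputime_sub_ns() records that per-period overshoot in nanoseconds. A standalone sketch of the arithmetic, assuming HZ=250 and round-up conversion purely for illustration (these are not kernel interfaces):

/* Standalone sketch of what cputime_sub_ns() records: the requested interval
 * becomes a whole number of ticks, each period runs a little long, and the
 * overshoot is kept per timer. HZ and the rounding here are assumptions. */
#include <stdio.h>
#include <stdint.h>

#define HZ 250
#define NSEC_PER_SEC 1000000000LL
#define NSEC_PER_TICK (NSEC_PER_SEC / HZ)          /* 4 ms per tick at HZ=250 */

int main(void)
{
        int64_t requested_ns = 10500000;            /* user asked for 10.5 ms */
        int64_t ticks = (requested_ns + NSEC_PER_TICK - 1) / NSEC_PER_TICK;  /* 3 */
        int64_t granted_ns = ticks * NSEC_PER_TICK;                          /* 12 ms */
        int64_t error_ns = granted_ns - requested_ns;                        /* 1.5 ms */

        printf("interval becomes %lld ticks (%lld ns), overshoot %lld ns per period\n",
               (long long)ticks, (long long)granted_ns, (long long)error_ns);
        return 0;
}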
@@ -139,7 +186,6 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
         struct task_struct *tsk = current;
         struct hrtimer *timer;
         ktime_t expires;
-        cputime_t cval, cinterval, nval, ninterval;
 
         /*
          * Validate the timevals in value.
@@ -174,48 +220,10 @@ again:
                 spin_unlock_irq(&tsk->sighand->siglock);
                 break;
         case ITIMER_VIRTUAL:
-                nval = timeval_to_cputime(&value->it_value);
-                ninterval = timeval_to_cputime(&value->it_interval);
-                spin_lock_irq(&tsk->sighand->siglock);
-                cval = tsk->signal->it_virt_expires;
-                cinterval = tsk->signal->it_virt_incr;
-                if (!cputime_eq(cval, cputime_zero) ||
-                    !cputime_eq(nval, cputime_zero)) {
-                        if (cputime_gt(nval, cputime_zero))
-                                nval = cputime_add(nval,
-                                                   jiffies_to_cputime(1));
-                        set_process_cpu_timer(tsk, CPUCLOCK_VIRT,
-                                              &nval, &cval);
-                }
-                tsk->signal->it_virt_expires = nval;
-                tsk->signal->it_virt_incr = ninterval;
-                spin_unlock_irq(&tsk->sighand->siglock);
-                if (ovalue) {
-                        cputime_to_timeval(cval, &ovalue->it_value);
-                        cputime_to_timeval(cinterval, &ovalue->it_interval);
-                }
+                set_cpu_itimer(tsk, CPUCLOCK_VIRT, value, ovalue);
                 break;
         case ITIMER_PROF:
-                nval = timeval_to_cputime(&value->it_value);
-                ninterval = timeval_to_cputime(&value->it_interval);
-                spin_lock_irq(&tsk->sighand->siglock);
-                cval = tsk->signal->it_prof_expires;
-                cinterval = tsk->signal->it_prof_incr;
-                if (!cputime_eq(cval, cputime_zero) ||
-                    !cputime_eq(nval, cputime_zero)) {
-                        if (cputime_gt(nval, cputime_zero))
-                                nval = cputime_add(nval,
-                                                   jiffies_to_cputime(1));
-                        set_process_cpu_timer(tsk, CPUCLOCK_PROF,
-                                              &nval, &cval);
-                }
-                tsk->signal->it_prof_expires = nval;
-                tsk->signal->it_prof_incr = ninterval;
-                spin_unlock_irq(&tsk->sighand->siglock);
-                if (ovalue) {
-                        cputime_to_timeval(cval, &ovalue->it_value);
-                        cputime_to_timeval(cinterval, &ovalue->it_interval);
-                }
+                set_cpu_itimer(tsk, CPUCLOCK_PROF, value, ovalue);
                 break;
         default:
                 return -EINVAL;
@@ -14,11 +14,11 @@
  */
 void update_rlimit_cpu(unsigned long rlim_new)
 {
-        cputime_t cputime;
+        cputime_t cputime = secs_to_cputime(rlim_new);
+        struct signal_struct *const sig = current->signal;
 
-        cputime = secs_to_cputime(rlim_new);
-        if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
-            cputime_gt(current->signal->it_prof_expires, cputime)) {
+        if (cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) ||
+            cputime_gt(sig->it[CPUCLOCK_PROF].expires, cputime)) {
                 spin_lock_irq(&current->sighand->siglock);
                 set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
                 spin_unlock_irq(&current->sighand->siglock);
@@ -542,6 +542,17 @@ static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
                              now);
 }
 
+static inline int expires_gt(cputime_t expires, cputime_t new_exp)
+{
+        return cputime_eq(expires, cputime_zero) ||
+               cputime_gt(expires, new_exp);
+}
+
+static inline int expires_le(cputime_t expires, cputime_t new_exp)
+{
+        return !cputime_eq(expires, cputime_zero) &&
+               cputime_le(expires, new_exp);
+}
 /*
  * Insert the timer on the appropriate list before any timers that
  * expire later.  This must be called with the tasklist_lock held
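The two helpers above fold the recurring "cputime_zero means the timer is not armed" convention into one place: expires_gt() treats a zero expiry as later than anything (so a new expiry should be cached), while expires_le() treats it as not armed at all (so it can never already cover a new expiry). A standalone restatement with plain unsigned arithmetic standing in for the cputime_* macros:

/* Standalone restatement of expires_gt()/expires_le(); plain comparisons
 * replace the cputime_* macros for illustration. */
#include <assert.h>

typedef unsigned long cputime_t;
#define cputime_zero ((cputime_t)0)

/* zero expiry == not armed == later than anything */
static int expires_gt(cputime_t expires, cputime_t new_exp)
{
        return expires == cputime_zero || expires > new_exp;
}

/* armed and already no later than the new expiry */
static int expires_le(cputime_t expires, cputime_t new_exp)
{
        return expires != cputime_zero && expires <= new_exp;
}

int main(void)
{
        assert(expires_gt(0, 100));    /* disarmed: any new expiry should be cached */
        assert(expires_gt(200, 100));  /* armed later: the earlier new expiry wins */
        assert(!expires_gt(50, 100));  /* already earlier: keep the cached value */
        assert(expires_le(50, 100));   /* armed and earlier: nothing to update */
        assert(!expires_le(0, 100));   /* disarmed: never "already covers" the new one */
        return 0;
}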
@@ -586,34 +597,32 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
          */
 
         if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
+                union cpu_time_count *exp = &nt->expires;
+
                 switch (CPUCLOCK_WHICH(timer->it_clock)) {
                 default:
                         BUG();
                 case CPUCLOCK_PROF:
-                        if (cputime_eq(p->cputime_expires.prof_exp,
-                                       cputime_zero) ||
-                            cputime_gt(p->cputime_expires.prof_exp,
-                                       nt->expires.cpu))
-                                p->cputime_expires.prof_exp =
-                                        nt->expires.cpu;
+                        if (expires_gt(p->cputime_expires.prof_exp,
+                                       exp->cpu))
+                                p->cputime_expires.prof_exp = exp->cpu;
                         break;
                 case CPUCLOCK_VIRT:
-                        if (cputime_eq(p->cputime_expires.virt_exp,
-                                       cputime_zero) ||
-                            cputime_gt(p->cputime_expires.virt_exp,
-                                       nt->expires.cpu))
-                                p->cputime_expires.virt_exp =
-                                        nt->expires.cpu;
+                        if (expires_gt(p->cputime_expires.virt_exp,
+                                       exp->cpu))
+                                p->cputime_expires.virt_exp = exp->cpu;
                         break;
                 case CPUCLOCK_SCHED:
                         if (p->cputime_expires.sched_exp == 0 ||
-                            p->cputime_expires.sched_exp >
-                                        nt->expires.sched)
+                            p->cputime_expires.sched_exp > exp->sched)
                                 p->cputime_expires.sched_exp =
-                                        nt->expires.sched;
+                                        exp->sched;
                         break;
                 }
         } else {
+                struct signal_struct *const sig = p->signal;
+                union cpu_time_count *exp = &timer->it.cpu.expires;
+
                 /*
                  * For a process timer, set the cached expiration time.
                  */
@@ -621,30 +630,23 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
                 default:
                         BUG();
                 case CPUCLOCK_VIRT:
-                        if (!cputime_eq(p->signal->it_virt_expires,
-                                        cputime_zero) &&
-                            cputime_lt(p->signal->it_virt_expires,
-                                       timer->it.cpu.expires.cpu))
+                        if (expires_le(sig->it[CPUCLOCK_VIRT].expires,
+                                       exp->cpu))
                                 break;
-                        p->signal->cputime_expires.virt_exp =
-                                timer->it.cpu.expires.cpu;
+                        sig->cputime_expires.virt_exp = exp->cpu;
                         break;
                 case CPUCLOCK_PROF:
-                        if (!cputime_eq(p->signal->it_prof_expires,
-                                        cputime_zero) &&
-                            cputime_lt(p->signal->it_prof_expires,
-                                       timer->it.cpu.expires.cpu))
+                        if (expires_le(sig->it[CPUCLOCK_PROF].expires,
+                                       exp->cpu))
                                 break;
-                        i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
+                        i = sig->rlim[RLIMIT_CPU].rlim_cur;
                         if (i != RLIM_INFINITY &&
-                            i <= cputime_to_secs(timer->it.cpu.expires.cpu))
+                            i <= cputime_to_secs(exp->cpu))
                                 break;
-                        p->signal->cputime_expires.prof_exp =
-                                timer->it.cpu.expires.cpu;
+                        sig->cputime_expires.prof_exp = exp->cpu;
                         break;
                 case CPUCLOCK_SCHED:
-                        p->signal->cputime_expires.sched_exp =
-                                timer->it.cpu.expires.sched;
+                        sig->cputime_expires.sched_exp = exp->sched;
                         break;
                 }
         }
@@ -1071,6 +1073,36 @@ static void stop_process_timers(struct task_struct *tsk)
         spin_unlock_irqrestore(&cputimer->lock, flags);
 }
 
+static u32 onecputick;
+
+static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
+                             cputime_t *expires, cputime_t cur_time, int signo)
+{
+        if (cputime_eq(it->expires, cputime_zero))
+                return;
+
+        if (cputime_ge(cur_time, it->expires)) {
+                if (!cputime_eq(it->incr, cputime_zero)) {
+                        it->expires = cputime_add(it->expires, it->incr);
+                        it->error += it->incr_error;
+                        if (it->error >= onecputick) {
+                                it->expires = cputime_sub(it->expires,
+                                                          cputime_one_jiffy);
+                                it->error -= onecputick;
+                        }
+                } else
+                        it->expires = cputime_zero;
+
+                __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
+        }
+
+        if (!cputime_eq(it->expires, cputime_zero) &&
+            (cputime_eq(*expires, cputime_zero) ||
+             cputime_lt(it->expires, *expires))) {
+                *expires = it->expires;
+        }
+}
+
 /*
  * Check for any per-thread CPU timers that have fired and move them
  * off the tsk->*_timers list onto the firing list.  Per-thread timers
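check_cpu_itimer() pays the conversion overshoot back: each reload adds incr_error to the running error, and once the total reaches one tick (onecputick, initialised from cputime_one_jiffy in init_posix_cpu_timers() further down), the next expiry is pulled in by one jiffy, so the long-run rate matches the requested interval. A standalone simulation of that loop with illustrative numbers (HZ=250, a 10.5 ms request), not kernel data:

/* Standalone simulation of the correction done by check_cpu_itimer():
 * the per-period overshoot accumulates, and whenever it reaches one
 * tick's worth of nanoseconds the next period is shortened by one tick. */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_TICK 4000000LL        /* one tick at HZ=250 */

int main(void)
{
        const int64_t incr_ticks = 3;          /* rounded-up 10.5 ms interval */
        const int64_t incr_error = 1500000;    /* overshoot per period, ns */
        int64_t error = 0, total_ticks = 0;

        for (int period = 1; period <= 8; period++) {
                int64_t ticks = incr_ticks;
                error += incr_error;
                if (error >= NSEC_PER_TICK) {  /* mirrors "error >= onecputick" */
                        ticks -= 1;            /* expire one tick earlier */
                        error -= NSEC_PER_TICK;
                }
                total_ticks += ticks;
                printf("period %d: %lld ticks, carried error %lld ns\n",
                       period, (long long)ticks, (long long)error);
        }
        printf("average period: %lld ns (requested 10500000)\n",
               (long long)(total_ticks * NSEC_PER_TICK / 8));
        return 0;
}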
@@ -1090,10 +1122,10 @@ static void check_process_timers(struct task_struct *tsk,
          * Don't sample the current process CPU clocks if there are no timers.
          */
         if (list_empty(&timers[CPUCLOCK_PROF]) &&
-            cputime_eq(sig->it_prof_expires, cputime_zero) &&
+            cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) &&
             sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
             list_empty(&timers[CPUCLOCK_VIRT]) &&
-            cputime_eq(sig->it_virt_expires, cputime_zero) &&
+            cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) &&
             list_empty(&timers[CPUCLOCK_SCHED])) {
                 stop_process_timers(tsk);
                 return;
@@ -1153,38 +1185,11 @@ static void check_process_timers(struct task_struct *tsk,
         /*
          * Check for the special case process timers.
          */
-        if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
-                if (cputime_ge(ptime, sig->it_prof_expires)) {
-                        /* ITIMER_PROF fires and reloads.  */
-                        sig->it_prof_expires = sig->it_prof_incr;
-                        if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
-                                sig->it_prof_expires = cputime_add(
-                                        sig->it_prof_expires, ptime);
-                        }
-                        __group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
-                }
-                if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
-                    (cputime_eq(prof_expires, cputime_zero) ||
-                     cputime_lt(sig->it_prof_expires, prof_expires))) {
-                        prof_expires = sig->it_prof_expires;
-                }
-        }
-        if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
-                if (cputime_ge(utime, sig->it_virt_expires)) {
-                        /* ITIMER_VIRTUAL fires and reloads.  */
-                        sig->it_virt_expires = sig->it_virt_incr;
-                        if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
-                                sig->it_virt_expires = cputime_add(
-                                        sig->it_virt_expires, utime);
-                        }
-                        __group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
-                }
-                if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
-                    (cputime_eq(virt_expires, cputime_zero) ||
-                     cputime_lt(sig->it_virt_expires, virt_expires))) {
-                        virt_expires = sig->it_virt_expires;
-                }
-        }
+        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
+                         SIGPROF);
+        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
+                         SIGVTALRM);
+
         if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
                 unsigned long psecs = cputime_to_secs(ptime);
                 cputime_t x;
@@ -1457,7 +1462,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
         if (!cputime_eq(*oldval, cputime_zero)) {
                 if (cputime_le(*oldval, now.cpu)) {
                         /* Just about to fire. */
-                        *oldval = jiffies_to_cputime(1);
+                        *oldval = cputime_one_jiffy;
                 } else {
                         *oldval = cputime_sub(*oldval, now.cpu);
                 }
@@ -1703,10 +1708,15 @@ static __init int init_posix_cpu_timers(void)
                 .nsleep = thread_cpu_nsleep,
                 .nsleep_restart = thread_cpu_nsleep_restart,
         };
+        struct timespec ts;
 
         register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
         register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
 
+        cputime_to_timespec(cputime_one_jiffy, &ts);
+        onecputick = ts.tv_nsec;
+        WARN_ON(ts.tv_sec != 0);
+
         return 0;
 }
 __initcall(init_posix_cpu_timers);
@@ -5031,17 +5031,16 @@ void account_idle_time(cputime_t cputime)
  */
 void account_process_tick(struct task_struct *p, int user_tick)
 {
-        cputime_t one_jiffy = jiffies_to_cputime(1);
-        cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
+        cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
         struct rq *rq = this_rq();
 
         if (user_tick)
-                account_user_time(p, one_jiffy, one_jiffy_scaled);
+                account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
         else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
-                account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
+                account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
                                     one_jiffy_scaled);
         else
-                account_idle_time(one_jiffy);
+                account_idle_time(cputime_one_jiffy);
 }
 
 /*