ktime: Cleanup ktime_set() usage
ktime_set(S,N) was required for the timespec storage type and is still useful for situations where a Seconds and Nanoseconds part of a time value needs to be converted. For anything where the Seconds argument is 0, this is pointless and can be replaced with a simple assignment.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
commit 8b0e195314
parent 2456e85535
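With the ktime union gone (see parent commit 2456e85535), ktime_t is a plain signed 64-bit count of nanoseconds, so a value that is already in nanoseconds can be assigned directly. The sketch below is a minimal userspace illustration of that equivalence, not kernel code: ktime_t, ktime_set() and NSEC_PER_SEC are re-declared locally.

#include <stdio.h>
#include <stdint.h>

typedef int64_t ktime_t;	/* nanoseconds, matching the post-union kernel type */
#define NSEC_PER_SEC 1000000000L

/* Simplified stand-in for the kernel helper; overflow handling omitted. */
static ktime_t ktime_set(long secs, unsigned long nsecs)
{
	return (ktime_t)secs * NSEC_PER_SEC + nsecs;
}

int main(void)
{
	ktime_t split = ktime_set(2, 500000000);	/* 2.5 s: the helper still earns its keep */
	ktime_t old_style = ktime_set(0, 1000000);	/* 1 ms, the redundant pre-cleanup form */
	ktime_t new_style = 1000000;			/* 1 ms, the simple assignment used below */

	printf("split=%lld old=%lld new=%lld equal=%d\n",
	       (long long)split, (long long)old_style,
	       (long long)new_style, old_style == new_style);
	return 0;
}

The ltc2952 hunk below shows both cases next to each other: wde_interval (zero seconds part) becomes a plain assignment, while trigger_delay keeps ktime_set(2, ...) because its seconds part is non-zero.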
@@ -1872,8 +1872,7 @@ static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
 	}
 	dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
 		   / tb_ticks_per_sec;
-	hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
-		      HRTIMER_MODE_REL);
+	hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL);
 	vcpu->arch.timer_running = 1;
 }
 
@@ -180,7 +180,7 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)
 	smp_wmb();	/* insure spu event buffer updates are written */
 			/* don't want events intermingled... */
 
-	kt = ktime_set(0, profiling_interval);
+	kt = profiling_interval;
 	if (!spu_prof_running)
 		goto stop;
 	hrtimer_forward(timer, timer->base->get_time(), kt);
@@ -204,7 +204,7 @@ int start_spu_profiling_cycles(unsigned int cycles_reset)
 	ktime_t kt;
 
 	pr_debug("timer resolution: %lu\n", TICK_NSEC);
-	kt = ktime_set(0, profiling_interval);
+	kt = profiling_interval;
 	hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hrtimer_set_expires(&timer, kt);
 	timer.function = profile_spus;
@@ -1019,7 +1019,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 		return 0;
 
 	__set_cpu_idle(vcpu);
-	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
+	hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
 	VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
 no_timer:
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -1106,7 +1106,7 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
 	now = ktime_get();
 	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
 	if (ktime_to_ns(remaining) < 0)
-		remaining = ktime_set(0, 0);
+		remaining = 0;
 
 	ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
 	tmcct = div64_u64(ns,
@@ -2057,7 +2057,7 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
 		apic->lapic_timer.tscdeadline = 0;
 		if (apic_lvtt_oneshot(apic)) {
 			apic->lapic_timer.tscdeadline = 0;
-			apic->lapic_timer.target_expiration = ktime_set(0, 0);
+			apic->lapic_timer.target_expiration = 0;
 		}
 		atomic_set(&apic->lapic_timer.pending, 0);
 	}
@@ -2569,7 +2569,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
 	 * This will be replaced with the stats tracking code, using
 	 * 'avg_completion_time / 2' as the pre-sleep target.
 	 */
-	kt = ktime_set(0, nsecs);
+	kt = nsecs;
 
 	mode = HRTIMER_MODE_REL;
 	hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
@@ -194,7 +194,7 @@ void device_pm_move_last(struct device *dev)
 
 static ktime_t initcall_debug_start(struct device *dev)
 {
-	ktime_t calltime = ktime_set(0, 0);
+	ktime_t calltime = 0;
 
 	if (pm_print_times_enabled) {
 		pr_info("calling %s+ @ %i, parent: %s\n",
@@ -1005,7 +1005,7 @@ static int print_wakeup_source_stats(struct seq_file *m,
 			prevent_sleep_time = ktime_add(prevent_sleep_time,
 				ktime_sub(now, ws->start_prevent_time));
 	} else {
-		active_time = ktime_set(0, 0);
+		active_time = 0;
 	}
 
 	seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
@@ -257,7 +257,7 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
 
 static void null_cmd_end_timer(struct nullb_cmd *cmd)
 {
-	ktime_t kt = ktime_set(0, completion_nsec);
+	ktime_t kt = completion_nsec;
 
 	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
 }
@@ -429,8 +429,8 @@ static int dmatest_func(void *data)
 	int			dst_cnt;
 	int			i;
 	ktime_t			ktime, start, diff;
-	ktime_t			filltime = ktime_set(0, 0);
-	ktime_t			comparetime = ktime_set(0, 0);
+	ktime_t			filltime = 0;
+	ktime_t			comparetime = 0;
 	s64			runtime = 0;
 	unsigned long long	total_len = 0;
 	u8			align = 0;
@@ -752,7 +752,7 @@ static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vbla
 
 	drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
 	dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
-	hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD),
+	hrtimer_start(vblank_timer, DCE_VIRTUAL_VBLANK_PERIOD,
 		      HRTIMER_MODE_REL);
 
 	return HRTIMER_NORESTART;
@@ -772,11 +772,11 @@ static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *ad
 		hrtimer_init(&adev->mode_info.crtcs[crtc]->vblank_timer,
 			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 		hrtimer_set_expires(&adev->mode_info.crtcs[crtc]->vblank_timer,
-				    ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
+				    DCE_VIRTUAL_VBLANK_PERIOD);
 		adev->mode_info.crtcs[crtc]->vblank_timer.function =
 						dce_virtual_vblank_timer_handle;
 		hrtimer_start(&adev->mode_info.crtcs[crtc]->vblank_timer,
-			      ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
+			      DCE_VIRTUAL_VBLANK_PERIOD, HRTIMER_MODE_REL);
 	} else if (!state && adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
 		DRM_DEBUG("Disable software vsync timer\n");
 		hrtimer_cancel(&adev->mode_info.crtcs[crtc]->vblank_timer);
@@ -62,7 +62,7 @@ fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
 {
 	d->wake_count++;
 	hrtimer_start_range_ns(&d->timer,
-			       ktime_set(0, NSEC_PER_MSEC),
+			       NSEC_PER_MSEC,
 			       NSEC_PER_MSEC,
 			       HRTIMER_MODE_REL);
 }
@@ -330,7 +330,7 @@ nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
 		__set_current_state(intr ? TASK_INTERRUPTIBLE :
 					   TASK_UNINTERRUPTIBLE);
 
-		kt = ktime_set(0, sleep_time);
+		kt = sleep_time;
 		schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
 		sleep_time *= 2;
 		if (sleep_time > NSEC_PER_MSEC)
@@ -539,7 +539,7 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
 	}
 
 	drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
-	tilcdc_crtc->last_vblank = ktime_set(0, 0);
+	tilcdc_crtc->last_vblank = 0;
 
 	tilcdc_crtc->enabled = false;
 	mutex_unlock(&tilcdc_crtc->enable_lock);
@@ -63,7 +63,7 @@ ssize_t iio_hrtimer_store_sampling_frequency(struct device *dev,
 		return -EINVAL;
 
 	info->sampling_frequency = val;
-	info->period = ktime_set(0, NSEC_PER_SEC / val);
+	info->period = NSEC_PER_SEC / val;
 
 	return len;
 }
@@ -141,8 +141,7 @@ static struct iio_sw_trigger *iio_trig_hrtimer_probe(const char *name)
 	trig_info->timer.function = iio_hrtimer_trig_handler;
 
 	trig_info->sampling_frequency = HRTIMER_DEFAULT_SAMPLING_FREQUENCY;
-	trig_info->period = ktime_set(0, NSEC_PER_SEC /
-				      trig_info->sampling_frequency);
+	trig_info->period = NSEC_PER_SEC / trig_info->sampling_frequency;
 
 	ret = iio_trigger_register(trig_info->swt.trigger);
 	if (ret)
@@ -165,7 +165,7 @@ static void walkera0701_irq_handler(void *handler_data)
 			RESERVE + BIN1_PULSE - BIN0_PULSE)	/* frame sync .. */
 		w->counter = 0;
 
-	hrtimer_start(&w->timer, ktime_set(0, BIN_SAMPLE), HRTIMER_MODE_REL);
+	hrtimer_start(&w->timer, BIN_SAMPLE, HRTIMER_MODE_REL);
 }
 
 static enum hrtimer_restart timer_handler(struct hrtimer
@@ -87,8 +87,7 @@ exit:
 
 	if (!err && (chan->txdone_method & TXDONE_BY_POLL))
 		/* kick start the timer immediately to avoid delays */
-		hrtimer_start(&chan->mbox->poll_hrt, ktime_set(0, 0),
-			      HRTIMER_MODE_REL);
+		hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
 }
 
 static void tx_tick(struct mbox_chan *chan, int r)
@@ -562,7 +562,7 @@ static int dvb_dmxdev_start_feed(struct dmxdev *dmxdev,
 				 struct dmxdev_filter *filter,
 				 struct dmxdev_feed *feed)
 {
-	ktime_t timeout = ktime_set(0, 0);
+	ktime_t timeout = 0;
 	struct dmx_pes_filter_params *para = &filter->params.pes;
 	dmx_output_t otype;
 	int ret;
@@ -178,8 +178,7 @@ static enum hrtimer_restart cx88_ir_work(struct hrtimer *timer)
 	struct cx88_IR *ir = container_of(timer, struct cx88_IR, timer);
 
 	cx88_ir_handle_key(ir);
-	missed = hrtimer_forward_now(&ir->timer,
-				     ktime_set(0, ir->polling * 1000000));
+	missed = hrtimer_forward_now(&ir->timer, ir->polling * 1000000);
 	if (missed > 1)
 		ir_dprintk("Missed ticks %ld\n", missed - 1);
 
@@ -199,8 +198,7 @@ static int __cx88_ir_start(void *priv)
 	if (ir->polling) {
 		hrtimer_init(&ir->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 		ir->timer.function = cx88_ir_work;
-		hrtimer_start(&ir->timer,
-			      ktime_set(0, ir->polling * 1000000),
+		hrtimer_start(&ir->timer, ir->polling * 1000000,
 			      HRTIMER_MODE_REL);
 	}
 	if (ir->sampling) {
@@ -463,7 +463,7 @@ static int pt3_fetch_thread(void *data)
 
 		pt3_proc_dma(adap);
 
-		delay = ktime_set(0, PT3_FETCH_DELAY * NSEC_PER_MSEC);
+		delay = PT3_FETCH_DELAY * NSEC_PER_MSEC;
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		freezable_schedule_hrtimeout_range(&delay,
 					PT3_FETCH_DELAY_DELTA * NSEC_PER_MSEC,
@@ -390,7 +390,7 @@ static void softing_initialize_timestamp(struct softing *card)
 	ovf = 0x100000000ULL * 16;
 	do_div(ovf, card->pdat->freq ?: 16);
 
-	card->ts_overflow = ktime_add_us(ktime_set(0, 0), ovf);
+	card->ts_overflow = ktime_add_us(0, ovf);
 }
 
 ktime_t softing_raw2ktime(struct softing *card, u32 raw)
@@ -647,7 +647,7 @@ int softing_startstop(struct net_device *dev, int up)
 		open_candev(netdev);
 		if (dev != netdev) {
 			/* notify other busses on the restart */
-			softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
+			softing_netdev_rx(netdev, &msg, 0);
 			++priv->can.can_stats.restarts;
 		}
 		netif_wake_queue(netdev);
@@ -192,7 +192,7 @@ static int softing_handle_1(struct softing *card)
 				/* a dead bus has no overflows */
 				continue;
 			++netdev->stats.rx_over_errors;
-			softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
+			softing_netdev_rx(netdev, &msg, 0);
 		}
 		/* prepare for other use */
 		memset(&msg, 0, sizeof(msg));
@@ -253,7 +253,7 @@ static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
 	if (!netif_running(priv->net_dev))
 		return HRTIMER_NORESTART;
 
-	hrtimer_forward_now(timer, ktime_set(0, polling_frequency));
+	hrtimer_forward_now(timer, polling_frequency);
 	return HRTIMER_RESTART;
 }
 
@@ -427,8 +427,7 @@ static int ec_bhf_open(struct net_device *net_dev)
 
 	hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	priv->hrtimer.function = ec_bhf_timer_fun;
-	hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency),
-		      HRTIMER_MODE_REL);
+	hrtimer_start(&priv->hrtimer, polling_frequency, HRTIMER_MODE_REL);
 
 	return 0;
 
@@ -4913,7 +4913,7 @@ static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
 
 	if (!port_pcpu->timer_scheduled) {
 		port_pcpu->timer_scheduled = true;
-		interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
+		interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
 		hrtimer_start(&port_pcpu->tx_done_timer, interval,
 			      HRTIMER_MODE_REL_PINNED);
 	}
@@ -751,7 +751,7 @@ static void tile_net_schedule_tx_wake_timer(struct net_device *dev,
 		&info->mpipe[instance].tx_wake[priv->echannel];
 
 	hrtimer_start(&tx_wake->timer,
-		      ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
+		      TX_TIMER_DELAY_USEC * 1000UL,
 		      HRTIMER_MODE_REL_PINNED);
 }
 
@@ -770,7 +770,7 @@ static void tile_net_schedule_egress_timer(void)
 
 	if (!info->egress_timer_scheduled) {
 		hrtimer_start(&info->egress_timer,
-			      ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL),
+			      EGRESS_TIMER_DELAY_USEC * 1000UL,
 			      HRTIMER_MODE_REL_PINNED);
 		info->egress_timer_scheduled = true;
 	}
@@ -510,7 +510,7 @@ at86rf230_async_state_delay(void *context)
 	case STATE_TRX_OFF:
 		switch (ctx->to_state) {
 		case STATE_RX_AACK_ON:
-			tim = ktime_set(0, c->t_off_to_aack * NSEC_PER_USEC);
+			tim = c->t_off_to_aack * NSEC_PER_USEC;
 			/* state change from TRX_OFF to RX_AACK_ON to do a
 			 * calibration, we need to reset the timeout for the
 			 * next one.
@@ -519,7 +519,7 @@ at86rf230_async_state_delay(void *context)
 			goto change;
 		case STATE_TX_ARET_ON:
 		case STATE_TX_ON:
-			tim = ktime_set(0, c->t_off_to_tx_on * NSEC_PER_USEC);
+			tim = c->t_off_to_tx_on * NSEC_PER_USEC;
 			/* state change from TRX_OFF to TX_ON or ARET_ON to do
 			 * a calibration, we need to reset the timeout for the
 			 * next one.
@@ -539,8 +539,7 @@ at86rf230_async_state_delay(void *context)
 		 * to TX_ON or TRX_OFF.
 		 */
 		if (!force) {
-			tim = ktime_set(0, (c->t_frame + c->t_p_ack) *
-					   NSEC_PER_USEC);
+			tim = (c->t_frame + c->t_p_ack) * NSEC_PER_USEC;
 			goto change;
 		}
 		break;
@@ -552,7 +551,7 @@ at86rf230_async_state_delay(void *context)
 	case STATE_P_ON:
 		switch (ctx->to_state) {
 		case STATE_TRX_OFF:
-			tim = ktime_set(0, c->t_reset_to_off * NSEC_PER_USEC);
+			tim = c->t_reset_to_off * NSEC_PER_USEC;
 			goto change;
 		default:
 			break;
@@ -1282,7 +1282,7 @@ static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx)
 	/* start timer, if not already started */
 	if (!(hrtimer_active(&ctx->tx_timer) || atomic_read(&ctx->stop)))
 		hrtimer_start(&ctx->tx_timer,
-				ktime_set(0, ctx->timer_interval),
+				ctx->timer_interval,
 				HRTIMER_MODE_REL);
 }
 
@@ -177,7 +177,7 @@ static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
 	if (rt2800usb_txstatus_pending(rt2x00dev)) {
 		/* Read register after 1 ms */
 		hrtimer_start(&rt2x00dev->txstatus_timer,
-			      ktime_set(0, TXSTATUS_READ_INTERVAL),
+			      TXSTATUS_READ_INTERVAL,
 			      HRTIMER_MODE_REL);
 		return false;
 	}
@@ -204,7 +204,7 @@ static void rt2800usb_async_read_tx_status(struct rt2x00_dev *rt2x00dev)
 
 	/* Read TX_STA_FIFO register after 2 ms */
 	hrtimer_start(&rt2x00dev->txstatus_timer,
-		      ktime_set(0, 2*TXSTATUS_READ_INTERVAL),
+		      2 * TXSTATUS_READ_INTERVAL,
 		      HRTIMER_MODE_REL);
 }
 
@@ -3044,7 +3044,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb);
 static ktime_t fixup_debug_start(struct pci_dev *dev,
 				 void (*fn)(struct pci_dev *dev))
 {
-	ktime_t calltime = ktime_set(0, 0);
+	ktime_t calltime = 0;
 
 	dev_dbg(&dev->dev, "calling %pF\n", fn);
 	if (initcall_debug) {
@@ -283,7 +283,7 @@ static int __init msi_wmi_input_setup(void)
 	if (err)
 		goto err_free_keymap;
 
-	last_pressed = ktime_set(0, 0);
+	last_pressed = 0;
 
 	return 0;
 
@@ -169,7 +169,7 @@ static void ltc2952_poweroff_kill(void)
 
 static void ltc2952_poweroff_default(struct ltc2952_poweroff *data)
 {
-	data->wde_interval = ktime_set(0, 300L*1E6L);
+	data->wde_interval = 300L * 1E6L;
 	data->trigger_delay = ktime_set(2, 500L*1E6L);
 
 	hrtimer_init(&data->timer_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -363,7 +363,7 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 		rtc_timer_remove(rtc, &rtc->aie_timer);
 
 	rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
-	rtc->aie_timer.period = ktime_set(0, 0);
+	rtc->aie_timer.period = 0;
 	if (alarm->enabled)
 		err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
 
@@ -391,7 +391,7 @@ int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 		return err;
 
 	rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
-	rtc->aie_timer.period = ktime_set(0, 0);
+	rtc->aie_timer.period = 0;
 
 	/* Alarm has to be enabled & in the future for us to enqueue it */
 	if (alarm->enabled && (rtc_tm_to_ktime(now) <
@@ -554,7 +554,7 @@ enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
 	int count;
 	rtc = container_of(timer, struct rtc_device, pie_timer);
 
-	period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+	period = NSEC_PER_SEC / rtc->irq_freq;
 	count = hrtimer_forward_now(timer, period);
 
 	rtc_handle_legacy_irq(rtc, count, RTC_PF);
@@ -665,7 +665,7 @@ static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled)
 		return -1;
 
 	if (enabled) {
-		ktime_t period = ktime_set(0, NSEC_PER_SEC / rtc->irq_freq);
+		ktime_t period = NSEC_PER_SEC / rtc->irq_freq;
 
 		hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
 	}
@@ -333,7 +333,7 @@ void ap_wait(enum ap_wait wait)
 	case AP_WAIT_TIMEOUT:
 		spin_lock_bh(&ap_poll_timer_lock);
 		if (!hrtimer_is_queued(&ap_poll_timer)) {
-			hr_time = ktime_set(0, poll_timeout);
+			hr_time = poll_timeout;
 			hrtimer_forward_now(&ap_poll_timer, hr_time);
 			hrtimer_restart(&ap_poll_timer);
 		}
@@ -860,7 +860,7 @@ static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
 	    time > 120000000000ULL)
 		return -EINVAL;
 	poll_timeout = time;
-	hr_time = ktime_set(0, poll_timeout);
+	hr_time = poll_timeout;
 
 	spin_lock_bh(&ap_poll_timer_lock);
 	hrtimer_cancel(&ap_poll_timer);
@@ -1694,7 +1694,7 @@ static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
 		if (!vscsi->rsp_q_timer.started) {
 			if (vscsi->rsp_q_timer.timer_pops <
 			    MAX_TIMER_POPS) {
-				kt = ktime_set(0, WAIT_NANO_SECONDS);
+				kt = WAIT_NANO_SECONDS;
 			} else {
 				/*
 				 * slide the timeslice if the maximum
@@ -4085,7 +4085,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 			jiffies_to_timespec(delta_jiff, &ts);
 			kt = ktime_set(ts.tv_sec, ts.tv_nsec);
 		} else
-			kt = ktime_set(0, sdebug_ndelay);
+			kt = sdebug_ndelay;
 		if (NULL == sd_dp) {
 			sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
 			if (NULL == sd_dp)
@@ -930,7 +930,7 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
 	if (!hba->outstanding_reqs && scaling->is_busy_started) {
 		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
 					scaling->busy_start_t));
-		scaling->busy_start_t = ktime_set(0, 0);
+		scaling->busy_start_t = 0;
 		scaling->is_busy_started = false;
 	}
 }
@@ -6661,7 +6661,7 @@ start_window:
 		scaling->busy_start_t = ktime_get();
 		scaling->is_busy_started = true;
 	} else {
-		scaling->busy_start_t = ktime_set(0, 0);
+		scaling->busy_start_t = 0;
 		scaling->is_busy_started = false;
 	}
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1113,8 +1113,7 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
 		}
 
 		/* Delay the timer. */
-		hrtimer_start(&ncm->task_timer,
-			      ktime_set(0, TX_TIMEOUT_NSECS),
+		hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
 			      HRTIMER_MODE_REL);
 
 		/* Add the datagram position entries */
@@ -88,8 +88,7 @@ static void ehci_enable_event(struct ehci_hcd *ehci, unsigned event,
 	ktime_t *timeout = &ehci->hr_timeouts[event];
 
 	if (resched)
-		*timeout = ktime_add(ktime_get(),
-				ktime_set(0, event_delays_ns[event]));
+		*timeout = ktime_add(ktime_get(), event_delays_ns[event]);
 	ehci->enabled_hrtimer_events |= (1 << event);
 
 	/* Track only the lowest-numbered pending event */
@@ -1080,8 +1080,7 @@ static void fotg210_enable_event(struct fotg210_hcd *fotg210, unsigned event,
 	ktime_t *timeout = &fotg210->hr_timeouts[event];
 
 	if (resched)
-		*timeout = ktime_add(ktime_get(),
-				ktime_set(0, event_delays_ns[event]));
+		*timeout = ktime_add(ktime_get(), event_delays_ns[event]);
 	fotg210->enabled_hrtimer_events |= (1 << event);
 
 	/* Track only the lowest-numbered pending event */
@@ -197,8 +197,7 @@ static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
 	if (!list_empty(&controller->early_tx_list) &&
 	    !hrtimer_is_queued(&controller->early_tx)) {
 		ret = HRTIMER_RESTART;
-		hrtimer_forward_now(&controller->early_tx,
-				    ktime_set(0, 20 * NSEC_PER_USEC));
+		hrtimer_forward_now(&controller->early_tx, 20 * NSEC_PER_USEC);
 	}
 
 	spin_unlock_irqrestore(&musb->lock, flags);
@@ -280,9 +279,9 @@ static void cppi41_dma_callback(void *private_data)
 		unsigned long usecs = cppi41_channel->total_len / 10;
 
 		hrtimer_start_range_ns(&controller->early_tx,
-				ktime_set(0, usecs * NSEC_PER_USEC),
-				20 * NSEC_PER_USEC,
-				HRTIMER_MODE_REL);
+					usecs * NSEC_PER_USEC,
+					20 * NSEC_PER_USEC,
+					HRTIMER_MODE_REL);
 	}
 
 out:
@@ -1395,7 +1395,7 @@ static int nodeid_warned(int nodeid, int num_nodes, int *warned)
 void dlm_scan_waiters(struct dlm_ls *ls)
 {
 	struct dlm_lkb *lkb;
-	ktime_t zero = ktime_set(0, 0);
+	ktime_t zero = 0;
 	s64 us;
 	s64 debug_maxus = 0;
 	u32 debug_scanned = 0;
@@ -695,7 +695,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	gl->gl_target = LM_ST_UNLOCKED;
 	gl->gl_demote_state = LM_ST_EXCLUSIVE;
 	gl->gl_ops = glops;
-	gl->gl_dstamp = ktime_set(0, 0);
+	gl->gl_dstamp = 0;
 	preempt_disable();
 	/* We use the global stats to estimate the initial per-glock stats */
 	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
@@ -155,7 +155,7 @@ static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
 	else
 		remaining = hrtimer_expires_remaining_adjusted(&ctx->t.tmr);
 
-	return remaining < 0 ? ktime_set(0, 0): remaining;
+	return remaining < 0 ? 0: remaining;
 }
 
 static int timerfd_setup(struct timerfd_ctx *ctx, int flags,
@@ -3227,7 +3227,7 @@ static inline ktime_t net_timedelta(ktime_t t)
 
 static inline ktime_t net_invalid_timestamp(void)
 {
-	return ktime_set(0, 0);
+	return 0;
 }
 
 struct sk_buff *skb_clone_sk(struct sk_buff *skb);
@@ -1456,7 +1456,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 		 * yield - it could be a while.
 		 */
 		if (unlikely(queued)) {
-			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
+			ktime_t to = NSEC_PER_SEC / HZ;
 
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
@@ -234,7 +234,7 @@ static int alarmtimer_suspend(struct device *dev)
 	min = freezer_delta;
 	expires = freezer_expires;
 	type = freezer_alarmtype;
-	freezer_delta = ktime_set(0, 0);
+	freezer_delta = 0;
 	spin_unlock_irqrestore(&freezer_delta_lock, flags);
 
 	rtc = alarmtimer_get_rtcdev();
@@ -277,7 +277,7 @@ static int alarmtimer_suspend(struct device *dev)
 	now = ktime_add(now, min);
 
 	/* Set alarm, if in the past reject suspend briefly to handle */
-	ret = rtc_timer_start(rtc, &rtctimer, now, ktime_set(0, 0));
+	ret = rtc_timer_start(rtc, &rtctimer, now, 0);
 	if (ret < 0)
 		__pm_wakeup_event(ws, MSEC_PER_SEC);
 	return ret;
@@ -955,7 +955,7 @@ static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
 	 */
 	timer->is_rel = mode & HRTIMER_MODE_REL;
 	if (timer->is_rel)
-		tim = ktime_add_safe(tim, ktime_set(0, hrtimer_resolution));
+		tim = ktime_add_safe(tim, hrtimer_resolution);
 #endif
 	return tim;
 }
@@ -485,7 +485,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
 			 */
 #ifdef CONFIG_HIGH_RES_TIMERS
 			{
-				ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ);
+				ktime_t kj = NSEC_PER_SEC / HZ;
 
 				if (timr->it.real.interval < kj)
 					now = ktime_add(now, kj);
@@ -178,8 +178,8 @@ static void tick_setup_device(struct tick_device *td,
 			      struct clock_event_device *newdev, int cpu,
 			      const struct cpumask *cpumask)
 {
-	ktime_t next_event;
 	void (*handler)(struct clock_event_device *) = NULL;
+	ktime_t next_event = 0;
 
 	/*
 	 * First device setup ?
@@ -195,7 +195,7 @@ static void tick_setup_device(struct tick_device *td,
 		else
 			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
 		tick_next_period = ktime_get();
-		tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
+		tick_period = NSEC_PER_SEC / HZ;
 	}
 
 	/*
@@ -643,7 +643,7 @@ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
 		return HRTIMER_RESTART;
 	} else {
 		/* rearm throttle handling */
-		op->kt_lastmsg = ktime_set(0, 0);
+		op->kt_lastmsg = 0;
 		return HRTIMER_NORESTART;
 	}
 }
@@ -1196,7 +1196,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 		 * In any case cancel the throttle timer, flush
 		 * potentially blocked msgs and reset throttle handling
 		 */
-		op->kt_lastmsg = ktime_set(0, 0);
+		op->kt_lastmsg = 0;
 		hrtimer_cancel(&op->thrtimer);
 		bcm_rx_thr_flush(op, 1);
 	}
@@ -80,11 +80,11 @@ void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
 
 		if (skb->len > max_sifs_size)
 			hrtimer_start(&local->ifs_timer,
-				      ktime_set(0, hw->phy->lifs_period * NSEC_PER_USEC),
+				      hw->phy->lifs_period * NSEC_PER_USEC,
 				      HRTIMER_MODE_REL);
 		else
 			hrtimer_start(&local->ifs_timer,
-				      ktime_set(0, hw->phy->sifs_period * NSEC_PER_USEC),
+				      hw->phy->sifs_period * NSEC_PER_USEC,
 				      HRTIMER_MODE_REL);
 	} else {
 		ieee802154_wake_queue(hw);
@@ -509,7 +509,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
 	if (delay) {
 		ktime_t time;
 
-		time = ktime_set(0, 0);
+		time = 0;
 		time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
 		hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
 	}
@@ -72,7 +72,7 @@ static struct sctp_transport *sctp_transport_init(struct net *net,
 	 */
 	peer->rto = msecs_to_jiffies(net->sctp.rto_initial);
 
-	peer->last_time_heard = ktime_set(0, 0);
+	peer->last_time_heard = 0;
 	peer->last_time_ecne_reduced = jiffies;
 
 	peer->param_flags = SPP_HB_DISABLE |
@@ -1404,7 +1404,7 @@ int xfrm_state_check_expire(struct xfrm_state *x)
 	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
 	    x->curlft.packets >= x->lft.hard_packet_limit) {
 		x->km.state = XFRM_STATE_EXPIRED;
-		tasklet_hrtimer_start(&x->mtimer, ktime_set(0, 0), HRTIMER_MODE_REL);
+		tasklet_hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL);
 		return -EINVAL;
 	}
 
@@ -166,7 +166,7 @@ static int pcsp_start_playing(struct snd_pcsp *chip)
 	atomic_set(&chip->timer_active, 1);
 	chip->thalf = 0;
 
-	hrtimer_start(&pcsp_chip.timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+	hrtimer_start(&pcsp_chip.timer, 0, HRTIMER_MODE_REL);
 	return 0;
 }
 
@@ -114,7 +114,7 @@ static void async_midi_port_callback(struct fw_card *card, int rcode,
 		snd_rawmidi_transmit_ack(substream, port->consume_bytes);
 	else if (!rcode_is_permanent_error(rcode))
 		/* To start next transaction immediately for recovery. */
-		port->next_ktime = ktime_set(0, 0);
+		port->next_ktime = 0;
 	else
 		/* Don't continue processing. */
 		port->error = true;
@@ -156,7 +156,7 @@ static void midi_port_work(struct work_struct *work)
 	if (port->consume_bytes <= 0) {
 		/* Do it in next chance, immediately. */
 		if (port->consume_bytes == 0) {
-			port->next_ktime = ktime_set(0, 0);
+			port->next_ktime = 0;
 			schedule_work(&port->work);
 		} else {
 			/* Fatal error. */
@@ -219,7 +219,7 @@ int snd_fw_async_midi_port_init(struct snd_fw_async_midi_port *port,
 	port->addr = addr;
 	port->fill = fill;
 	port->idling = true;
-	port->next_ktime = ktime_set(0, 0);
+	port->next_ktime = 0;
 	port->error = false;
 
 	INIT_WORK(&port->work, midi_port_work);
@@ -87,7 +87,7 @@ static void dac_audio_reset(struct snd_sh_dac *chip)
 
 static void dac_audio_set_rate(struct snd_sh_dac *chip)
 {
-	chip->wakeups_per_second = ktime_set(0, 1000000000 / chip->rate);
+	chip->wakeups_per_second = 1000000000 / chip->rate;
 }