s390/time: rename tod clock access functions
Fix name clash with some common code device drivers and add "tod" to
all tod clock access function names.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
commit 1aae0560d1
parent 58fece7827
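For quick reference, the full set of renames this commit applies, as they appear in the diff below:

    get_clock()           -> get_tod_clock()
    get_clock_ext()       -> get_tod_clock_ext()
    get_clock_xt()        -> get_tod_clock_xt()
    get_clock_monotonic() -> get_tod_clock_monotonic()
    set_clock()           -> set_tod_clock()
    store_clock()         -> store_tod_clock()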
@@ -108,7 +108,7 @@ static void appldata_get_mem_data(void *data)
 	mem_data->totalswap = P2K(val.totalswap);
 	mem_data->freeswap = P2K(val.freeswap);
 
-	mem_data->timestamp = get_clock();
+	mem_data->timestamp = get_tod_clock();
 	mem_data->sync_count_2++;
 }
 
@@ -111,7 +111,7 @@ static void appldata_get_net_sum_data(void *data)
 	net_data->tx_dropped = tx_dropped;
 	net_data->collisions = collisions;
 
-	net_data->timestamp = get_clock();
+	net_data->timestamp = get_tod_clock();
 	net_data->sync_count_2++;
 }
 
@@ -156,7 +156,7 @@ static void appldata_get_os_data(void *data)
 		}
 		ops.size = new_size;
 	}
-	os_data->timestamp = get_clock();
+	os_data->timestamp = get_tod_clock();
 	os_data->sync_count_2++;
 }
 
@@ -245,7 +245,7 @@ static int dbfs_diag2fc_create(void **data, void **data_free_ptr, size_t *size)
 	d2fc = diag2fc_store(guest_query, &count, sizeof(d2fc->hdr));
 	if (IS_ERR(d2fc))
 		return PTR_ERR(d2fc);
-	get_clock_ext(d2fc->hdr.tod_ext);
+	get_tod_clock_ext(d2fc->hdr.tod_ext);
 	d2fc->hdr.len = count * sizeof(struct diag2fc_data);
 	d2fc->hdr.version = DBFS_D2FC_HDR_VERSION;
 	d2fc->hdr.count = count;
@@ -15,7 +15,7 @@
 #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
 
 /* Inline functions for clock register access. */
-static inline int set_clock(__u64 time)
+static inline int set_tod_clock(__u64 time)
 {
 	int cc;
 
@@ -27,7 +27,7 @@ static inline int set_clock(__u64 time)
 	return cc;
 }
 
-static inline int store_clock(__u64 *time)
+static inline int store_tod_clock(__u64 *time)
 {
 	int cc;
 
@@ -71,7 +71,7 @@ static inline void local_tick_enable(unsigned long long comp)
 
 typedef unsigned long long cycles_t;
 
-static inline unsigned long long get_clock(void)
+static inline unsigned long long get_tod_clock(void)
 {
 	unsigned long long clk;
 
@@ -83,21 +83,21 @@ static inline unsigned long long get_clock(void)
 	return clk;
 }
 
-static inline void get_clock_ext(char *clk)
+static inline void get_tod_clock_ext(char *clk)
 {
 	asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
 }
 
-static inline unsigned long long get_clock_xt(void)
+static inline unsigned long long get_tod_clock_xt(void)
 {
 	unsigned char clk[16];
-	get_clock_ext(clk);
+	get_tod_clock_ext(clk);
 	return *((unsigned long long *)&clk[1]);
 }
 
 static inline cycles_t get_cycles(void)
 {
-	return (cycles_t) get_clock() >> 2;
+	return (cycles_t) get_tod_clock() >> 2;
 }
 
 int get_sync_clock(unsigned long long *clock);
@@ -123,9 +123,9 @@ extern u64 sched_clock_base_cc;
  * function, otherwise the returned value is not guaranteed to
  * be monotonic.
 */
-static inline unsigned long long get_clock_monotonic(void)
+static inline unsigned long long get_tod_clock_monotonic(void)
 {
-	return get_clock_xt() - sched_clock_base_cc;
+	return get_tod_clock_xt() - sched_clock_base_cc;
 }
 
 /**
@@ -867,7 +867,7 @@ static inline void
 debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level,
 		   int exception)
 {
-	active->id.stck = get_clock();
+	active->id.stck = get_tod_clock();
 	active->id.fields.cpuid = smp_processor_id();
 	active->caller = __builtin_return_address(0);
 	active->id.fields.exception = exception;
@@ -47,10 +47,10 @@ static void __init reset_tod_clock(void)
 {
 	u64 time;
 
-	if (store_clock(&time) == 0)
+	if (store_tod_clock(&time) == 0)
 		return;
 	/* TOD clock not running. Set the clock to Unix Epoch. */
-	if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0)
+	if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0)
 		disabled_wait(0);
 
 	sched_clock_base_cc = TOD_UNIX_EPOCH;
@@ -173,7 +173,7 @@ static noinline __init void create_kernel_nss(void)
 	}
 
 	/* re-initialize cputime accounting. */
-	sched_clock_base_cc = get_clock();
+	sched_clock_base_cc = get_tod_clock();
 	S390_lowcore.last_update_clock = sched_clock_base_cc;
 	S390_lowcore.last_update_timer = 0x7fffffffffffffffULL;
 	S390_lowcore.user_timer = 0;
@@ -293,7 +293,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
 	 * retry this instruction.
 	 */
 	spin_lock(&ipd_lock);
-	tmp = get_clock();
+	tmp = get_tod_clock();
 	if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME)
 		ipd_count++;
 	else
@@ -365,16 +365,16 @@ void smp_emergency_stop(cpumask_t *cpumask)
 	u64 end;
 	int cpu;
 
-	end = get_clock() + (1000000UL << 12);
+	end = get_tod_clock() + (1000000UL << 12);
 	for_each_cpu(cpu, cpumask) {
 		struct pcpu *pcpu = pcpu_devices + cpu;
 		set_bit(ec_stop_cpu, &pcpu->ec_mask);
 		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
 				   0, NULL) == SIGP_CC_BUSY &&
-		       get_clock() < end)
+		       get_tod_clock() < end)
 			cpu_relax();
 	}
-	while (get_clock() < end) {
+	while (get_tod_clock() < end) {
 		for_each_cpu(cpu, cpumask)
 			if (pcpu_stopped(pcpu_devices + cpu))
 				cpumask_clear_cpu(cpu, cpumask);
@@ -694,7 +694,7 @@ static void __init smp_detect_cpus(void)
 */
 static void __cpuinit smp_start_secondary(void *cpuvoid)
 {
-	S390_lowcore.last_update_clock = get_clock();
+	S390_lowcore.last_update_clock = get_tod_clock();
 	S390_lowcore.restart_stack = (unsigned long) restart_stack;
 	S390_lowcore.restart_fn = (unsigned long) do_restart;
 	S390_lowcore.restart_data = 0;
@@ -947,7 +947,7 @@ static ssize_t show_idle_time(struct device *dev,
 	unsigned int sequence;
 
 	do {
-		now = get_clock();
+		now = get_tod_clock();
 		sequence = ACCESS_ONCE(idle->sequence);
 		idle_time = ACCESS_ONCE(idle->idle_time);
 		idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
@@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
 */
 unsigned long long notrace __kprobes sched_clock(void)
 {
-	return tod_to_ns(get_clock_monotonic());
+	return tod_to_ns(get_tod_clock_monotonic());
 }
 
 /*
@@ -194,7 +194,7 @@ static void stp_reset(void);
 
 void read_persistent_clock(struct timespec *ts)
 {
-	tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, ts);
+	tod_to_timeval(get_tod_clock() - TOD_UNIX_EPOCH, ts);
 }
 
 void read_boot_clock(struct timespec *ts)
@@ -204,7 +204,7 @@ void read_boot_clock(struct timespec *ts)
 
 static cycle_t read_tod_clock(struct clocksource *cs)
 {
-	return get_clock();
+	return get_tod_clock();
 }
 
 static struct clocksource clocksource_tod = {
@@ -342,7 +342,7 @@ int get_sync_clock(unsigned long long *clock)
 
 	sw_ptr = &get_cpu_var(clock_sync_word);
 	sw0 = atomic_read(sw_ptr);
-	*clock = get_clock();
+	*clock = get_tod_clock();
 	sw1 = atomic_read(sw_ptr);
 	put_cpu_var(clock_sync_word);
 	if (sw0 == sw1 && (sw0 & 0x80000000U))
@@ -486,7 +486,7 @@ static void etr_reset(void)
 		.p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0,
 		.es = 0, .sl = 0 };
 	if (etr_setr(&etr_eacr) == 0) {
-		etr_tolec = get_clock();
+		etr_tolec = get_tod_clock();
 		set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags);
 		if (etr_port0_online && etr_port1_online)
 			set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
@@ -768,8 +768,8 @@ static int etr_sync_clock(void *data)
 	__ctl_set_bit(14, 21);
 	__ctl_set_bit(0, 29);
 	clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32;
-	old_clock = get_clock();
-	if (set_clock(clock) == 0) {
+	old_clock = get_tod_clock();
+	if (set_tod_clock(clock) == 0) {
 		__udelay(1); /* Wait for the clock to start. */
 		__ctl_clear_bit(0, 29);
 		__ctl_clear_bit(14, 21);
@@ -845,7 +845,7 @@ static struct etr_eacr etr_handle_events(struct etr_eacr eacr)
 		 * assume that this can have caused an stepping
 		 * port switch.
 		 */
-		etr_tolec = get_clock();
+		etr_tolec = get_tod_clock();
 		eacr.p0 = etr_port0_online;
 		if (!eacr.p0)
 			eacr.e0 = 0;
@@ -858,7 +858,7 @@ static struct etr_eacr etr_handle_events(struct etr_eacr eacr)
 		 * assume that this can have caused an stepping
 		 * port switch.
 		 */
-		etr_tolec = get_clock();
+		etr_tolec = get_tod_clock();
 		eacr.p1 = etr_port1_online;
 		if (!eacr.p1)
 			eacr.e1 = 0;
@@ -974,7 +974,7 @@ static void etr_update_eacr(struct etr_eacr eacr)
 	etr_eacr = eacr;
 	etr_setr(&etr_eacr);
 	if (dp_changed)
-		etr_tolec = get_clock();
+		etr_tolec = get_tod_clock();
 }
 
 /*
@@ -1012,7 +1012,7 @@ static void etr_work_fn(struct work_struct *work)
 	/* Store aib to get the current ETR status word. */
 	BUG_ON(etr_stetr(&aib) != 0);
 	etr_port0.esw = etr_port1.esw = aib.esw; /* Copy status word. */
-	now = get_clock();
+	now = get_tod_clock();
 
 	/*
 	 * Update the port information if the last stepping port change
@@ -1537,10 +1537,10 @@ static int stp_sync_clock(void *data)
 	if (stp_info.todoff[0] || stp_info.todoff[1] ||
 	    stp_info.todoff[2] || stp_info.todoff[3] ||
 	    stp_info.tmd != 2) {
-		old_clock = get_clock();
+		old_clock = get_tod_clock();
 		rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0);
 		if (rc == 0) {
-			delta = adjust_time(old_clock, get_clock(), 0);
+			delta = adjust_time(old_clock, get_tod_clock(), 0);
 			fixup_clock_comparator(delta);
 			rc = chsc_sstpi(stp_page, &stp_info,
 					sizeof(struct stp_sstpi));
@@ -191,7 +191,7 @@ cputime64_t s390_get_idle_time(int cpu)
 	unsigned int sequence;
 
 	do {
-		now = get_clock();
+		now = get_tod_clock();
 		sequence = ACCESS_ONCE(idle->sequence);
 		idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
 		idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
@@ -362,7 +362,7 @@ static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 	}
 
 	if ((!rc) && (vcpu->arch.sie_block->ckc <
-		get_clock() + vcpu->arch.sie_block->epoch)) {
+		get_tod_clock() + vcpu->arch.sie_block->epoch)) {
 		if ((!psw_extint_disabled(vcpu)) &&
 		    (vcpu->arch.sie_block->gcr[0] & 0x800ul))
 			rc = 1;
@@ -402,7 +402,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 		goto no_timer;
 	}
 
-	now = get_clock() + vcpu->arch.sie_block->epoch;
+	now = get_tod_clock() + vcpu->arch.sie_block->epoch;
 	if (vcpu->arch.sie_block->ckc < now) {
 		__unset_cpu_idle(vcpu);
 		return 0;
@@ -492,7 +492,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 	}
 
 	if ((vcpu->arch.sie_block->ckc <
-		get_clock() + vcpu->arch.sie_block->epoch))
+		get_tod_clock() + vcpu->arch.sie_block->epoch))
 		__try_deliver_ckc_interrupt(vcpu);
 
 	if (atomic_read(&fi->active)) {
@@ -32,7 +32,7 @@ static void __udelay_disabled(unsigned long long usecs)
 	unsigned long cr0, cr6, new;
 	u64 clock_saved, end;
 
-	end = get_clock() + (usecs << 12);
+	end = get_tod_clock() + (usecs << 12);
 	clock_saved = local_tick_disable();
 	__ctl_store(cr0, 0, 0);
 	__ctl_store(cr6, 6, 6);
@@ -45,7 +45,7 @@ static void __udelay_disabled(unsigned long long usecs)
 		set_clock_comparator(end);
 		vtime_stop_cpu();
 		local_irq_disable();
-	} while (get_clock() < end);
+	} while (get_tod_clock() < end);
 	lockdep_on();
 	__ctl_load(cr0, 0, 0);
 	__ctl_load(cr6, 6, 6);
@@ -56,7 +56,7 @@ static void __udelay_enabled(unsigned long long usecs)
 {
 	u64 clock_saved, end;
 
-	end = get_clock() + (usecs << 12);
+	end = get_tod_clock() + (usecs << 12);
 	do {
 		clock_saved = 0;
 		if (end < S390_lowcore.clock_comparator) {
@@ -67,7 +67,7 @@ static void __udelay_enabled(unsigned long long usecs)
 		local_irq_disable();
 		if (clock_saved)
 			local_tick_enable(clock_saved);
-	} while (get_clock() < end);
+	} while (get_tod_clock() < end);
 }
 
 /*
@@ -111,8 +111,8 @@ void udelay_simple(unsigned long long usecs)
 {
 	u64 end;
 
-	end = get_clock() + (usecs << 12);
-	while (get_clock() < end)
+	end = get_tod_clock() + (usecs << 12);
+	while (get_tod_clock() < end)
 		cpu_relax();
 }
 
@@ -122,10 +122,10 @@ void __ndelay(unsigned long long nsecs)
 
 	nsecs <<= 9;
 	do_div(nsecs, 125);
-	end = get_clock() + nsecs;
+	end = get_tod_clock() + nsecs;
 	if (nsecs & ~0xfffUL)
 		__udelay(nsecs >> 12);
-	while (get_clock() < end)
+	while (get_tod_clock() < end)
 		barrier();
 }
 EXPORT_SYMBOL(__ndelay);
@@ -1352,7 +1352,7 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
 	switch (rc) {
 	case 0: /* termination successful */
 		cqr->status = DASD_CQR_CLEAR_PENDING;
-		cqr->stopclk = get_clock();
+		cqr->stopclk = get_tod_clock();
 		cqr->starttime = 0;
 		DBF_DEV_EVENT(DBF_DEBUG, device,
 			      "terminate cqr %p successful",
@@ -1420,7 +1420,7 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
 		cqr->status = DASD_CQR_ERROR;
 		return -EIO;
 	}
-	cqr->startclk = get_clock();
+	cqr->startclk = get_tod_clock();
 	cqr->starttime = jiffies;
 	cqr->retries--;
 	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
@@ -1623,7 +1623,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 		return;
 	}
 
-	now = get_clock();
+	now = get_tod_clock();
 	cqr = (struct dasd_ccw_req *) intparm;
 	/* check for conditions that should be handled immediately */
 	if (!cqr ||
@@ -1963,7 +1963,7 @@ int dasd_flush_device_queue(struct dasd_device *device)
 			}
 			break;
 		case DASD_CQR_QUEUED:
-			cqr->stopclk = get_clock();
+			cqr->stopclk = get_tod_clock();
 			cqr->status = DASD_CQR_CLEARED;
 			break;
 		default: /* no need to modify the others */
@@ -2210,7 +2210,7 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
 			wait_event(generic_waitq, _wait_for_wakeup(cqr));
 	}
 
-	maincqr->endclk = get_clock();
+	maincqr->endclk = get_tod_clock();
 	if ((maincqr->status != DASD_CQR_DONE) &&
 	    (maincqr->intrc != -ERESTARTSYS))
 		dasd_log_sense(maincqr, &maincqr->irb);
@@ -2340,7 +2340,7 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
 			      "Cancelling request %p failed with rc=%d\n",
 			      cqr, rc);
 		} else {
-			cqr->stopclk = get_clock();
+			cqr->stopclk = get_tod_clock();
 		}
 		break;
 	default: /* already finished or clear pending - do nothing */
@@ -2568,7 +2568,7 @@ restart:
 		}
 
 		/* Rechain finished requests to final queue */
-		cqr->endclk = get_clock();
+		cqr->endclk = get_tod_clock();
 		list_move_tail(&cqr->blocklist, final_queue);
 	}
 }
@@ -2711,7 +2711,7 @@ restart_cb:
 		}
 		/* call the callback function */
 		spin_lock_irq(&block->request_queue_lock);
-		cqr->endclk = get_clock();
+		cqr->endclk = get_tod_clock();
 		list_del_init(&cqr->blocklist);
 		__dasd_cleanup_cqr(cqr);
 		spin_unlock_irq(&block->request_queue_lock);
@@ -3504,7 +3504,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
 	cqr->memdev = device;
 	cqr->expires = 10*HZ;
 	cqr->retries = 256;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	return cqr;
 }
@@ -229,7 +229,7 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
 	dctl_cqr->expires = 5 * 60 * HZ;
 	dctl_cqr->retries = 2;
 
-	dctl_cqr->buildclk = get_clock();
+	dctl_cqr->buildclk = get_tod_clock();
 
 	dctl_cqr->status = DASD_CQR_FILLED;
 
@@ -1719,7 +1719,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
 	erp->magic = default_erp->magic;
 	erp->expires = default_erp->expires;
 	erp->retries = 256;
-	erp->buildclk = get_clock();
+	erp->buildclk = get_tod_clock();
 	erp->status = DASD_CQR_FILLED;
 
 	/* remove the default erp */
@@ -2322,7 +2322,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
 		DBF_DEV_EVENT(DBF_ERR, device, "%s",
 			      "Unable to allocate ERP request");
 		cqr->status = DASD_CQR_FAILED;
-		cqr->stopclk = get_clock ();
+		cqr->stopclk = get_tod_clock();
 	} else {
 		DBF_DEV_EVENT(DBF_ERR, device,
 			      "Unable to allocate ERP request "
@@ -2364,7 +2364,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
 	erp->magic = cqr->magic;
 	erp->expires = cqr->expires;
 	erp->retries = 256;
-	erp->buildclk = get_clock();
+	erp->buildclk = get_tod_clock();
 	erp->status = DASD_CQR_FILLED;
 
 	return erp;
@@ -448,7 +448,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
 	ccw->count = sizeof(*(lcu->uac));
 	ccw->cda = (__u32)(addr_t) lcu->uac;
 
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 
 	/* need to unset flag here to detect race with summary unit check */
@@ -733,7 +733,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
 	cqr->memdev = device;
 	cqr->block = NULL;
 	cqr->expires = 5 * HZ;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 
 	rc = dasd_sleep_on_immediatly(cqr);
@@ -184,14 +184,14 @@ dasd_start_diag(struct dasd_ccw_req * cqr)
 	private->iob.bio_list = dreq->bio;
 	private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
 
-	cqr->startclk = get_clock();
+	cqr->startclk = get_tod_clock();
 	cqr->starttime = jiffies;
 	cqr->retries--;
 
 	rc = dia250(&private->iob, RW_BIO);
 	switch (rc) {
 	case 0: /* Synchronous I/O finished successfully */
-		cqr->stopclk = get_clock();
+		cqr->stopclk = get_tod_clock();
 		cqr->status = DASD_CQR_SUCCESS;
 		/* Indicate to calling function that only a dasd_schedule_bh()
 		   and no timer is needed */
@@ -222,7 +222,7 @@ dasd_diag_term_IO(struct dasd_ccw_req * cqr)
 	mdsk_term_io(device);
 	mdsk_init_io(device, device->block->bp_block, 0, NULL);
 	cqr->status = DASD_CQR_CLEAR_PENDING;
-	cqr->stopclk = get_clock();
+	cqr->stopclk = get_tod_clock();
 	dasd_schedule_device_bh(device);
 	return 0;
 }
@@ -276,7 +276,7 @@ static void dasd_ext_handler(struct ext_code ext_code,
 		return;
 	}
 
-	cqr->stopclk = get_clock();
+	cqr->stopclk = get_tod_clock();
 
 	expires = 0;
 	if ((ext_code.subcode & 0xff) == 0) {
@@ -556,7 +556,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
 		}
 	}
 	cqr->retries = DIAG_MAX_RETRIES;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	if (blk_noretry_request(req) ||
 	    block->base->features & DASD_FEATURE_FAILFAST)
 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
@@ -862,7 +862,7 @@ static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
 	cqr->expires = 10*HZ;
 	cqr->lpm = lpm;
 	cqr->retries = 256;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
 }
@@ -1449,7 +1449,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
 	ccw->count = sizeof(struct dasd_rssd_features);
 	ccw->cda = (__u32)(addr_t) features;
 
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	rc = dasd_sleep_on(cqr);
 	if (rc == 0) {
@@ -1501,7 +1501,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
 	cqr->block = NULL;
 	cqr->retries = 256;
 	cqr->expires = 10*HZ;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	return cqr;
 }
@@ -1841,7 +1841,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
 	cqr->startdev = device;
 	cqr->memdev = device;
 	cqr->retries = 255;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	return cqr;
 }
@@ -2241,7 +2241,7 @@ dasd_eckd_format_device(struct dasd_device * device,
 	fcp->startdev = device;
 	fcp->memdev = device;
 	fcp->retries = 256;
-	fcp->buildclk = get_clock();
+	fcp->buildclk = get_tod_clock();
 	fcp->status = DASD_CQR_FILLED;
 	return fcp;
 }
@@ -2530,7 +2530,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
 	cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
 	cqr->lpm = startdev->path_data.ppm;
 	cqr->retries = 256;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	return cqr;
 }
@@ -2705,7 +2705,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
 	cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
 	cqr->lpm = startdev->path_data.ppm;
 	cqr->retries = 256;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	return cqr;
 }
@@ -2998,7 +2998,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
 	cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
 	cqr->lpm = startdev->path_data.ppm;
 	cqr->retries = 256;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	return cqr;
 out_error:
@@ -3201,7 +3201,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
 	cqr->expires = startdev->default_expires * HZ;
 	cqr->lpm = startdev->path_data.ppm;
 	cqr->retries = 256;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 
 	if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
@@ -3402,7 +3402,7 @@ dasd_eckd_release(struct dasd_device *device)
 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 	cqr->retries = 2; /* set retry counter to enable basic ERP */
 	cqr->expires = 2 * HZ;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 
 	rc = dasd_sleep_on_immediatly(cqr);
@@ -3457,7 +3457,7 @@ dasd_eckd_reserve(struct dasd_device *device)
 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 	cqr->retries = 2; /* set retry counter to enable basic ERP */
 	cqr->expires = 2 * HZ;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 
 	rc = dasd_sleep_on_immediatly(cqr);
@@ -3511,7 +3511,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 	cqr->retries = 2; /* set retry counter to enable basic ERP */
 	cqr->expires = 2 * HZ;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 
 	rc = dasd_sleep_on_immediatly(cqr);
@@ -3572,7 +3572,7 @@ static int dasd_eckd_snid(struct dasd_device *device,
 	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
 	cqr->retries = 5;
 	cqr->expires = 10 * HZ;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	cqr->lpm = usrparm.path_mask;
 
@@ -3642,7 +3642,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
 	ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
 	ccw->cda = (__u32)(addr_t) stats;
 
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	rc = dasd_sleep_on(cqr);
 	if (rc == 0) {
@@ -3768,7 +3768,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
 	cqr->memdev = device;
 	cqr->retries = 3;
 	cqr->expires = 10 * HZ;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 
 	/* Build the ccws */
@@ -481,7 +481,7 @@ int dasd_eer_enable(struct dasd_device *device)
 	ccw->flags = 0;
 	ccw->cda = (__u32)(addr_t) cqr->data;
 
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	cqr->callback = dasd_eer_snss_cb;
 
@@ -102,7 +102,7 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
 		pr_err("%s: default ERP has run out of retries and failed\n",
 		       dev_name(&device->cdev->dev));
 		cqr->status = DASD_CQR_FAILED;
-		cqr->stopclk = get_clock();
+		cqr->stopclk = get_tod_clock();
 	}
 	return cqr;
 } /* end dasd_default_erp_action */
@@ -146,7 +146,7 @@ struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr)
 		cqr->status = DASD_CQR_DONE;
 	else {
 		cqr->status = DASD_CQR_FAILED;
-		cqr->stopclk = get_clock();
+		cqr->stopclk = get_tod_clock();
 	}
 
 	return cqr;
@@ -370,7 +370,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
 	cqr->block = block;
 	cqr->expires = memdev->default_expires * HZ; /* default 5 minutes */
 	cqr->retries = 32;
-	cqr->buildclk = get_clock();
+	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	return cqr;
 }
@@ -450,7 +450,7 @@ sclp_sync_wait(void)
 	timeout = 0;
 	if (timer_pending(&sclp_request_timer)) {
 		/* Get timeout TOD value */
-		timeout = get_clock() +
+		timeout = get_tod_clock() +
 			  sclp_tod_from_jiffies(sclp_request_timer.expires -
 						jiffies);
 	}
@@ -472,7 +472,7 @@ sclp_sync_wait(void)
 	while (sclp_running_state != sclp_running_state_idle) {
 		/* Check for expired request timer */
 		if (timer_pending(&sclp_request_timer) &&
-		    get_clock() > timeout &&
+		    get_tod_clock() > timeout &&
 		    del_timer(&sclp_request_timer))
 			sclp_request_timer.function(sclp_request_timer.data);
 		cpu_relax();
@@ -637,7 +637,7 @@ static int __init zcore_header_init(int arch, struct zcore_header *hdr)
 	hdr->rmem_size = memory;
 	hdr->mem_end = sys_info.mem_size;
 	hdr->num_pages = memory / PAGE_SIZE;
-	hdr->tod = get_clock();
+	hdr->tod = get_tod_clock();
 	get_cpu_id(&hdr->cpu_id);
 	for (i = 0; zfcpdump_save_areas[i]; i++) {
 		prefix = zfcpdump_save_areas[i]->pref_reg;
@@ -962,9 +962,9 @@ static void css_reset(void)
 		atomic_inc(&chpid_reset_count);
 	}
 	/* Wait for machine check for all channel paths. */
-	timeout = get_clock() + (RCHP_TIMEOUT << 12);
+	timeout = get_tod_clock() + (RCHP_TIMEOUT << 12);
 	while (atomic_read(&chpid_reset_count) != 0) {
-		if (get_clock() > timeout)
+		if (get_tod_clock() > timeout)
 			break;
 		cpu_relax();
 	}
@@ -33,7 +33,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/slab.h>
-#include <linux/timex.h> /* get_clock() */
+#include <linux/timex.h> /* get_tod_clock() */
 
 #include <asm/ccwdev.h>
 #include <asm/cio.h>
@@ -326,7 +326,7 @@ static int cmf_copy_block(struct ccw_device *cdev)
 		memcpy(cmb_data->last_block, hw_block, cmb_data->size);
 		memcpy(reference_buf, hw_block, cmb_data->size);
 	} while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size));
-	cmb_data->last_update = get_clock();
+	cmb_data->last_update = get_tod_clock();
 	kfree(reference_buf);
 	return 0;
 }
@@ -428,7 +428,7 @@ static void cmf_generic_reset(struct ccw_device *cdev)
 		memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size);
 		cmb_data->last_update = 0;
 	}
-	cdev->private->cmb_start_time = get_clock();
+	cdev->private->cmb_start_time = get_tod_clock();
 	spin_unlock_irq(cdev->ccwlock);
 }
 
@@ -780,7 +780,7 @@ static int __init setup_css(int nr)
 	css->cssid = nr;
 	dev_set_name(&css->device, "css%x", nr);
 	css->device.release = channel_subsystem_release;
-	tod_high = (u32) (get_clock() >> 32);
+	tod_high = (u32) (get_tod_clock() >> 32);
 	css_generate_pgid(css, tod_high);
 	return 0;
 }
@@ -47,7 +47,7 @@ static void ccw_timeout_log(struct ccw_device *cdev)
 	cc = stsch_err(sch->schid, &schib);
 
 	printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
-	       "device information:\n", get_clock());
+	       "device information:\n", get_tod_clock());
 	printk(KERN_WARNING "cio: orb:\n");
 	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
 		       orb, sizeof(*orb), 0);
@@ -338,10 +338,10 @@ again:
 		retries++;
 
 		if (!start_time) {
-			start_time = get_clock();
+			start_time = get_tod_clock();
 			goto again;
 		}
-		if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
+		if ((get_tod_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
 			goto again;
 	}
 	if (retries) {
@@ -504,7 +504,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
 	int count, stop;
 	unsigned char state = 0;
 
-	q->timestamp = get_clock();
+	q->timestamp = get_tod_clock();
 
 	/*
 	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -563,7 +563,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
 	if (bufnr != q->last_move) {
 		q->last_move = bufnr;
 		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
-			q->u.in.timestamp = get_clock();
+			q->u.in.timestamp = get_tod_clock();
 		return 1;
 	} else
 		return 0;
@@ -595,7 +595,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
 	 * At this point we know, that inbound first_to_check
 	 * has (probably) not moved (see qdio_inbound_processing).
 	 */
-	if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
+	if (get_tod_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
 			      q->first_to_check);
 		return 1;
@@ -772,7 +772,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
 	int count, stop;
 	unsigned char state = 0;
 
-	q->timestamp = get_clock();
+	q->timestamp = get_tod_clock();
 
 	if (need_siga_sync(q))
 		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
@@ -816,7 +816,7 @@ static inline struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev)
 
 static inline int qeth_get_micros(void)
 {
-	return (int) (get_clock() >> 12);
+	return (int) (get_tod_clock() >> 12);
 }
 
 static inline int qeth_get_ip_version(struct sk_buff *skb)
@@ -727,7 +727,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
 	zfcp_reqlist_add(adapter->req_list, req);
 
 	req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
-	req->issued = get_clock();
+	req->issued = get_tod_clock();
 	if (zfcp_qdio_send(qdio, &req->qdio_req)) {
 		del_timer(&req->timer);
 		/* lookup request again, list might have changed */
@@ -68,7 +68,7 @@ static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
 	unsigned long long now, span;
 	int used;
 
-	now = get_clock_monotonic();
+	now = get_tod_clock_monotonic();
 	span = (now - qdio->req_q_time) >> 12;
 	used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
 	qdio->req_q_util += used * span;