clocksource: pass clocksource to read() callback
Pass the clocksource pointer to the clocksource read() callback. This allows a single read() implementation to be shared between multiple clocksource instances.

[hugh@veritas.com: fix powerpc build of clocksource pass clocksource mods]
[akpm@linux-foundation.org: cleanup]
Signed-off-by: Magnus Damm <damm@igel.co.jp>
Acked-by: John Stultz <johnstul@us.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ff14ed5db6
commit 8e19608e8b
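The point of the new argument is that one read() implementation can now serve several clocksource instances. The sketch below is not part of this patch and the demo_timer structure, register and names are made up for illustration only; it shows how a driver could embed the struct clocksource in per-instance data and recover that data with container_of():

/*
 * Minimal sketch (not from this patch; all demo_* names are hypothetical):
 * a single read() callback shared by two timer instances, using the
 * clocksource pointer to find the per-instance state.
 */
#include <linux/clocksource.h>
#include <linux/io.h>
#include <linux/kernel.h>

struct demo_timer {
        void __iomem *counter_reg;      /* per-instance counter register */
        struct clocksource cs;          /* embedded clocksource */
};

static cycle_t demo_timer_read(struct clocksource *cs)
{
        /* Recover the enclosing per-instance structure from the clocksource. */
        struct demo_timer *t = container_of(cs, struct demo_timer, cs);

        return (cycle_t)readl(t->counter_reg);
}

/* Both instances share demo_timer_read(); only the embedded data differs. */
static struct demo_timer demo_timers[2] = {
        { .cs = { .name = "demo0", .rating = 200, .read = demo_timer_read,
                  .mask = CLOCKSOURCE_MASK(32) } },
        { .cs = { .name = "demo1", .rating = 200, .read = demo_timer_read,
                  .mask = CLOCKSOURCE_MASK(32) } },
};

Each instance would then be registered with clocksource_register(&demo_timers[i].cs). Where an existing reader has other callers and cannot change its signature, the patch instead adds a small wrapper that takes the struct clocksource argument, as the sparc64, kvm-clock and Xen hunks below do.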
@@ -85,7 +85,7 @@ static struct irqaction at91rm9200_timer_irq = {
         .handler = at91rm9200_timer_interrupt
 };
 
-static cycle_t read_clk32k(void)
+static cycle_t read_clk32k(struct clocksource *cs)
 {
         return read_CRTR();
 }
@@ -31,7 +31,7 @@ static u32 pit_cnt; /* access only w/system irq blocked */
  * Clocksource: just a monotonic counter of MCK/16 cycles.
  * We don't care whether or not PIT irqs are enabled.
  */
-static cycle_t read_pit_clk(void)
+static cycle_t read_pit_clk(struct clocksource *cs)
 {
         unsigned long flags;
         u32 elapsed;
@@ -238,7 +238,7 @@ static void __init timer_init(void)
 /*
  * clocksource
  */
-static cycle_t read_cycles(void)
+static cycle_t read_cycles(struct clocksource *cs)
 {
         struct timer_s *t = &timers[TID_CLOCKSOURCE];
 
@@ -73,7 +73,7 @@ static void __init imx_timer_hardware_init(void)
         IMX_TCTL(TIMER_BASE) = TCTL_FRR | TCTL_CLK_PCLK1 | TCTL_TEN;
 }
 
-cycle_t imx_get_cycles(void)
+cycle_t imx_get_cycles(struct clocksource *cs)
 {
         return IMX_TCN(TIMER_BASE);
 }
@@ -401,7 +401,7 @@ void __init ixp4xx_sys_init(void)
 /*
  * clocksource
  */
-cycle_t ixp4xx_get_cycles(void)
+cycle_t ixp4xx_get_cycles(struct clocksource *cs)
 {
         return *IXP4XX_OSTS;
 }
@@ -57,12 +57,12 @@ static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
         return IRQ_HANDLED;
 }
 
-static cycle_t msm_gpt_read(void)
+static cycle_t msm_gpt_read(struct clocksource *cs)
 {
         return readl(MSM_GPT_BASE + TIMER_COUNT_VAL);
 }
 
-static cycle_t msm_dgt_read(void)
+static cycle_t msm_dgt_read(struct clocksource *cs)
 {
         return readl(MSM_DGT_BASE + TIMER_COUNT_VAL) >> MSM_DGT_SHIFT;
 }
@@ -104,7 +104,7 @@ static struct irqaction netx_timer_irq = {
         .handler = netx_timer_interrupt,
 };
 
-cycle_t netx_get_cycles(void)
+cycle_t netx_get_cycles(struct clocksource *cs)
 {
         return readl(NETX_GPIO_COUNTER_CURRENT(TIMER_CLOCKSOURCE));
 }
@@ -25,7 +25,7 @@
 #define TIMER_CLOCKEVENT 1
 static u32 latch;
 
-static cycle_t ns9360_clocksource_read(void)
+static cycle_t ns9360_clocksource_read(struct clocksource *cs)
 {
         return __raw_readl(SYS_TR(TIMER_CLOCKSOURCE));
 }
@@ -198,7 +198,7 @@ static struct irqaction omap_mpu_timer2_irq = {
         .handler = omap_mpu_timer2_interrupt,
 };
 
-static cycle_t mpu_read(void)
+static cycle_t mpu_read(struct clocksource *cs)
 {
         return ~omap_mpu_timer_read(1);
 }
@@ -138,7 +138,7 @@ static inline void __init omap2_gp_clocksource_init(void) {}
  * clocksource
  */
 static struct omap_dm_timer *gpt_clocksource;
-static cycle_t clocksource_read_cycles(void)
+static cycle_t clocksource_read_cycles(struct clocksource *cs)
 {
         return (cycle_t)omap_dm_timer_read_counter(gpt_clocksource);
 }
@@ -125,7 +125,7 @@ static struct clock_event_device ckevt_pxa_osmr0 = {
         .set_mode = pxa_osmr0_set_mode,
 };
 
-static cycle_t pxa_read_oscr(void)
+static cycle_t pxa_read_oscr(struct clocksource *cs)
 {
         return OSCR;
 }
@@ -715,7 +715,7 @@ static struct irqaction realview_timer_irq = {
         .handler = realview_timer_interrupt,
 };
 
-static cycle_t realview_get_cycles(void)
+static cycle_t realview_get_cycles(struct clocksource *cs)
 {
         return ~readl(timer3_va_base + TIMER_VALUE);
 }
@@ -948,7 +948,7 @@ static struct irqaction versatile_timer_irq = {
         .handler = versatile_timer_interrupt,
 };
 
-static cycle_t versatile_get_cycles(void)
+static cycle_t versatile_get_cycles(struct clocksource *cs)
 {
         return ~readl(TIMER3_VA_BASE + TIMER_VALUE);
 }
@@ -36,7 +36,7 @@ static enum clock_event_mode clockevent_mode = CLOCK_EVT_MODE_UNUSED;
 
 /* clock source */
 
-static cycle_t mxc_get_cycles(void)
+static cycle_t mxc_get_cycles(struct clocksource *cs)
 {
         return __raw_readl(TIMER_BASE + MXC_TCN);
 }
@@ -185,7 +185,7 @@ console_initcall(omap_add_serial_console);
 
 #include <linux/clocksource.h>
 
-static cycle_t omap_32k_read(void)
+static cycle_t omap_32k_read(struct clocksource *cs)
 {
         return omap_readl(TIMER_32K_SYNCHRONIZED);
 }
@@ -207,7 +207,7 @@ unsigned long long sched_clock(void)
 {
         unsigned long long ret;
 
-        ret = (unsigned long long)omap_32k_read();
+        ret = (unsigned long long)omap_32k_read(&clocksource_32k);
         ret = (ret * clocksource_32k.mult_orig) >> clocksource_32k.shift;
         return ret;
 }
@@ -41,7 +41,7 @@ static u32 ticks_per_jiffy;
 /*
  * Clocksource handling.
  */
-static cycle_t orion_clksrc_read(void)
+static cycle_t orion_clksrc_read(struct clocksource *cs)
 {
         return 0xffffffff - readl(TIMER0_VAL);
 }
@@ -18,7 +18,7 @@
 #include <mach/pm.h>
 
 
-static cycle_t read_cycle_count(void)
+static cycle_t read_cycle_count(struct clocksource *cs)
 {
         return (cycle_t)sysreg_read(COUNT);
 }
@@ -58,16 +58,11 @@ static inline unsigned long long cycles_2_ns(cycle_t cyc)
         return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
 }
 
-static cycle_t read_cycles(void)
+static cycle_t read_cycles(struct clocksource *cs)
 {
         return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
 }
 
-unsigned long long sched_clock(void)
-{
-        return cycles_2_ns(read_cycles());
-}
-
 static struct clocksource clocksource_bfin = {
         .name = "bfin_cycles",
         .rating = 350,
@@ -77,6 +72,11 @@ static struct clocksource clocksource_bfin = {
         .flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+unsigned long long sched_clock(void)
+{
+        return cycles_2_ns(read_cycles(&clocksource_bfin));
+}
+
 static int __init bfin_clocksource_init(void)
 {
         set_cyc2ns_scale(get_cclk() / 1000);
@@ -21,7 +21,7 @@ void __init cyclone_setup(void)
 
 static void __iomem *cyclone_mc;
 
-static cycle_t read_cyclone(void)
+static cycle_t read_cyclone(struct clocksource *cs)
 {
         return (cycle_t)readq((void __iomem *)cyclone_mc);
 }
@@ -33,7 +33,7 @@
 
 #include "fsyscall_gtod_data.h"
 
-static cycle_t itc_get_cycles(void);
+static cycle_t itc_get_cycles(struct clocksource *cs);
 
 struct fsyscall_gtod_data_t fsyscall_gtod_data = {
         .lock = SEQLOCK_UNLOCKED,
@@ -383,7 +383,7 @@ ia64_init_itm (void)
         }
 }
 
-static cycle_t itc_get_cycles(void)
+static cycle_t itc_get_cycles(struct clocksource *cs)
 {
         u64 lcycle, now, ret;
 
@@ -23,7 +23,7 @@
 
 extern unsigned long sn_rtc_cycles_per_second;
 
-static cycle_t read_sn2(void)
+static cycle_t read_sn2(struct clocksource *cs)
 {
         return (cycle_t)readq(RTC_COUNTER_ADDR);
 }
@@ -75,7 +75,7 @@ static struct irqaction m68328_timer_irq = {
 
 /***************************************************************************/
 
-static cycle_t m68328_read_clk(void)
+static cycle_t m68328_read_clk(struct clocksource *cs)
 {
         unsigned long flags;
         u32 cycles;
@@ -34,7 +34,7 @@
 #define DMA_DTMR_CLK_DIV_16 (2 << 1)
 #define DMA_DTMR_ENABLE (1 << 0)
 
-static cycle_t cf_dt_get_cycles(void)
+static cycle_t cf_dt_get_cycles(struct clocksource *cs)
 {
         return __raw_readl(DTCN0);
 }
@@ -125,7 +125,7 @@ static struct irqaction pit_irq = {
 
 /***************************************************************************/
 
-static cycle_t pit_read_clk(void)
+static cycle_t pit_read_clk(struct clocksource *cs)
 {
         unsigned long flags;
         u32 cycles;
@@ -78,7 +78,7 @@ static struct irqaction mcftmr_timer_irq = {
 
 /***************************************************************************/
 
-static cycle_t mcftmr_read_clk(void)
+static cycle_t mcftmr_read_clk(struct clocksource *cs)
 {
         unsigned long flags;
         u32 cycles;
@@ -22,7 +22,7 @@
 
 static struct txx9_tmr_reg __iomem *txx9_cs_tmrptr;
 
-static cycle_t txx9_cs_read(void)
+static cycle_t txx9_cs_read(struct clocksource *cs)
 {
         return __raw_readl(&txx9_cs_tmrptr->trr);
 }
@@ -28,7 +28,7 @@
 
 #include <asm/sibyte/sb1250.h>
 
-static cycle_t bcm1480_hpt_read(void)
+static cycle_t bcm1480_hpt_read(struct clocksource *cs)
 {
         return (cycle_t) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT));
 }
@@ -25,7 +25,7 @@
 #include <asm/dec/ioasic.h>
 #include <asm/dec/ioasic_addrs.h>
 
-static cycle_t dec_ioasic_hpt_read(void)
+static cycle_t dec_ioasic_hpt_read(struct clocksource *cs)
 {
         return ioasic_read(IO_REG_FCTR);
 }
@@ -47,13 +47,13 @@ void __init dec_ioasic_clocksource_init(void)
         while (!ds1287_timer_state())
                 ;
 
-        start = dec_ioasic_hpt_read();
+        start = dec_ioasic_hpt_read(&clocksource_dec);
 
         while (i--)
                 while (!ds1287_timer_state())
                         ;
 
-        end = dec_ioasic_hpt_read();
+        end = dec_ioasic_hpt_read(&clocksource_dec);
 
         freq = (end - start) * 10;
         printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq);
@@ -10,7 +10,7 @@
 
 #include <asm/time.h>
 
-static cycle_t c0_hpt_read(void)
+static cycle_t c0_hpt_read(struct clocksource *cs)
 {
         return read_c0_count();
 }
@@ -33,7 +33,7 @@
  * The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over
  * again.
  */
-static cycle_t sb1250_hpt_read(void)
+static cycle_t sb1250_hpt_read(struct clocksource *cs)
 {
         unsigned int count;
 
@@ -128,7 +128,7 @@ void __init setup_pit_timer(void)
  * to just read by itself. So use jiffies to emulate a free
  * running counter:
  */
-static cycle_t pit_read(void)
+static cycle_t pit_read(struct clocksource *cs)
 {
         unsigned long flags;
         int count;
@@ -35,7 +35,7 @@
 
 static unsigned long cpj;
 
-static cycle_t hpt_read(void)
+static cycle_t hpt_read(struct clocksource *cs)
 {
         return read_c0_count2();
 }
@@ -159,7 +159,7 @@ static void __init hub_rt_clock_event_global_init(void)
         setup_irq(irq, &hub_rt_irqaction);
 }
 
-static cycle_t hub_rt_read(void)
+static cycle_t hub_rt_read(struct clocksource *cs)
 {
         return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT);
 }
@@ -77,7 +77,7 @@
 #include <linux/clockchips.h>
 #include <linux/clocksource.h>
 
-static cycle_t rtc_read(void);
+static cycle_t rtc_read(struct clocksource *);
 static struct clocksource clocksource_rtc = {
         .name = "rtc",
         .rating = 400,
@@ -88,7 +88,7 @@ static struct clocksource clocksource_rtc = {
         .read = rtc_read,
 };
 
-static cycle_t timebase_read(void);
+static cycle_t timebase_read(struct clocksource *);
 static struct clocksource clocksource_timebase = {
         .name = "timebase",
         .rating = 400,
@@ -766,12 +766,12 @@ unsigned long read_persistent_clock(void)
 }
 
 /* clocksource code */
-static cycle_t rtc_read(void)
+static cycle_t rtc_read(struct clocksource *cs)
 {
         return (cycle_t)get_rtc();
 }
 
-static cycle_t timebase_read(void)
+static cycle_t timebase_read(struct clocksource *cs)
 {
         return (cycle_t)get_tb();
 }
@@ -201,7 +201,7 @@ unsigned long read_persistent_clock(void)
         return ts.tv_sec;
 }
 
-static cycle_t read_tod_clock(void)
+static cycle_t read_tod_clock(struct clocksource *cs)
 {
         return get_clock();
 }
@@ -208,7 +208,7 @@ unsigned long long sched_clock(void)
         if (!clocksource_sh.rating)
                 return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
 
-        cycles = clocksource_sh.read();
+        cycles = clocksource_sh.read(&clocksource_sh);
         return cyc2ns(&clocksource_sh, cycles);
 }
 #endif
@@ -81,7 +81,7 @@ static int tmu_timer_stop(void)
  */
 static int tmus_are_scaled;
 
-static cycle_t tmu_timer_read(void)
+static cycle_t tmu_timer_read(struct clocksource *cs)
 {
         return ((cycle_t)(~_tmu_read(TMU1)))<<tmus_are_scaled;
 }
@@ -814,6 +814,11 @@ void udelay(unsigned long usecs)
 }
 EXPORT_SYMBOL(udelay);
 
+static cycle_t clocksource_tick_read(struct clocksource *cs)
+{
+        return tick_ops->get_tick();
+}
+
 void __init time_init(void)
 {
         unsigned long freq = sparc64_init_timers();
@@ -827,7 +832,7 @@ void __init time_init(void)
         clocksource_tick.mult =
                 clocksource_hz2mult(freq,
                                     clocksource_tick.shift);
-        clocksource_tick.read = tick_ops->get_tick;
+        clocksource_tick.read = clocksource_tick_read;
 
         printk("clocksource: mult[%x] shift[%d]\n",
                clocksource_tick.mult, clocksource_tick.shift);
@@ -65,7 +65,7 @@ static irqreturn_t um_timer(int irq, void *dev)
         return IRQ_HANDLED;
 }
 
-static cycle_t itimer_read(void)
+static cycle_t itimer_read(struct clocksource *cs)
 {
         return os_nsecs() / 1000;
 }
@@ -722,7 +722,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
 /*
  * Clock source related code
 */
-static cycle_t read_hpet(void)
+static cycle_t read_hpet(struct clocksource *cs)
 {
         return (cycle_t)hpet_readl(HPET_COUNTER);
 }
@@ -756,7 +756,7 @@ static int hpet_clocksource_register(void)
         hpet_restart_counter();
 
         /* Verify whether hpet counter works */
-        t1 = read_hpet();
+        t1 = hpet_readl(HPET_COUNTER);
         rdtscll(start);
 
         /*
@@ -770,7 +770,7 @@ static int hpet_clocksource_register(void)
                 rdtscll(now);
         } while ((now - start) < 200000UL);
 
-        if (t1 == read_hpet()) {
+        if (t1 == hpet_readl(HPET_COUNTER)) {
                 printk(KERN_WARNING
                        "HPET counter not counting. HPET disabled\n");
                 return -ENODEV;
@@ -129,7 +129,7 @@ void __init setup_pit_timer(void)
  * to just read by itself. So use jiffies to emulate a free
  * running counter:
  */
-static cycle_t pit_read(void)
+static cycle_t pit_read(struct clocksource *cs)
 {
         static int old_count;
         static u32 old_jifs;
@@ -77,6 +77,11 @@ static cycle_t kvm_clock_read(void)
         return ret;
 }
 
+static cycle_t kvm_clock_get_cycles(struct clocksource *cs)
+{
+        return kvm_clock_read();
+}
+
 /*
  * If we don't do that, there is the possibility that the guest
  * will calibrate under heavy load - thus, getting a lower lpj -
@@ -107,7 +112,7 @@ static void kvm_get_preset_lpj(void)
 
 static struct clocksource kvm_clock = {
         .name = "kvm-clock",
-        .read = kvm_clock_read,
+        .read = kvm_clock_get_cycles,
         .rating = 400,
         .mask = CLOCKSOURCE_MASK(64),
         .mult = 1 << KVM_SCALE,
@@ -699,7 +699,7 @@ static struct clocksource clocksource_tsc;
  * code, which is necessary to support wrapping clocksources like pm
  * timer.
 */
-static cycle_t read_tsc(void)
+static cycle_t read_tsc(struct clocksource *cs)
 {
         cycle_t ret = (cycle_t)get_cycles();
 
@@ -283,7 +283,7 @@ void __devinit vmi_time_ap_init(void)
 /** vmi clocksource */
 static struct clocksource clocksource_vmi;
 
-static cycle_t read_real_cycles(void)
+static cycle_t read_real_cycles(struct clocksource *cs)
 {
         cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
         return max(ret, clocksource_vmi.cycle_last);
@@ -663,7 +663,7 @@ static unsigned long lguest_tsc_khz(void)
 
 /* If we can't use the TSC, the kernel falls back to our lower-priority
  * "lguest_clock", where we read the time value given to us by the Host. */
-static cycle_t lguest_clock_read(void)
+static cycle_t lguest_clock_read(struct clocksource *cs)
 {
         unsigned long sec, nsec;
 
@@ -213,6 +213,11 @@ cycle_t xen_clocksource_read(void)
         return ret;
 }
 
+static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
+{
+        return xen_clocksource_read();
+}
+
 static void xen_read_wallclock(struct timespec *ts)
 {
         struct shared_info *s = HYPERVISOR_shared_info;
@@ -241,7 +246,7 @@ int xen_set_wallclock(unsigned long now)
 static struct clocksource xen_clocksource __read_mostly = {
         .name = "xen",
         .rating = 400,
-        .read = xen_clocksource_read,
+        .read = xen_clocksource_get_cycles,
         .mask = ~0,
         .mult = 1<<XEN_SHIFT, /* time directly in nanoseconds */
         .shift = XEN_SHIFT,
@@ -72,7 +72,7 @@ static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
 #ifdef CONFIG_IA64
 static void __iomem *hpet_mctr;
 
-static cycle_t read_hpet(void)
+static cycle_t read_hpet(struct clocksource *cs)
 {
         return (cycle_t)read_counter((void __iomem *)hpet_mctr);
 }
@@ -57,7 +57,7 @@ u32 acpi_pm_read_verified(void)
         return v2;
 }
 
-static cycle_t acpi_pm_read(void)
+static cycle_t acpi_pm_read(struct clocksource *cs)
 {
         return (cycle_t)read_pmtmr();
 }
@@ -83,7 +83,7 @@ static int __init acpi_pm_good_setup(char *__str)
 }
 __setup("acpi_pm_good", acpi_pm_good_setup);
 
-static cycle_t acpi_pm_read_slow(void)
+static cycle_t acpi_pm_read_slow(struct clocksource *cs)
 {
         return (cycle_t)acpi_pm_read_verified();
 }
@@ -156,9 +156,9 @@ static int verify_pmtmr_rate(void)
         unsigned long count, delta;
 
         mach_prepare_counter();
-        value1 = clocksource_acpi_pm.read();
+        value1 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
         mach_countup(&count);
-        value2 = clocksource_acpi_pm.read();
+        value2 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
         delta = (value2 - value1) & ACPI_PM_MASK;
 
         /* Check that the PMTMR delta is within 5% of what we expect */
@@ -195,9 +195,9 @@ static int __init init_acpi_pm_clocksource(void)
         /* "verify" this timing source: */
         for (j = 0; j < ACPI_PM_MONOTONICITY_CHECKS; j++) {
                 udelay(100 * j);
-                value1 = clocksource_acpi_pm.read();
+                value1 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
                 for (i = 0; i < ACPI_PM_READ_CHECKS; i++) {
-                        value2 = clocksource_acpi_pm.read();
+                        value2 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
                         if (value2 == value1)
                                 continue;
                         if (value2 > value1)
@@ -19,7 +19,7 @@
 int use_cyclone = 0;
 static void __iomem *cyclone_ptr;
 
-static cycle_t read_cyclone(void)
+static cycle_t read_cyclone(struct clocksource *cs)
 {
         return (cycle_t)readl(cyclone_ptr);
 }
@@ -43,7 +43,7 @@ MODULE_PARM_DESC(ppm, "+-adjust to actual XO freq (ppm)");
 /* The base timer frequency, * 27 if selected */
 #define HRT_FREQ 1000000
 
-static cycle_t read_hrt(void)
+static cycle_t read_hrt(struct clocksource *cs)
 {
         /* Read the timer value */
         return (cycle_t) inl(scx200_cb_base + SCx200_TIMER_OFFSET);
@@ -39,7 +39,7 @@
 
 static void __iomem *tcaddr;
 
-static cycle_t tc_get_cycles(void)
+static cycle_t tc_get_cycles(struct clocksource *cs)
 {
         unsigned long flags;
         u32 lower, upper;
@@ -143,7 +143,7 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
  * 400-499: Perfect
  *      The ideal clocksource. A must-use where
  *      available.
- * @read:          returns a cycle value
+ * @read:          returns a cycle value, passes clocksource as argument
 * @mask:          bitmask for two's complement
 *                 subtraction of non 64 bit counters
 * @mult:          cycle to nanosecond multiplier (adjusted by NTP)
@@ -162,7 +162,7 @@ struct clocksource {
         char *name;
         struct list_head list;
         int rating;
-        cycle_t (*read)(void);
+        cycle_t (*read)(struct clocksource *cs);
         cycle_t mask;
         u32 mult;
         u32 mult_orig;
@@ -271,7 +271,7 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
 */
 static inline cycle_t clocksource_read(struct clocksource *cs)
 {
-        return cs->read();
+        return cs->read(cs);
 }
 
 /**
@@ -181,12 +181,12 @@ static void clocksource_watchdog(unsigned long data)
 
         resumed = test_and_clear_bit(0, &watchdog_resumed);
 
-        wdnow = watchdog->read();
+        wdnow = watchdog->read(watchdog);
         wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
         watchdog_last = wdnow;
 
         list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
-                csnow = cs->read();
+                csnow = cs->read(cs);
 
                 if (unlikely(resumed)) {
                         cs->wd_last = csnow;
@@ -247,7 +247,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
 
                 list_add(&cs->wd_list, &watchdog_list);
                 if (!started && watchdog) {
-                        watchdog_last = watchdog->read();
+                        watchdog_last = watchdog->read(watchdog);
                         watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
                         add_timer_on(&watchdog_timer,
                                      cpumask_first(cpu_online_mask));
@@ -268,7 +268,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
                         cse->flags &= ~CLOCK_SOURCE_WATCHDOG;
                 /* Start if list is not empty */
                 if (!list_empty(&watchdog_list)) {
-                        watchdog_last = watchdog->read();
+                        watchdog_last = watchdog->read(watchdog);
                         watchdog_timer.expires =
                                 jiffies + WATCHDOG_INTERVAL;
                         add_timer_on(&watchdog_timer,
@@ -50,7 +50,7 @@
 */
 #define JIFFIES_SHIFT 8
 
-static cycle_t jiffies_read(void)
+static cycle_t jiffies_read(struct clocksource *cs)
 {
         return (cycle_t) jiffies;
 }