Merge branch 'clockevents/3.13' of git://git.linaro.org/people/dlezcano/linux into timers/core

Pull (mostly) ARM clocksource driver updates from Daniel Lezcano:

" - Soren Brinkmann added FEAT_PERCPU to a clock device when it is local
    per cpu. This feature prevents the clock framework to choose a per cpu
    timer as a broadcast timer. This problem arised when the ARM global
    timer is used when switching to the broadcast timer which is the case
    now on Xillinx with its cpuidle driver.

  - Stephen Boyd extended the generic sched_clock code to support 64bit
    counters and removes the setup_sched_clock deprecation, as that causes
    lots of warnings since there's still users in the arch/arm tree. He
    added also the CLOCK_SOURCE_SUSPEND_NONSTOP flag on the architected
    timer as they continue counting during suspend.

  - Uwe Kleine-König added some missing __init sections and consolidated the
    code by moving the of_node_put call from the drivers to the function
    clocksource_of_init. "

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2013-10-03 07:55:07 +02:00
commit 68e9074028
19 changed files with 41 additions and 38 deletions

View File

@ -92,6 +92,14 @@
};
};
+global_timer: timer@f8f00200 {
+	compatible = "arm,cortex-a9-global-timer";
+	reg = <0xf8f00200 0x20>;
+	interrupts = <1 11 0x301>;
+	interrupt-parent = <&intc>;
+	clocks = <&clkc 4>;
+};
ttc0: ttc0@f8001000 {
interrupt-parent = <&intc>;
interrupts = < 0 10 4 0 11 4 0 12 4 >;

View File

@ -274,7 +274,6 @@ static void __init msm_dt_timer_init(struct device_node *np)
pr_err("Unknown frequency\n");
return;
}
-of_node_put(np);
event_base = base + 0x4;
sts_base = base + 0x88;

View File

@ -13,5 +13,6 @@ config ARCH_ZYNQ
select HAVE_SMP
select SPARSE_IRQ
select CADENCE_TTC_TIMER
+select ARM_GLOBAL_TIMER
help
Support for Xilinx Zynq ARM Cortex A9 Platform

View File

@ -389,7 +389,7 @@ static struct clocksource clocksource_counter = {
.rating = 400,
.read = arch_counter_read,
.mask = CLOCKSOURCE_MASK(56),
-.flags = CLOCK_SOURCE_IS_CONTINUOUS,
+.flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
};
static struct cyclecounter cyclecounter = {
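
For context, a minimal driver-side sketch (hypothetical, not part of this series) of a clocksource whose counter keeps running across suspend and therefore advertises CLOCK_SOURCE_SUSPEND_NONSTOP alongside CLOCK_SOURCE_IS_CONTINUOUS; the names example_cs, example_read and cnt_base are illustrative only:

	#include <linux/clocksource.h>
	#include <linux/io.h>

	static void __iomem *cnt_base;	/* assumed MMIO base of a free-running counter */

	static cycle_t example_read(struct clocksource *cs)
	{
		return readl_relaxed(cnt_base);
	}

	static struct clocksource example_cs = {
		.name	= "example-nonstop",
		.rating	= 300,
		.read	= example_read,
		.mask	= CLOCKSOURCE_MASK(32),
		/* keeps counting in suspend, so timekeeping may use it to account sleep time */
		.flags	= CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
	};

	/* registered from the driver's init path, e.g.:
	 *	clocksource_register_hz(&example_cs, rate);
	 */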

View File

@ -169,7 +169,8 @@ static int gt_clockevents_init(struct clock_event_device *clk)
int cpu = smp_processor_id();
clk->name = "arm_global_timer";
-clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
+	CLOCK_EVT_FEAT_PERCPU;
clk->set_mode = gt_clockevent_set_mode;
clk->set_next_event = gt_clockevent_set_next_event;
clk->cpumask = cpumask_of(cpu);
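
A hedged sketch of the driver side of CLOCK_EVT_FEAT_PERCPU: a strictly per-cpu tick device sets the flag so the broadcast code will never pick it as the broadcast device. Everything named example_* below is illustrative and not from this series:

	#include <linux/clockchips.h>
	#include <linux/cpumask.h>
	#include <linux/percpu.h>
	#include <linux/smp.h>

	static DEFINE_PER_CPU(struct clock_event_device, example_evt);

	static void example_set_mode(enum clock_event_mode mode,
				     struct clock_event_device *evt)
	{
		/* program the per-cpu comparator; left empty in this sketch */
	}

	static int example_set_next_event(unsigned long delta,
					  struct clock_event_device *evt)
	{
		/* arm the per-cpu comparator 'delta' cycles ahead; left empty here */
		return 0;
	}

	/* called on each cpu, e.g. from a CPU notifier */
	static void example_percpu_timer_setup(unsigned long rate)
	{
		struct clock_event_device *evt = this_cpu_ptr(&example_evt);

		evt->name		= "example-percpu-timer";
		evt->features		= CLOCK_EVT_FEAT_PERIODIC |
					  CLOCK_EVT_FEAT_ONESHOT |
					  CLOCK_EVT_FEAT_PERCPU;	/* never a broadcast device */
		evt->rating		= 300;
		evt->set_mode		= example_set_mode;
		evt->set_next_event	= example_set_next_event;
		evt->cpumask		= cpumask_of(smp_processor_id());

		clockevents_config_and_register(evt, rate, 1, 0xffffffff);
	}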

View File

@ -49,7 +49,7 @@ struct bcm2835_timer {
static void __iomem *system_clock __read_mostly;
-static u32 notrace bcm2835_sched_read(void)
+static u64 notrace bcm2835_sched_read(void)
{
return readl_relaxed(system_clock);
}
@ -110,7 +110,7 @@ static void __init bcm2835_timer_init(struct device_node *node)
panic("Can't read clock-frequency");
system_clock = base + REG_COUNTER_LO;
-setup_sched_clock(bcm2835_sched_read, 32, freq);
+sched_clock_register(bcm2835_sched_read, 32, freq);
clocksource_mmio_init(base + REG_COUNTER_LO, node->name,
freq, 300, 32, clocksource_mmio_readl_up);
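
The conversion pattern repeated in the drivers below, shown as a hypothetical standalone sketch: the sched_clock read hook now returns u64 and is registered with sched_clock_register() instead of the 32-bit-only setup_sched_clock(). The example_* names and the 32-bit counter width are assumptions of the sketch:

	#include <linux/init.h>
	#include <linux/io.h>
	#include <linux/kernel.h>
	#include <linux/sched_clock.h>

	static void __iomem *example_counter;	/* assumed free-running 32-bit counter */

	static u64 notrace example_sched_read(void)
	{
		/* the hardware is still 32 bits wide; the core extends the value
		 * based on the 'bits' argument passed to sched_clock_register() */
		return readl_relaxed(example_counter);
	}

	static void __init example_sched_clock_init(unsigned long rate)
	{
		sched_clock_register(example_sched_read, 32, rate);
	}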

View File

@ -53,7 +53,7 @@ static struct clocksource clocksource_dbx500_prcmu = {
#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
-static u32 notrace dbx500_prcmu_sched_clock_read(void)
+static u64 notrace dbx500_prcmu_sched_clock_read(void)
{
if (unlikely(!clksrc_dbx500_timer_base))
return 0;
@ -81,8 +81,7 @@ void __init clksrc_dbx500_prcmu_init(void __iomem *base)
clksrc_dbx500_timer_base + PRCMU_TIMER_REF);
}
#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
-setup_sched_clock(dbx500_prcmu_sched_clock_read,
-	32, RATE_32K);
+sched_clock_register(dbx500_prcmu_sched_clock_read, 32, RATE_32K);
#endif
clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K);
}

View File

@ -35,5 +35,6 @@ void __init clocksource_of_init(void)
init_func = match->data;
init_func(np);
+of_node_put(np);
}
}
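
For reference, a rough sketch of the shape of clocksource_of_init() after this change (simplified, not the exact upstream code): the reference on each matching node is now dropped centrally, which is why the of_node_put() calls disappear from the individual drivers in the hunks above and below.

	#include <linux/clocksource.h>
	#include <linux/init.h>
	#include <linux/of.h>

	/* linker-generated table of CLOCKSOURCE_OF_DECLARE() entries */
	extern struct of_device_id __clksrc_of_table[];

	void __init clocksource_of_init(void)
	{
		struct device_node *np;
		const struct of_device_id *match;
		void (*init_func)(struct device_node *);

		for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
			init_func = match->data;
			init_func(np);
			of_node_put(np);	/* consolidated here instead of in every driver */
		}
	}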

View File

@ -23,7 +23,7 @@
#include <linux/clk.h>
#include <linux/sched_clock.h>
-static void timer_get_base_and_rate(struct device_node *np,
+static void __init timer_get_base_and_rate(struct device_node *np,
void __iomem **base, u32 *rate)
{
struct clk *timer_clk;
@ -59,7 +59,7 @@ try_clock_freq:
panic("No clock nor clock-frequency property for %s", np->name);
}
-static void add_clockevent(struct device_node *event_timer)
+static void __init add_clockevent(struct device_node *event_timer)
{
void __iomem *iobase;
struct dw_apb_clock_event_device *ced;
@ -82,7 +82,7 @@ static void add_clockevent(struct device_node *event_timer)
static void __iomem *sched_io_base;
static u32 sched_rate;
-static void add_clocksource(struct device_node *source_timer)
+static void __init add_clocksource(struct device_node *source_timer)
{
void __iomem *iobase;
struct dw_apb_clocksource *cs;
@ -106,7 +106,7 @@ static void add_clocksource(struct device_node *source_timer)
sched_rate = rate;
}
-static u32 read_sched_clock(void)
+static u64 read_sched_clock(void)
{
return __raw_readl(sched_io_base);
}
@ -117,7 +117,7 @@ static const struct of_device_id sptimer_ids[] __initconst = {
{ /* Sentinel */ },
};
-static void init_sched_clock(void)
+static void __init init_sched_clock(void)
{
struct device_node *sched_timer;
@ -128,7 +128,7 @@ static void init_sched_clock(void)
of_node_put(sched_timer);
}
-setup_sched_clock(read_sched_clock, 32, sched_rate);
+sched_clock_register(read_sched_clock, 32, sched_rate);
}
static int num_called;
@ -138,12 +138,10 @@ static void __init dw_apb_timer_init(struct device_node *timer)
case 0:
pr_debug("%s: found clockevent timer\n", __func__);
add_clockevent(timer);
-of_node_put(timer);
break;
case 1:
pr_debug("%s: found clocksource timer\n", __func__);
add_clocksource(timer);
-of_node_put(timer);
init_sched_clock();
break;
default:
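
A hypothetical driver-side sketch tying the two themes of this file together: the DT init hook is __init (it is only reached from the __init clocksource_of_init() path at boot) and no longer puts the node it is handed. The "vendor,example-timer" compatible and the example_* names are made up for illustration:

	#include <linux/clocksource.h>
	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/of.h>
	#include <linux/of_address.h>

	static void __init example_timer_init(struct device_node *np)
	{
		void __iomem *base = of_iomap(np, 0);

		if (!base) {
			pr_err("%s: unable to map timer registers\n", __func__);
			return;			/* no of_node_put() needed any more */
		}

		/* clocksource/clockevent registration elided in this sketch */
	}
	CLOCKSOURCE_OF_DECLARE(example_timer, "vendor,example-timer", example_timer_init);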

View File

@ -222,7 +222,7 @@ static struct clocksource clocksource_mxs = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static u32 notrace mxs_read_sched_clock_v2(void)
+static u64 notrace mxs_read_sched_clock_v2(void)
{
return ~readl_relaxed(mxs_timrot_base + HW_TIMROT_RUNNING_COUNTn(1));
}
@ -236,7 +236,7 @@ static int __init mxs_clocksource_init(struct clk *timer_clk)
else {
clocksource_mmio_init(mxs_timrot_base + HW_TIMROT_RUNNING_COUNTn(1),
"mxs_timer", c, 200, 32, clocksource_mmio_readl_down);
-setup_sched_clock(mxs_read_sched_clock_v2, 32, c);
+sched_clock_register(mxs_read_sched_clock_v2, 32, c);
}
return 0;

View File

@ -76,7 +76,7 @@ static struct delay_timer mtu_delay_timer;
* local implementation which uses the clocksource to get some
* better resolution when scheduling the kernel.
*/
-static u32 notrace nomadik_read_sched_clock(void)
+static u64 notrace nomadik_read_sched_clock(void)
{
if (unlikely(!mtu_base))
return 0;
@ -231,7 +231,7 @@ static void __init __nmdk_timer_init(void __iomem *base, int irq,
"mtu_0");
#ifdef CONFIG_CLKSRC_NOMADIK_MTU_SCHED_CLOCK
-setup_sched_clock(nomadik_read_sched_clock, 32, rate);
+sched_clock_register(nomadik_read_sched_clock, 32, rate);
#endif
/* Timer 1 is used for events, register irq and clockevents */

View File

@ -331,7 +331,7 @@ static struct clocksource samsung_clocksource = {
* this wraps around for now, since it is just a relative time
* stamp. (Inspired by U300 implementation.)
*/
-static u32 notrace samsung_read_sched_clock(void)
+static u64 notrace samsung_read_sched_clock(void)
{
return samsung_clocksource_read(NULL);
}
@ -357,7 +357,7 @@ static void __init samsung_clocksource_init(void)
else
pwm.source_reg = pwm.base + pwm.source_id * 0x0c + 0x14;
-setup_sched_clock(samsung_read_sched_clock,
+sched_clock_register(samsung_read_sched_clock,
pwm.variant.bits, clock_rate);
samsung_clocksource.mask = CLOCKSOURCE_MASK(pwm.variant.bits);

View File

@ -98,7 +98,7 @@ static struct clock_event_device tegra_clockevent = {
.set_mode = tegra_timer_set_mode,
};
-static u32 notrace tegra_read_sched_clock(void)
+static u64 notrace tegra_read_sched_clock(void)
{
return timer_readl(TIMERUS_CNTR_1US);
}
@ -181,8 +181,6 @@ static void __init tegra20_init_timer(struct device_node *np)
rate = clk_get_rate(clk);
}
-of_node_put(np);
switch (rate) {
case 12000000:
timer_writel(0x000b, TIMERUS_USEC_CFG);
@ -200,7 +198,7 @@ static void __init tegra20_init_timer(struct device_node *np)
WARN(1, "Unknown clock rate");
}
-setup_sched_clock(tegra_read_sched_clock, 32, 1000000);
+sched_clock_register(tegra_read_sched_clock, 32, 1000000);
if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
"timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) {
@ -241,8 +239,6 @@ static void __init tegra20_init_rtc(struct device_node *np)
else
clk_prepare_enable(clk);
-of_node_put(np);
register_persistent_clock(NULL, tegra_read_persistent_clock);
}
CLOCKSOURCE_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);

View File

@ -96,7 +96,7 @@ static void local_timer_ctrl_clrset(u32 clr, u32 set)
local_base + TIMER_CTRL_OFF);
}
-static u32 notrace armada_370_xp_read_sched_clock(void)
+static u64 notrace armada_370_xp_read_sched_clock(void)
{
return ~readl(timer_base + TIMER0_VAL_OFF);
}
@ -258,7 +258,7 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
/*
* Set scale and timer for sched_clock.
*/
-setup_sched_clock(armada_370_xp_read_sched_clock, 32, timer_clk);
+sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
/*
* Setup free-running clocksource timer (interrupts

View File

@ -165,9 +165,9 @@ static struct irqaction sirfsoc_timer_irq = {
};
/* Overwrite weak default sched_clock with more precise one */
-static u32 notrace sirfsoc_read_sched_clock(void)
+static u64 notrace sirfsoc_read_sched_clock(void)
{
-return (u32)(sirfsoc_timer_read(NULL) & 0xffffffff);
+return sirfsoc_timer_read(NULL);
}
static void __init sirfsoc_clockevent_init(void)
@ -206,7 +206,7 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np)
BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, CLOCK_TICK_RATE));
-setup_sched_clock(sirfsoc_read_sched_clock, 32, CLOCK_TICK_RATE);
+sched_clock_register(sirfsoc_read_sched_clock, 64, CLOCK_TICK_RATE);
BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq));

View File

@ -52,7 +52,7 @@ static inline void pit_irq_acknowledge(void)
__raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
}
-static unsigned int pit_read_sched_clock(void)
+static u64 pit_read_sched_clock(void)
{
return __raw_readl(clksrc_base + PITCVAL);
}
@ -64,7 +64,7 @@ static int __init pit_clocksource_init(unsigned long rate)
__raw_writel(~0UL, clksrc_base + PITLDVAL);
__raw_writel(PITTCTRL_TEN, clksrc_base + PITTCTRL);
-setup_sched_clock(pit_read_sched_clock, 32, rate);
+sched_clock_register(pit_read_sched_clock, 32, rate);
return clocksource_mmio_init(clksrc_base + PITCVAL, "vf-pit", rate,
300, 32, clocksource_mmio_readl_down);
}

View File

@ -137,14 +137,12 @@ static void __init vt8500_timer_init(struct device_node *np)
if (!regbase) {
pr_err("%s: Missing iobase description in Device Tree\n",
__func__);
-of_node_put(np);
return;
}
timer_irq = irq_of_parse_and_map(np, 0);
if (!timer_irq) {
pr_err("%s: Missing irq description in Device Tree\n",
__func__);
-of_node_put(np);
return;
}

View File

@ -60,6 +60,7 @@ enum clock_event_mode {
* Core shall set the interrupt affinity dynamically in broadcast mode
*/
#define CLOCK_EVT_FEAT_DYNIRQ 0x000020
+#define CLOCK_EVT_FEAT_PERCPU 0x000040
/**
* struct clock_event_device - clock event device descriptor

View File

@ -70,6 +70,7 @@ static bool tick_check_broadcast_device(struct clock_event_device *curdev,
struct clock_event_device *newdev)
{
if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
+(newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
(newdev->features & CLOCK_EVT_FEAT_C3STOP))
return false;
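
Spelled out as a condensed, hedged restatement (the helper name is illustrative, not a kernel symbol), the rule above means a device can only serve as the broadcast device if it is neither dummy, nor per-cpu, nor affected by deep C-states:

	#include <linux/clockchips.h>

	static bool example_may_be_broadcast(struct clock_event_device *dev)
	{
		if (dev->features & CLOCK_EVT_FEAT_DUMMY)
			return false;	/* cannot deliver real timer interrupts */
		if (dev->features & CLOCK_EVT_FEAT_PERCPU)
			return false;	/* strictly local, cannot wake other cpus */
		if (dev->features & CLOCK_EVT_FEAT_C3STOP)
			return false;	/* stops in deep idle, which broadcast must cover */
		return true;
	}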