Diffstat (limited to 'drivers/clocksource')
-rw-r--r--   drivers/clocksource/Kconfig                |    1
-rw-r--r--   drivers/clocksource/Makefile               |    1
-rw-r--r--   drivers/clocksource/acpi_pm.c              |    4
-rw-r--r--   drivers/clocksource/arm_arch_timer.c       |  447
-rw-r--r--   drivers/clocksource/bcm_kona_timer.c       |    8
-rw-r--r--   drivers/clocksource/cadence_ttc_timer.c    |   13
-rw-r--r--   drivers/clocksource/exynos_mct.c           |   58
-rw-r--r--   drivers/clocksource/moxart_timer.c         |  165
-rw-r--r--   drivers/clocksource/samsung_pwm_timer.c    |  108
-rw-r--r--   drivers/clocksource/sun4i_timer.c          |  110
-rw-r--r--   drivers/clocksource/time-armada-370-xp.c   |   92
-rw-r--r--   drivers/clocksource/time-orion.c           |    2
-rw-r--r--   drivers/clocksource/timer-marco.c          |   98
13 files changed, 841 insertions, 266 deletions
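Most of the churn below is in arm_arch_timer.c, which gains a memory-mapped (MMIO) timer frame next to the existing cp15 interface. The frame's 64-bit counter is exposed as two 32-bit registers (CNTVCT_LO/CNTVCT_HI) and is read with a hi/lo/hi retry loop so that a carry between the two halves cannot produce a torn value. A minimal sketch of that read, reusing the patch's register offsets but with a hypothetical frame_base pointer standing in for the ioremap()ed frame:

#include <linux/io.h>
#include <linux/types.h>

#define CNTVCT_LO	0x08
#define CNTVCT_HI	0x0c

/* frame_base stands in for the mapped timer frame (best_frame in the patch). */
static u64 read_cntvct_mem(void __iomem *frame_base)
{
	u32 lo, hi, tmp_hi;

	/* Re-read the high word until it is stable across the low-word read. */
	do {
		hi = readl_relaxed(frame_base + CNTVCT_HI);
		lo = readl_relaxed(frame_base + CNTVCT_LO);
		tmp_hi = readl_relaxed(frame_base + CNTVCT_HI);
	} while (hi != tmp_hi);

	return ((u64)hi << 32) | lo;
}

The same retry loop appears in the patch as arch_counter_get_cntvct_mem().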
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index b7b9b040a89b..41c69469ce20 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -99,7 +99,6 @@ config CLKSRC_EXYNOS_MCT config CLKSRC_SAMSUNG_PWM bool - select CLKSRC_MMIO help This is a new clocksource driver for the PWM timer found in Samsung S3C, S5P and Exynos SoCs, replacing an earlier driver diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 8b00c5cebfa4..704d6d342adc 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o obj-$(CONFIG_ORION_TIMER) += time-orion.o obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o obj-$(CONFIG_ARCH_MARCO) += timer-marco.o +obj-$(CONFIG_ARCH_MOXART) += moxart_timer.o obj-$(CONFIG_ARCH_MXS) += mxs_timer.o obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c index 6efe4d1ab3aa..6eab88985670 100644 --- a/drivers/clocksource/acpi_pm.c +++ b/drivers/clocksource/acpi_pm.c @@ -200,14 +200,14 @@ static int __init init_acpi_pm_clocksource(void) if ((value2 < value1) && ((value2) < 0xFFF)) break; printk(KERN_INFO "PM-Timer had inconsistent results:" - " 0x%#llx, 0x%#llx - aborting.\n", + " %#llx, %#llx - aborting.\n", value1, value2); pmtmr_ioport = 0; return -EINVAL; } if (i == ACPI_PM_READ_CHECKS) { printk(KERN_INFO "PM-Timer failed consistency check " - " (0x%#llx) - aborting.\n", value1); + " (%#llx) - aborting.\n", value1); pmtmr_ioport = 0; return -ENODEV; } diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index ffadd836e0b5..fbd9ccd5e114 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -16,13 +16,39 @@ #include <linux/clockchips.h> #include <linux/interrupt.h> #include <linux/of_irq.h> +#include <linux/of_address.h> #include <linux/io.h> +#include <linux/slab.h> #include <asm/arch_timer.h> #include <asm/virt.h> #include <clocksource/arm_arch_timer.h> +#define CNTTIDR 0x08 +#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4)) + +#define CNTVCT_LO 0x08 +#define CNTVCT_HI 0x0c +#define CNTFRQ 0x10 +#define CNTP_TVAL 0x28 +#define CNTP_CTL 0x2c +#define CNTV_TVAL 0x38 +#define CNTV_CTL 0x3c + +#define ARCH_CP15_TIMER BIT(0) +#define ARCH_MEM_TIMER BIT(1) +static unsigned arch_timers_present __initdata; + +static void __iomem *arch_counter_base; + +struct arch_timer { + void __iomem *base; + struct clock_event_device evt; +}; + +#define to_arch_timer(e) container_of(e, struct arch_timer, evt) + static u32 arch_timer_rate; enum ppi_nr { @@ -38,19 +64,83 @@ static int arch_timer_ppi[MAX_TIMER_PPI]; static struct clock_event_device __percpu *arch_timer_evt; static bool arch_timer_use_virtual = true; +static bool arch_timer_mem_use_virtual; /* * Architected system timer support. 
*/ -static inline irqreturn_t timer_handler(const int access, +static __always_inline +void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val, + struct clock_event_device *clk) +{ + if (access == ARCH_TIMER_MEM_PHYS_ACCESS) { + struct arch_timer *timer = to_arch_timer(clk); + switch (reg) { + case ARCH_TIMER_REG_CTRL: + writel_relaxed(val, timer->base + CNTP_CTL); + break; + case ARCH_TIMER_REG_TVAL: + writel_relaxed(val, timer->base + CNTP_TVAL); + break; + } + } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) { + struct arch_timer *timer = to_arch_timer(clk); + switch (reg) { + case ARCH_TIMER_REG_CTRL: + writel_relaxed(val, timer->base + CNTV_CTL); + break; + case ARCH_TIMER_REG_TVAL: + writel_relaxed(val, timer->base + CNTV_TVAL); + break; + } + } else { + arch_timer_reg_write_cp15(access, reg, val); + } +} + +static __always_inline +u32 arch_timer_reg_read(int access, enum arch_timer_reg reg, + struct clock_event_device *clk) +{ + u32 val; + + if (access == ARCH_TIMER_MEM_PHYS_ACCESS) { + struct arch_timer *timer = to_arch_timer(clk); + switch (reg) { + case ARCH_TIMER_REG_CTRL: + val = readl_relaxed(timer->base + CNTP_CTL); + break; + case ARCH_TIMER_REG_TVAL: + val = readl_relaxed(timer->base + CNTP_TVAL); + break; + } + } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) { + struct arch_timer *timer = to_arch_timer(clk); + switch (reg) { + case ARCH_TIMER_REG_CTRL: + val = readl_relaxed(timer->base + CNTV_CTL); + break; + case ARCH_TIMER_REG_TVAL: + val = readl_relaxed(timer->base + CNTV_TVAL); + break; + } + } else { + val = arch_timer_reg_read_cp15(access, reg); + } + + return val; +} + +static __always_inline irqreturn_t timer_handler(const int access, struct clock_event_device *evt) { unsigned long ctrl; - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL); + + ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt); if (ctrl & ARCH_TIMER_CTRL_IT_STAT) { ctrl |= ARCH_TIMER_CTRL_IT_MASK; - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl); + arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt); evt->event_handler(evt); return IRQ_HANDLED; } @@ -72,15 +162,30 @@ static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id) return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt); } -static inline void timer_set_mode(const int access, int mode) +static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id) +{ + struct clock_event_device *evt = dev_id; + + return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt); +} + +static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id) +{ + struct clock_event_device *evt = dev_id; + + return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt); +} + +static __always_inline void timer_set_mode(const int access, int mode, + struct clock_event_device *clk) { unsigned long ctrl; switch (mode) { case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_SHUTDOWN: - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL); + ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); ctrl &= ~ARCH_TIMER_CTRL_ENABLE; - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl); + arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); break; default: break; @@ -90,60 +195,108 @@ static inline void timer_set_mode(const int access, int mode) static void arch_timer_set_mode_virt(enum clock_event_mode mode, struct clock_event_device *clk) { - timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode); + timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode, clk); } static void arch_timer_set_mode_phys(enum clock_event_mode mode, struct 
clock_event_device *clk) { - timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode); + timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode, clk); +} + +static void arch_timer_set_mode_virt_mem(enum clock_event_mode mode, + struct clock_event_device *clk) +{ + timer_set_mode(ARCH_TIMER_MEM_VIRT_ACCESS, mode, clk); } -static inline void set_next_event(const int access, unsigned long evt) +static void arch_timer_set_mode_phys_mem(enum clock_event_mode mode, + struct clock_event_device *clk) +{ + timer_set_mode(ARCH_TIMER_MEM_PHYS_ACCESS, mode, clk); +} + +static __always_inline void set_next_event(const int access, unsigned long evt, + struct clock_event_device *clk) { unsigned long ctrl; - ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL); + ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); ctrl |= ARCH_TIMER_CTRL_ENABLE; ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; - arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt); - arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl); + arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk); + arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); } static int arch_timer_set_next_event_virt(unsigned long evt, - struct clock_event_device *unused) + struct clock_event_device *clk) { - set_next_event(ARCH_TIMER_VIRT_ACCESS, evt); + set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk); return 0; } static int arch_timer_set_next_event_phys(unsigned long evt, - struct clock_event_device *unused) + struct clock_event_device *clk) { - set_next_event(ARCH_TIMER_PHYS_ACCESS, evt); + set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk); return 0; } -static int arch_timer_setup(struct clock_event_device *clk) +static int arch_timer_set_next_event_virt_mem(unsigned long evt, + struct clock_event_device *clk) { - clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP; - clk->name = "arch_sys_timer"; - clk->rating = 450; - if (arch_timer_use_virtual) { - clk->irq = arch_timer_ppi[VIRT_PPI]; - clk->set_mode = arch_timer_set_mode_virt; - clk->set_next_event = arch_timer_set_next_event_virt; + set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk); + return 0; +} + +static int arch_timer_set_next_event_phys_mem(unsigned long evt, + struct clock_event_device *clk) +{ + set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk); + return 0; +} + +static void __arch_timer_setup(unsigned type, + struct clock_event_device *clk) +{ + clk->features = CLOCK_EVT_FEAT_ONESHOT; + + if (type == ARCH_CP15_TIMER) { + clk->features |= CLOCK_EVT_FEAT_C3STOP; + clk->name = "arch_sys_timer"; + clk->rating = 450; + clk->cpumask = cpumask_of(smp_processor_id()); + if (arch_timer_use_virtual) { + clk->irq = arch_timer_ppi[VIRT_PPI]; + clk->set_mode = arch_timer_set_mode_virt; + clk->set_next_event = arch_timer_set_next_event_virt; + } else { + clk->irq = arch_timer_ppi[PHYS_SECURE_PPI]; + clk->set_mode = arch_timer_set_mode_phys; + clk->set_next_event = arch_timer_set_next_event_phys; + } } else { - clk->irq = arch_timer_ppi[PHYS_SECURE_PPI]; - clk->set_mode = arch_timer_set_mode_phys; - clk->set_next_event = arch_timer_set_next_event_phys; + clk->name = "arch_mem_timer"; + clk->rating = 400; + clk->cpumask = cpu_all_mask; + if (arch_timer_mem_use_virtual) { + clk->set_mode = arch_timer_set_mode_virt_mem; + clk->set_next_event = + arch_timer_set_next_event_virt_mem; + } else { + clk->set_mode = arch_timer_set_mode_phys_mem; + clk->set_next_event = + arch_timer_set_next_event_phys_mem; + } } - clk->cpumask = cpumask_of(smp_processor_id()); + clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, clk); - 
clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL); + clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff); +} - clockevents_config_and_register(clk, arch_timer_rate, - 0xf, 0x7fffffff); +static int arch_timer_setup(struct clock_event_device *clk) +{ + __arch_timer_setup(ARCH_CP15_TIMER, clk); if (arch_timer_use_virtual) enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0); @@ -158,27 +311,41 @@ static int arch_timer_setup(struct clock_event_device *clk) return 0; } -static int arch_timer_available(void) +static void +arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np) { - u32 freq; - - if (arch_timer_rate == 0) { - freq = arch_timer_get_cntfrq(); - - /* Check the timer frequency. */ - if (freq == 0) { - pr_warn("Architected timer frequency not available\n"); - return -EINVAL; - } + /* Who has more than one independent system counter? */ + if (arch_timer_rate) + return; - arch_timer_rate = freq; + /* Try to determine the frequency from the device tree or CNTFRQ */ + if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) { + if (cntbase) + arch_timer_rate = readl_relaxed(cntbase + CNTFRQ); + else + arch_timer_rate = arch_timer_get_cntfrq(); } - pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n", + /* Check the timer frequency. */ + if (arch_timer_rate == 0) + pr_warn("Architected timer frequency not available\n"); +} + +static void arch_timer_banner(unsigned type) +{ + pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n", + type & ARCH_CP15_TIMER ? "cp15" : "", + type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? " and " : "", + type & ARCH_MEM_TIMER ? "mmio" : "", (unsigned long)arch_timer_rate / 1000000, (unsigned long)(arch_timer_rate / 10000) % 100, - arch_timer_use_virtual ? "virt" : "phys"); - return 0; + type & ARCH_CP15_TIMER ? + arch_timer_use_virtual ? "virt" : "phys" : + "", + type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? "/" : "", + type & ARCH_MEM_TIMER ? + arch_timer_mem_use_virtual ? "virt" : "phys" : + ""); } u32 arch_timer_get_rate(void) @@ -186,19 +353,35 @@ u32 arch_timer_get_rate(void) return arch_timer_rate; } -u64 arch_timer_read_counter(void) +static u64 arch_counter_get_cntvct_mem(void) { - return arch_counter_get_cntvct(); + u32 vct_lo, vct_hi, tmp_hi; + + do { + vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI); + vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO); + tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI); + } while (vct_hi != tmp_hi); + + return ((u64) vct_hi << 32) | vct_lo; } +/* + * Default to cp15 based access because arm64 uses this function for + * sched_clock() before DT is probed and the cp15 method is guaranteed + * to exist on arm64. arm doesn't use this before DT is probed so even + * if we don't have the cp15 accessors we won't have a problem. 
+ */ +u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct; + static cycle_t arch_counter_read(struct clocksource *cs) { - return arch_counter_get_cntvct(); + return arch_timer_read_counter(); } static cycle_t arch_counter_read_cc(const struct cyclecounter *cc) { - return arch_counter_get_cntvct(); + return arch_timer_read_counter(); } static struct clocksource clocksource_counter = { @@ -221,6 +404,23 @@ struct timecounter *arch_timer_get_timecounter(void) return &timecounter; } +static void __init arch_counter_register(unsigned type) +{ + u64 start_count; + + /* Register the CP15 based counter if we have one */ + if (type & ARCH_CP15_TIMER) + arch_timer_read_counter = arch_counter_get_cntvct; + else + arch_timer_read_counter = arch_counter_get_cntvct_mem; + + start_count = arch_timer_read_counter(); + clocksource_register_hz(&clocksource_counter, arch_timer_rate); + cyclecounter.mult = clocksource_counter.mult; + cyclecounter.shift = clocksource_counter.shift; + timecounter_init(&timecounter, &cyclecounter, start_count); +} + static void arch_timer_stop(struct clock_event_device *clk) { pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n", @@ -265,22 +465,12 @@ static int __init arch_timer_register(void) int err; int ppi; - err = arch_timer_available(); - if (err) - goto out; - arch_timer_evt = alloc_percpu(struct clock_event_device); if (!arch_timer_evt) { err = -ENOMEM; goto out; } - clocksource_register_hz(&clocksource_counter, arch_timer_rate); - cyclecounter.mult = clocksource_counter.mult; - cyclecounter.shift = clocksource_counter.shift; - timecounter_init(&timecounter, &cyclecounter, - arch_counter_get_cntvct()); - if (arch_timer_use_virtual) { ppi = arch_timer_ppi[VIRT_PPI]; err = request_percpu_irq(ppi, arch_timer_handler_virt, @@ -331,24 +521,77 @@ out: return err; } +static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq) +{ + int ret; + irq_handler_t func; + struct arch_timer *t; + + t = kzalloc(sizeof(*t), GFP_KERNEL); + if (!t) + return -ENOMEM; + + t->base = base; + t->evt.irq = irq; + __arch_timer_setup(ARCH_MEM_TIMER, &t->evt); + + if (arch_timer_mem_use_virtual) + func = arch_timer_handler_virt_mem; + else + func = arch_timer_handler_phys_mem; + + ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt); + if (ret) { + pr_err("arch_timer: Failed to request mem timer irq\n"); + kfree(t); + } + + return ret; +} + +static const struct of_device_id arch_timer_of_match[] __initconst = { + { .compatible = "arm,armv7-timer", }, + { .compatible = "arm,armv8-timer", }, + {}, +}; + +static const struct of_device_id arch_timer_mem_of_match[] __initconst = { + { .compatible = "arm,armv7-timer-mem", }, + {}, +}; + +static void __init arch_timer_common_init(void) +{ + unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER; + + /* Wait until both nodes are probed if we have two timers */ + if ((arch_timers_present & mask) != mask) { + if (of_find_matching_node(NULL, arch_timer_mem_of_match) && + !(arch_timers_present & ARCH_MEM_TIMER)) + return; + if (of_find_matching_node(NULL, arch_timer_of_match) && + !(arch_timers_present & ARCH_CP15_TIMER)) + return; + } + + arch_timer_banner(arch_timers_present); + arch_counter_register(arch_timers_present); + arch_timer_arch_init(); +} + static void __init arch_timer_init(struct device_node *np) { - u32 freq; int i; - if (arch_timer_get_rate()) { + if (arch_timers_present & ARCH_CP15_TIMER) { pr_warn("arch_timer: multiple nodes in dt, skipping\n"); return; } - /* Try to determine the frequency from the 
device tree or CNTFRQ */ - if (!of_property_read_u32(np, "clock-frequency", &freq)) - arch_timer_rate = freq; - + arch_timers_present |= ARCH_CP15_TIMER; for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++) arch_timer_ppi[i] = irq_of_parse_and_map(np, i); - - of_node_put(np); + arch_timer_detect_rate(NULL, np); /* * If HYP mode is available, we know that the physical timer @@ -369,7 +612,73 @@ static void __init arch_timer_init(struct device_node *np) } arch_timer_register(); - arch_timer_arch_init(); + arch_timer_common_init(); } CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_init); CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_init); + +static void __init arch_timer_mem_init(struct device_node *np) +{ + struct device_node *frame, *best_frame = NULL; + void __iomem *cntctlbase, *base; + unsigned int irq; + u32 cnttidr; + + arch_timers_present |= ARCH_MEM_TIMER; + cntctlbase = of_iomap(np, 0); + if (!cntctlbase) { + pr_err("arch_timer: Can't find CNTCTLBase\n"); + return; + } + + cnttidr = readl_relaxed(cntctlbase + CNTTIDR); + iounmap(cntctlbase); + + /* + * Try to find a virtual capable frame. Otherwise fall back to a + * physical capable frame. + */ + for_each_available_child_of_node(np, frame) { + int n; + + if (of_property_read_u32(frame, "frame-number", &n)) { + pr_err("arch_timer: Missing frame-number\n"); + of_node_put(best_frame); + of_node_put(frame); + return; + } + + if (cnttidr & CNTTIDR_VIRT(n)) { + of_node_put(best_frame); + best_frame = frame; + arch_timer_mem_use_virtual = true; + break; + } + of_node_put(best_frame); + best_frame = of_node_get(frame); + } + + base = arch_counter_base = of_iomap(best_frame, 0); + if (!base) { + pr_err("arch_timer: Can't map frame's registers\n"); + of_node_put(best_frame); + return; + } + + if (arch_timer_mem_use_virtual) + irq = irq_of_parse_and_map(best_frame, 1); + else + irq = irq_of_parse_and_map(best_frame, 0); + of_node_put(best_frame); + if (!irq) { + pr_err("arch_timer: Frame missing %s irq", + arch_timer_mem_use_virtual ? 
"virt" : "phys"); + return; + } + + arch_timer_detect_rate(base, np); + arch_timer_mem_register(base, irq); + arch_timer_common_init(); +} +CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem", + arch_timer_mem_init); diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c index ba3d85904c9a..0d7d8c3ed6b2 100644 --- a/drivers/clocksource/bcm_kona_timer.c +++ b/drivers/clocksource/bcm_kona_timer.c @@ -99,7 +99,8 @@ kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw) } static const struct of_device_id bcm_timer_ids[] __initconst = { - {.compatible = "bcm,kona-timer"}, + {.compatible = "brcm,kona-timer"}, + {.compatible = "bcm,kona-timer"}, /* deprecated name */ {}, }; @@ -201,4 +202,9 @@ static void __init kona_timer_init(struct device_node *node) kona_timer_set_next_event((arch_timer_rate / HZ), NULL); } +CLOCKSOURCE_OF_DECLARE(brcm_kona, "brcm,kona-timer", kona_timer_init); +/* + * bcm,kona-timer is deprecated by brcm,kona-timer + * being kept here for driver compatibility + */ CLOCKSOURCE_OF_DECLARE(bcm_kona, "bcm,kona-timer", kona_timer_init); diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c index 4cbe28c74631..b2bb3a4bc205 100644 --- a/drivers/clocksource/cadence_ttc_timer.c +++ b/drivers/clocksource/cadence_ttc_timer.c @@ -21,7 +21,7 @@ #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/slab.h> -#include <linux/clk-provider.h> +#include <linux/sched_clock.h> /* * This driver configures the 2 16-bit count-up timers as follows: @@ -95,6 +95,8 @@ struct ttc_timer_clockevent { #define to_ttc_timer_clkevent(x) \ container_of(x, struct ttc_timer_clockevent, ce) +static void __iomem *ttc_sched_clock_val_reg; + /** * ttc_set_interval - Set the timer interval value * @@ -156,6 +158,11 @@ static cycle_t __ttc_clocksource_read(struct clocksource *cs) TTC_COUNT_VAL_OFFSET); } +static u32 notrace ttc_sched_clock_read(void) +{ + return __raw_readl(ttc_sched_clock_val_reg); +} + /** * ttc_set_next_event - Sets the time interval for next event * @@ -297,6 +304,10 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base) kfree(ttccs); return; } + + ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET; + setup_sched_clock(ttc_sched_clock_read, 16, + clk_get_rate(ttccs->ttc.clk) / PRESCALE); } static int ttc_rate_change_clockevent_cb(struct notifier_block *nb, diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index b2bbc415f120..5b34768f4d7c 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -16,6 +16,7 @@ #include <linux/err.h> #include <linux/clk.h> #include <linux/clockchips.h> +#include <linux/cpu.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/percpu.h> @@ -24,7 +25,6 @@ #include <linux/of_address.h> #include <linux/clocksource.h> -#include <asm/localtimer.h> #include <asm/mach/time.h> #define EXYNOS4_MCTREG(x) (x) @@ -80,7 +80,7 @@ static unsigned int mct_int_type; static int mct_irqs[MCT_NR_IRQS]; struct mct_clock_event_device { - struct clock_event_device *evt; + struct clock_event_device evt; unsigned long base; char name[10]; }; @@ -295,8 +295,6 @@ static void exynos4_clockevent_init(void) setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq); } -#ifdef CONFIG_LOCAL_TIMERS - static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick); /* Clock event handling */ @@ -369,7 +367,7 @@ static inline void exynos4_tick_set_mode(enum 
clock_event_mode mode, static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt) { - struct clock_event_device *evt = mevt->evt; + struct clock_event_device *evt = &mevt->evt; /* * This is for supporting oneshot mode. @@ -391,7 +389,7 @@ static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt) static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id) { struct mct_clock_event_device *mevt = dev_id; - struct clock_event_device *evt = mevt->evt; + struct clock_event_device *evt = &mevt->evt; exynos4_mct_tick_clear(mevt); @@ -405,8 +403,7 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt) struct mct_clock_event_device *mevt; unsigned int cpu = smp_processor_id(); - mevt = this_cpu_ptr(&percpu_mct_tick); - mevt->evt = evt; + mevt = container_of(evt, struct mct_clock_event_device, evt); mevt->base = EXYNOS4_MCT_L_BASE(cpu); sprintf(mevt->name, "mct_tick%d", cpu); @@ -448,14 +445,37 @@ static void exynos4_local_timer_stop(struct clock_event_device *evt) disable_percpu_irq(mct_irqs[MCT_L0_IRQ]); } -static struct local_timer_ops exynos4_mct_tick_ops = { - .setup = exynos4_local_timer_setup, - .stop = exynos4_local_timer_stop, +static int exynos4_mct_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + struct mct_clock_event_device *mevt; + + /* + * Grab cpu pointer in each case to avoid spurious + * preemptible warnings + */ + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_STARTING: + mevt = this_cpu_ptr(&percpu_mct_tick); + exynos4_local_timer_setup(&mevt->evt); + break; + case CPU_DYING: + mevt = this_cpu_ptr(&percpu_mct_tick); + exynos4_local_timer_stop(&mevt->evt); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block exynos4_mct_cpu_nb = { + .notifier_call = exynos4_mct_cpu_notify, }; -#endif /* CONFIG_LOCAL_TIMERS */ static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base) { + int err; + struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); struct clk *mct_clk, *tick_clk; tick_clk = np ? of_clk_get_by_name(np, "fin_pll") : @@ -473,9 +493,7 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem if (!reg_base) panic("%s: unable to ioremap mct address space\n", __func__); -#ifdef CONFIG_LOCAL_TIMERS if (mct_int_type == MCT_INT_PPI) { - int err; err = request_percpu_irq(mct_irqs[MCT_L0_IRQ], exynos4_mct_tick_isr, "MCT", @@ -484,8 +502,16 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem mct_irqs[MCT_L0_IRQ], err); } - local_timer_register(&exynos4_mct_tick_ops); -#endif /* CONFIG_LOCAL_TIMERS */ + err = register_cpu_notifier(&exynos4_mct_cpu_nb); + if (err) + goto out_irq; + + /* Immediately configure the timer on the boot CPU */ + exynos4_local_timer_setup(&mevt->evt); + return; + +out_irq: + free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick); } void __init mct_init(void __iomem *base, int irq_g0, int irq_l0, int irq_l1) diff --git a/drivers/clocksource/moxart_timer.c b/drivers/clocksource/moxart_timer.c new file mode 100644 index 000000000000..5eb2c35932b1 --- /dev/null +++ b/drivers/clocksource/moxart_timer.c @@ -0,0 +1,165 @@ +/* + * MOXA ART SoCs timer handling. + * + * Copyright (C) 2013 Jonas Jensen + * + * Jonas Jensen <jonas.jensen@gmail.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqreturn.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/io.h> +#include <linux/clocksource.h> +#include <linux/bitops.h> + +#define TIMER1_BASE 0x00 +#define TIMER2_BASE 0x10 +#define TIMER3_BASE 0x20 + +#define REG_COUNT 0x0 /* writable */ +#define REG_LOAD 0x4 +#define REG_MATCH1 0x8 +#define REG_MATCH2 0xC + +#define TIMER_CR 0x30 +#define TIMER_INTR_STATE 0x34 +#define TIMER_INTR_MASK 0x38 + +/* + * TIMER_CR flags: + * + * TIMEREG_CR_*_CLOCK 0: PCLK, 1: EXT1CLK + * TIMEREG_CR_*_INT overflow interrupt enable bit + */ +#define TIMEREG_CR_1_ENABLE BIT(0) +#define TIMEREG_CR_1_CLOCK BIT(1) +#define TIMEREG_CR_1_INT BIT(2) +#define TIMEREG_CR_2_ENABLE BIT(3) +#define TIMEREG_CR_2_CLOCK BIT(4) +#define TIMEREG_CR_2_INT BIT(5) +#define TIMEREG_CR_3_ENABLE BIT(6) +#define TIMEREG_CR_3_CLOCK BIT(7) +#define TIMEREG_CR_3_INT BIT(8) +#define TIMEREG_CR_COUNT_UP BIT(9) + +#define TIMER1_ENABLE (TIMEREG_CR_2_ENABLE | TIMEREG_CR_1_ENABLE) +#define TIMER1_DISABLE (TIMEREG_CR_2_ENABLE) + +static void __iomem *base; +static unsigned int clock_count_per_tick; + +static void moxart_clkevt_mode(enum clock_event_mode mode, + struct clock_event_device *clk) +{ + switch (mode) { + case CLOCK_EVT_MODE_RESUME: + case CLOCK_EVT_MODE_ONESHOT: + writel(TIMER1_DISABLE, base + TIMER_CR); + writel(~0, base + TIMER1_BASE + REG_LOAD); + break; + case CLOCK_EVT_MODE_PERIODIC: + writel(clock_count_per_tick, base + TIMER1_BASE + REG_LOAD); + writel(TIMER1_ENABLE, base + TIMER_CR); + break; + case CLOCK_EVT_MODE_UNUSED: + case CLOCK_EVT_MODE_SHUTDOWN: + default: + writel(TIMER1_DISABLE, base + TIMER_CR); + break; + } +} + +static int moxart_clkevt_next_event(unsigned long cycles, + struct clock_event_device *unused) +{ + u32 u; + + writel(TIMER1_DISABLE, base + TIMER_CR); + + u = readl(base + TIMER1_BASE + REG_COUNT) - cycles; + writel(u, base + TIMER1_BASE + REG_MATCH1); + + writel(TIMER1_ENABLE, base + TIMER_CR); + + return 0; +} + +static struct clock_event_device moxart_clockevent = { + .name = "moxart_timer", + .rating = 200, + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, + .set_mode = moxart_clkevt_mode, + .set_next_event = moxart_clkevt_next_event, +}; + +static irqreturn_t moxart_timer_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evt = dev_id; + evt->event_handler(evt); + return IRQ_HANDLED; +} + +static struct irqaction moxart_timer_irq = { + .name = "moxart-timer", + .flags = IRQF_TIMER, + .handler = moxart_timer_interrupt, + .dev_id = &moxart_clockevent, +}; + +static void __init moxart_timer_init(struct device_node *node) +{ + int ret, irq; + unsigned long pclk; + struct clk *clk; + + base = of_iomap(node, 0); + if (!base) + panic("%s: of_iomap failed\n", node->full_name); + + irq = irq_of_parse_and_map(node, 0); + if (irq <= 0) + panic("%s: irq_of_parse_and_map failed\n", node->full_name); + + ret = setup_irq(irq, &moxart_timer_irq); + if (ret) + panic("%s: setup_irq failed\n", node->full_name); + + clk = of_clk_get(node, 0); + if (IS_ERR(clk)) + panic("%s: of_clk_get failed\n", node->full_name); + + pclk = clk_get_rate(clk); + + if (clocksource_mmio_init(base + TIMER2_BASE + REG_COUNT, + "moxart_timer", pclk, 200, 32, + clocksource_mmio_readl_down)) + panic("%s: clocksource_mmio_init failed\n", node->full_name); + + clock_count_per_tick = DIV_ROUND_CLOSEST(pclk, HZ); + + writel(~0, base 
+ TIMER2_BASE + REG_LOAD); + writel(TIMEREG_CR_2_ENABLE, base + TIMER_CR); + + moxart_clockevent.cpumask = cpumask_of(0); + moxart_clockevent.irq = irq; + + /* + * documentation is not publicly available: + * min_delta / max_delta obtained by trial-and-error, + * max_delta 0xfffffffe should be ok because count + * register size is u32 + */ + clockevents_config_and_register(&moxart_clockevent, pclk, + 0x4, 0xfffffffe); +} +CLOCKSOURCE_OF_DECLARE(moxart, "moxa,moxart-timer", moxart_timer_init); diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c index 32950c3ed374..ab29476ee5f9 100644 --- a/drivers/clocksource/samsung_pwm_timer.c +++ b/drivers/clocksource/samsung_pwm_timer.c @@ -44,16 +44,28 @@ #define TCFG1_SHIFT(x) ((x) * 4) #define TCFG1_MUX_MASK 0xf +/* + * Each channel occupies 4 bits in TCON register, but there is a gap of 4 + * bits (one channel) after channel 0, so channels have different numbering + * when accessing TCON register. + * + * In addition, the location of autoreload bit for channel 4 (TCON channel 5) + * in its set of bits is 2 as opposed to 3 for other channels. + */ #define TCON_START(chan) (1 << (4 * (chan) + 0)) #define TCON_MANUALUPDATE(chan) (1 << (4 * (chan) + 1)) #define TCON_INVERT(chan) (1 << (4 * (chan) + 2)) -#define TCON_AUTORELOAD(chan) (1 << (4 * (chan) + 3)) +#define _TCON_AUTORELOAD(chan) (1 << (4 * (chan) + 3)) +#define _TCON_AUTORELOAD4(chan) (1 << (4 * (chan) + 2)) +#define TCON_AUTORELOAD(chan) \ + ((chan < 5) ? _TCON_AUTORELOAD(chan) : _TCON_AUTORELOAD4(chan)) DEFINE_SPINLOCK(samsung_pwm_lock); EXPORT_SYMBOL(samsung_pwm_lock); struct samsung_pwm_clocksource { void __iomem *base; + void __iomem *source_reg; unsigned int irq[SAMSUNG_PWM_NUM]; struct samsung_pwm_variant variant; @@ -195,17 +207,6 @@ static int samsung_set_next_event(unsigned long cycles, return 0; } -static void samsung_timer_resume(void) -{ - /* event timer restart */ - samsung_time_setup(pwm.event_id, pwm.clock_count_per_tick - 1); - samsung_time_start(pwm.event_id, true); - - /* source timer restart */ - samsung_time_setup(pwm.source_id, pwm.tcnt_max); - samsung_time_start(pwm.source_id, true); -} - static void samsung_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { @@ -222,20 +223,29 @@ static void samsung_set_mode(enum clock_event_mode mode, case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_SHUTDOWN: - break; - case CLOCK_EVT_MODE_RESUME: - samsung_timer_resume(); break; } } +static void samsung_clockevent_resume(struct clock_event_device *cev) +{ + samsung_timer_set_prescale(pwm.event_id, pwm.tscaler_div); + samsung_timer_set_divisor(pwm.event_id, pwm.tdiv); + + if (pwm.variant.has_tint_cstat) { + u32 mask = (1 << pwm.event_id); + writel(mask | (mask << 5), pwm.base + REG_TINT_CSTAT); + } +} + static struct clock_event_device time_event_device = { .name = "samsung_event_timer", .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .rating = 200, .set_next_event = samsung_set_next_event, .set_mode = samsung_set_mode, + .resume = samsung_clockevent_resume, }; static irqreturn_t samsung_clock_event_isr(int irq, void *dev_id) @@ -286,23 +296,34 @@ static void __init samsung_clockevent_init(void) } } -static void __iomem *samsung_timer_reg(void) +static void samsung_clocksource_suspend(struct clocksource *cs) { - switch (pwm.source_id) { - case 0: - case 1: - case 2: - case 3: - return pwm.base + pwm.source_id * 0x0c + 0x14; - - case 4: - return pwm.base + 0x40; - - default: - BUG(); - } + 
samsung_time_stop(pwm.source_id); } +static void samsung_clocksource_resume(struct clocksource *cs) +{ + samsung_timer_set_prescale(pwm.source_id, pwm.tscaler_div); + samsung_timer_set_divisor(pwm.source_id, pwm.tdiv); + + samsung_time_setup(pwm.source_id, pwm.tcnt_max); + samsung_time_start(pwm.source_id, true); +} + +static cycle_t samsung_clocksource_read(struct clocksource *c) +{ + return ~readl_relaxed(pwm.source_reg); +} + +static struct clocksource samsung_clocksource = { + .name = "samsung_clocksource_timer", + .rating = 250, + .read = samsung_clocksource_read, + .suspend = samsung_clocksource_suspend, + .resume = samsung_clocksource_resume, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + /* * Override the global weak sched_clock symbol with this * local implementation which uses the clocksource to get some @@ -312,17 +333,11 @@ static void __iomem *samsung_timer_reg(void) */ static u32 notrace samsung_read_sched_clock(void) { - void __iomem *reg = samsung_timer_reg(); - - if (!reg) - return 0; - - return ~__raw_readl(reg); + return samsung_clocksource_read(NULL); } static void __init samsung_clocksource_init(void) { - void __iomem *reg = samsung_timer_reg(); unsigned long pclk; unsigned long clock_rate; int ret; @@ -337,12 +352,16 @@ static void __init samsung_clocksource_init(void) samsung_time_setup(pwm.source_id, pwm.tcnt_max); samsung_time_start(pwm.source_id, true); + if (pwm.source_id == 4) + pwm.source_reg = pwm.base + 0x40; + else + pwm.source_reg = pwm.base + pwm.source_id * 0x0c + 0x14; + setup_sched_clock(samsung_read_sched_clock, pwm.variant.bits, clock_rate); - ret = clocksource_mmio_init(reg, "samsung_clocksource_timer", - clock_rate, 250, pwm.variant.bits, - clocksource_mmio_readl_down); + samsung_clocksource.mask = CLOCKSOURCE_MASK(pwm.variant.bits); + ret = clocksource_register_hz(&samsung_clocksource, clock_rate); if (ret) panic("samsung_clocksource_timer: can't register clocksource\n"); } @@ -404,7 +423,6 @@ void __init samsung_pwm_clocksource_init(void __iomem *base, static void __init samsung_pwm_alloc(struct device_node *np, const struct samsung_pwm_variant *variant) { - struct resource res; struct property *prop; const __be32 *cur; u32 val; @@ -423,17 +441,9 @@ static void __init samsung_pwm_alloc(struct device_node *np, pwm.variant.output_mask |= 1 << val; } - of_address_to_resource(np, 0, &res); - if (!request_mem_region(res.start, - resource_size(&res), "samsung-pwm")) { - pr_err("%s: failed to request IO mem region\n", __func__); - return; - } - - pwm.base = ioremap(res.start, resource_size(&res)); + pwm.base = of_iomap(np, 0); if (!pwm.base) { pr_err("%s: failed to map PWM registers\n", __func__); - release_mem_region(res.start, resource_size(&res)); return; } diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c index d4674e78ef35..8ead0258740a 100644 --- a/drivers/clocksource/sun4i_timer.c +++ b/drivers/clocksource/sun4i_timer.c @@ -19,42 +19,83 @@ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqreturn.h> +#include <linux/sched_clock.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #define TIMER_IRQ_EN_REG 0x00 -#define TIMER_IRQ_EN(val) (1 << val) +#define TIMER_IRQ_EN(val) BIT(val) #define TIMER_IRQ_ST_REG 0x04 #define TIMER_CTL_REG(val) (0x10 * val + 0x10) -#define TIMER_CTL_ENABLE (1 << 0) -#define TIMER_CTL_AUTORELOAD (1 << 1) -#define TIMER_CTL_ONESHOT (1 << 7) -#define TIMER_INTVAL_REG(val) (0x10 * val + 0x14) -#define TIMER_CNTVAL_REG(val) (0x10 * val + 0x18) - 
-#define TIMER_SCAL 16 +#define TIMER_CTL_ENABLE BIT(0) +#define TIMER_CTL_RELOAD BIT(1) +#define TIMER_CTL_CLK_SRC(val) (((val) & 0x3) << 2) +#define TIMER_CTL_CLK_SRC_OSC24M (1) +#define TIMER_CTL_CLK_PRES(val) (((val) & 0x7) << 4) +#define TIMER_CTL_ONESHOT BIT(7) +#define TIMER_INTVAL_REG(val) (0x10 * (val) + 0x14) +#define TIMER_CNTVAL_REG(val) (0x10 * (val) + 0x18) static void __iomem *timer_base; +static u32 ticks_per_jiffy; + +/* + * When we disable a timer, we need to wait at least for 2 cycles of + * the timer source clock. We will use for that the clocksource timer + * that is already setup and runs at the same frequency than the other + * timers, and we never will be disabled. + */ +static void sun4i_clkevt_sync(void) +{ + u32 old = readl(timer_base + TIMER_CNTVAL_REG(1)); + + while ((old - readl(timer_base + TIMER_CNTVAL_REG(1))) < 3) + cpu_relax(); +} + +static void sun4i_clkevt_time_stop(u8 timer) +{ + u32 val = readl(timer_base + TIMER_CTL_REG(timer)); + writel(val & ~TIMER_CTL_ENABLE, timer_base + TIMER_CTL_REG(timer)); + sun4i_clkevt_sync(); +} + +static void sun4i_clkevt_time_setup(u8 timer, unsigned long delay) +{ + writel(delay, timer_base + TIMER_INTVAL_REG(timer)); +} + +static void sun4i_clkevt_time_start(u8 timer, bool periodic) +{ + u32 val = readl(timer_base + TIMER_CTL_REG(timer)); + + if (periodic) + val &= ~TIMER_CTL_ONESHOT; + else + val |= TIMER_CTL_ONESHOT; + + writel(val | TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, + timer_base + TIMER_CTL_REG(timer)); +} static void sun4i_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *clk) { - u32 u = readl(timer_base + TIMER_CTL_REG(0)); - switch (mode) { case CLOCK_EVT_MODE_PERIODIC: - u &= ~(TIMER_CTL_ONESHOT); - writel(u | TIMER_CTL_ENABLE, timer_base + TIMER_CTL_REG(0)); + sun4i_clkevt_time_stop(0); + sun4i_clkevt_time_setup(0, ticks_per_jiffy); + sun4i_clkevt_time_start(0, true); break; - case CLOCK_EVT_MODE_ONESHOT: - writel(u | TIMER_CTL_ONESHOT, timer_base + TIMER_CTL_REG(0)); + sun4i_clkevt_time_stop(0); + sun4i_clkevt_time_start(0, false); break; case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_SHUTDOWN: default: - writel(u & ~(TIMER_CTL_ENABLE), timer_base + TIMER_CTL_REG(0)); + sun4i_clkevt_time_stop(0); break; } } @@ -62,10 +103,9 @@ static void sun4i_clkevt_mode(enum clock_event_mode mode, static int sun4i_clkevt_next_event(unsigned long evt, struct clock_event_device *unused) { - u32 u = readl(timer_base + TIMER_CTL_REG(0)); - writel(evt, timer_base + TIMER_CNTVAL_REG(0)); - writel(u | TIMER_CTL_ENABLE | TIMER_CTL_AUTORELOAD, - timer_base + TIMER_CTL_REG(0)); + sun4i_clkevt_time_stop(0); + sun4i_clkevt_time_setup(0, evt); + sun4i_clkevt_time_start(0, false); return 0; } @@ -96,6 +136,11 @@ static struct irqaction sun4i_timer_irq = { .dev_id = &sun4i_clockevent, }; +static u32 sun4i_timer_sched_read(void) +{ + return ~readl(timer_base + TIMER_CNTVAL_REG(1)); +} + static void __init sun4i_timer_init(struct device_node *node) { unsigned long rate = 0; @@ -114,22 +159,23 @@ static void __init sun4i_timer_init(struct device_node *node) clk = of_clk_get(node, 0); if (IS_ERR(clk)) panic("Can't get timer clock"); + clk_prepare_enable(clk); rate = clk_get_rate(clk); - writel(rate / (TIMER_SCAL * HZ), - timer_base + TIMER_INTVAL_REG(0)); + writel(~0, timer_base + TIMER_INTVAL_REG(1)); + writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD | + TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M), + timer_base + TIMER_CTL_REG(1)); + + setup_sched_clock(sun4i_timer_sched_read, 32, rate); + clocksource_mmio_init(timer_base + 
TIMER_CNTVAL_REG(1), node->name, + rate, 300, 32, clocksource_mmio_readl_down); - /* set clock source to HOSC, 16 pre-division */ - val = readl(timer_base + TIMER_CTL_REG(0)); - val &= ~(0x07 << 4); - val &= ~(0x03 << 2); - val |= (4 << 4) | (1 << 2); - writel(val, timer_base + TIMER_CTL_REG(0)); + ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); - /* set mode to auto reload */ - val = readl(timer_base + TIMER_CTL_REG(0)); - writel(val | TIMER_CTL_AUTORELOAD, timer_base + TIMER_CTL_REG(0)); + writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M), + timer_base + TIMER_CTL_REG(0)); ret = setup_irq(irq, &sun4i_timer_irq); if (ret) @@ -141,8 +187,8 @@ static void __init sun4i_timer_init(struct device_node *node) sun4i_clockevent.cpumask = cpumask_of(0); - clockevents_config_and_register(&sun4i_clockevent, rate / TIMER_SCAL, - 0x1, 0xff); + clockevents_config_and_register(&sun4i_clockevent, rate, 0x1, + 0xffffffff); } CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-timer", sun4i_timer_init); diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c index 1b04b7e1d39b..847cab6f6e31 100644 --- a/drivers/clocksource/time-armada-370-xp.c +++ b/drivers/clocksource/time-armada-370-xp.c @@ -19,6 +19,7 @@ #include <linux/platform_device.h> #include <linux/kernel.h> #include <linux/clk.h> +#include <linux/cpu.h> #include <linux/timer.h> #include <linux/clockchips.h> #include <linux/interrupt.h> @@ -28,9 +29,9 @@ #include <linux/irq.h> #include <linux/module.h> #include <linux/sched_clock.h> - -#include <asm/localtimer.h> #include <linux/percpu.h> +#include <linux/time-armada-370-xp.h> + /* * Timer block registers. */ @@ -69,7 +70,7 @@ static bool timer25Mhz = true; */ static u32 ticks_per_jiffy; -static struct clock_event_device __percpu **percpu_armada_370_xp_evt; +static struct clock_event_device __percpu *armada_370_xp_evt; static u32 notrace armada_370_xp_read_sched_clock(void) { @@ -142,21 +143,14 @@ armada_370_xp_clkevt_mode(enum clock_event_mode mode, } } -static struct clock_event_device armada_370_xp_clkevt = { - .name = "armada_370_xp_per_cpu_tick", - .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, - .shift = 32, - .rating = 300, - .set_next_event = armada_370_xp_clkevt_next_event, - .set_mode = armada_370_xp_clkevt_mode, -}; +static int armada_370_xp_clkevt_irq; static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id) { /* * ACK timer interrupt and call event handler. 
*/ - struct clock_event_device *evt = *(struct clock_event_device **)dev_id; + struct clock_event_device *evt = dev_id; writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS); evt->event_handler(evt); @@ -172,42 +166,55 @@ static int armada_370_xp_timer_setup(struct clock_event_device *evt) u32 u; int cpu = smp_processor_id(); - /* Use existing clock_event for cpu 0 */ - if (!smp_processor_id()) - return 0; - u = readl(local_base + TIMER_CTRL_OFF); if (timer25Mhz) writel(u | TIMER0_25MHZ, local_base + TIMER_CTRL_OFF); else writel(u & ~TIMER0_25MHZ, local_base + TIMER_CTRL_OFF); - evt->name = armada_370_xp_clkevt.name; - evt->irq = armada_370_xp_clkevt.irq; - evt->features = armada_370_xp_clkevt.features; - evt->shift = armada_370_xp_clkevt.shift; - evt->rating = armada_370_xp_clkevt.rating, + evt->name = "armada_370_xp_per_cpu_tick", + evt->features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_PERIODIC; + evt->shift = 32, + evt->rating = 300, evt->set_next_event = armada_370_xp_clkevt_next_event, evt->set_mode = armada_370_xp_clkevt_mode, + evt->irq = armada_370_xp_clkevt_irq; evt->cpumask = cpumask_of(cpu); - *__this_cpu_ptr(percpu_armada_370_xp_evt) = evt; - clockevents_config_and_register(evt, timer_clk, 1, 0xfffffffe); enable_percpu_irq(evt->irq, 0); return 0; } -static void armada_370_xp_timer_stop(struct clock_event_device *evt) +static void armada_370_xp_timer_stop(struct clock_event_device *evt) { evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt); disable_percpu_irq(evt->irq); } -static struct local_timer_ops armada_370_xp_local_timer_ops = { - .setup = armada_370_xp_timer_setup, - .stop = armada_370_xp_timer_stop, +static int armada_370_xp_timer_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + /* + * Grab cpu pointer in each case to avoid spurious + * preemptible warnings + */ + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_STARTING: + armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt)); + break; + case CPU_DYING: + armada_370_xp_timer_stop(this_cpu_ptr(armada_370_xp_evt)); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block armada_370_xp_timer_cpu_nb = { + .notifier_call = armada_370_xp_timer_cpu_notify, }; void __init armada_370_xp_timer_init(void) @@ -223,9 +230,6 @@ void __init armada_370_xp_timer_init(void) if (of_find_property(np, "marvell,timer-25Mhz", NULL)) { /* The fixed 25MHz timer is available so let's use it */ - u = readl(local_base + TIMER_CTRL_OFF); - writel(u | TIMER0_25MHZ, - local_base + TIMER_CTRL_OFF); u = readl(timer_base + TIMER_CTRL_OFF); writel(u | TIMER0_25MHZ, timer_base + TIMER_CTRL_OFF); @@ -235,9 +239,6 @@ void __init armada_370_xp_timer_init(void) struct clk *clk = of_clk_get(np, 0); WARN_ON(IS_ERR(clk)); rate = clk_get_rate(clk); - u = readl(local_base + TIMER_CTRL_OFF); - writel(u & ~(TIMER0_25MHZ), - local_base + TIMER_CTRL_OFF); u = readl(timer_base + TIMER_CTRL_OFF); writel(u & ~(TIMER0_25MHZ), @@ -251,7 +252,7 @@ void __init armada_370_xp_timer_init(void) * We use timer 0 as clocksource, and private(local) timer 0 * for clockevents */ - armada_370_xp_clkevt.irq = irq_of_parse_and_map(np, 4); + armada_370_xp_clkevt_irq = irq_of_parse_and_map(np, 4); ticks_per_jiffy = (timer_clk + HZ / 2) / HZ; @@ -276,26 +277,19 @@ void __init armada_370_xp_timer_init(void) "armada_370_xp_clocksource", timer_clk, 300, 32, clocksource_mmio_readl_down); - /* Register the clockevent on the private timer of CPU 0 */ - armada_370_xp_clkevt.cpumask = cpumask_of(0); - 
clockevents_config_and_register(&armada_370_xp_clkevt, - timer_clk, 1, 0xfffffffe); + register_cpu_notifier(&armada_370_xp_timer_cpu_nb); - percpu_armada_370_xp_evt = alloc_percpu(struct clock_event_device *); + armada_370_xp_evt = alloc_percpu(struct clock_event_device); /* * Setup clockevent timer (interrupt-driven). */ - *__this_cpu_ptr(percpu_armada_370_xp_evt) = &armada_370_xp_clkevt; - res = request_percpu_irq(armada_370_xp_clkevt.irq, + res = request_percpu_irq(armada_370_xp_clkevt_irq, armada_370_xp_timer_interrupt, - armada_370_xp_clkevt.name, - percpu_armada_370_xp_evt); - if (!res) { - enable_percpu_irq(armada_370_xp_clkevt.irq, 0); -#ifdef CONFIG_LOCAL_TIMERS - local_timer_register(&armada_370_xp_local_timer_ops); -#endif - } + "armada_370_xp_per_cpu_tick", + armada_370_xp_evt); + /* Immediately configure the timer on the boot CPU */ + if (!res) + armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt)); } diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/time-orion.c index ecbeb6810215..9c7f018a67ca 100644 --- a/drivers/clocksource/time-orion.c +++ b/drivers/clocksource/time-orion.c @@ -19,7 +19,7 @@ #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/spinlock.h> -#include <asm/sched_clock.h> +#include <linux/sched_clock.h> #define TIMER_CTRL 0x00 #define TIMER0_EN BIT(0) diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c index 62876baa3ab9..09a17d9a6594 100644 --- a/drivers/clocksource/timer-marco.c +++ b/drivers/clocksource/timer-marco.c @@ -10,6 +10,7 @@ #include <linux/interrupt.h> #include <linux/clockchips.h> #include <linux/clocksource.h> +#include <linux/cpu.h> #include <linux/bitops.h> #include <linux/irq.h> #include <linux/clk.h> @@ -18,7 +19,6 @@ #include <linux/of_irq.h> #include <linux/of_address.h> #include <linux/sched_clock.h> -#include <asm/localtimer.h> #include <asm/mach/time.h> #define SIRFSOC_TIMER_32COUNTER_0_CTRL 0x0000 @@ -151,13 +151,7 @@ static void sirfsoc_clocksource_resume(struct clocksource *cs) BIT(1) | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL); } -static struct clock_event_device sirfsoc_clockevent = { - .name = "sirfsoc_clockevent", - .rating = 200, - .features = CLOCK_EVT_FEAT_ONESHOT, - .set_mode = sirfsoc_timer_set_mode, - .set_next_event = sirfsoc_timer_set_next_event, -}; +static struct clock_event_device __percpu *sirfsoc_clockevent; static struct clocksource sirfsoc_clocksource = { .name = "sirfsoc_clocksource", @@ -173,11 +167,8 @@ static struct irqaction sirfsoc_timer_irq = { .name = "sirfsoc_timer0", .flags = IRQF_TIMER | IRQF_NOBALANCING, .handler = sirfsoc_timer_interrupt, - .dev_id = &sirfsoc_clockevent, }; -#ifdef CONFIG_LOCAL_TIMERS - static struct irqaction sirfsoc_timer1_irq = { .name = "sirfsoc_timer1", .flags = IRQF_TIMER | IRQF_NOBALANCING, @@ -186,24 +177,28 @@ static struct irqaction sirfsoc_timer1_irq = { static int sirfsoc_local_timer_setup(struct clock_event_device *ce) { - /* Use existing clock_event for cpu 0 */ - if (!smp_processor_id()) - return 0; + int cpu = smp_processor_id(); + struct irqaction *action; + + if (cpu == 0) + action = &sirfsoc_timer_irq; + else + action = &sirfsoc_timer1_irq; - ce->irq = sirfsoc_timer1_irq.irq; + ce->irq = action->irq; ce->name = "local_timer"; - ce->features = sirfsoc_clockevent.features; - ce->rating = sirfsoc_clockevent.rating; + ce->features = CLOCK_EVT_FEAT_ONESHOT; + ce->rating = 200; ce->set_mode = sirfsoc_timer_set_mode; ce->set_next_event = sirfsoc_timer_set_next_event; - ce->shift = 
sirfsoc_clockevent.shift; - ce->mult = sirfsoc_clockevent.mult; - ce->max_delta_ns = sirfsoc_clockevent.max_delta_ns; - ce->min_delta_ns = sirfsoc_clockevent.min_delta_ns; + clockevents_calc_mult_shift(ce, CLOCK_TICK_RATE, 60); + ce->max_delta_ns = clockevent_delta2ns(-2, ce); + ce->min_delta_ns = clockevent_delta2ns(2, ce); + ce->cpumask = cpumask_of(cpu); - sirfsoc_timer1_irq.dev_id = ce; - BUG_ON(setup_irq(ce->irq, &sirfsoc_timer1_irq)); - irq_set_affinity(sirfsoc_timer1_irq.irq, cpumask_of(1)); + action->dev_id = ce; + BUG_ON(setup_irq(ce->irq, action)); + irq_set_affinity(action->irq, cpumask_of(cpu)); clockevents_register_device(ce); return 0; @@ -211,31 +206,48 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce) static void sirfsoc_local_timer_stop(struct clock_event_device *ce) { + int cpu = smp_processor_id(); + sirfsoc_timer_count_disable(1); - remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq); + if (cpu == 0) + remove_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq); + else + remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq); } -static struct local_timer_ops sirfsoc_local_timer_ops = { - .setup = sirfsoc_local_timer_setup, - .stop = sirfsoc_local_timer_stop, +static int sirfsoc_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + /* + * Grab cpu pointer in each case to avoid spurious + * preemptible warnings + */ + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_STARTING: + sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent)); + break; + case CPU_DYING: + sirfsoc_local_timer_stop(this_cpu_ptr(sirfsoc_clockevent)); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block sirfsoc_cpu_nb = { + .notifier_call = sirfsoc_cpu_notify, }; -#endif /* CONFIG_LOCAL_TIMERS */ static void __init sirfsoc_clockevent_init(void) { - clockevents_calc_mult_shift(&sirfsoc_clockevent, CLOCK_TICK_RATE, 60); - - sirfsoc_clockevent.max_delta_ns = - clockevent_delta2ns(-2, &sirfsoc_clockevent); - sirfsoc_clockevent.min_delta_ns = - clockevent_delta2ns(2, &sirfsoc_clockevent); - - sirfsoc_clockevent.cpumask = cpumask_of(0); - clockevents_register_device(&sirfsoc_clockevent); -#ifdef CONFIG_LOCAL_TIMERS - local_timer_register(&sirfsoc_local_timer_ops); -#endif + sirfsoc_clockevent = alloc_percpu(struct clock_event_device); + BUG_ON(!sirfsoc_clockevent); + + BUG_ON(register_cpu_notifier(&sirfsoc_cpu_nb)); + + /* Immediately configure the timer on the boot CPU */ + sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent)); } /* initialize the kernel jiffy timer source */ @@ -273,8 +285,6 @@ static void __init sirfsoc_marco_timer_init(void) BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, CLOCK_TICK_RATE)); - BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq)); - sirfsoc_clockevent_init(); } @@ -288,11 +298,9 @@ static void __init sirfsoc_of_timer_init(struct device_node *np) if (!sirfsoc_timer_irq.irq) panic("No irq passed for timer0 via DT\n"); -#ifdef CONFIG_LOCAL_TIMERS sirfsoc_timer1_irq.irq = irq_of_parse_and_map(np, 1); if (!sirfsoc_timer1_irq.irq) panic("No irq passed for timer1 via DT\n"); -#endif sirfsoc_marco_timer_init(); } |
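Several of the converted drivers (exynos_mct.c, time-armada-370-xp.c, timer-marco.c) drop the old local_timer_ops registration in favour of a CPU notifier that programs each CPU's per-cpu clock_event_device as that CPU comes online. A minimal sketch of the pattern, written against the notifier API of this kernel generation; the my_* names, the 25 MHz rate and the delta limits are illustrative stand-ins rather than values taken from any one driver:

#include <linux/cpu.h>
#include <linux/clockchips.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static struct clock_event_device __percpu *my_evt;	/* illustrative name */

/* Driver-specific hardware programming would replace these two stubs. */
static void my_clkevt_set_mode(enum clock_event_mode mode,
			       struct clock_event_device *evt)
{
}

static int my_clkevt_next_event(unsigned long cycles,
				struct clock_event_device *evt)
{
	return 0;
}

static int my_local_timer_setup(struct clock_event_device *evt)
{
	evt->name = "my_local_timer";
	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = 300;
	evt->set_mode = my_clkevt_set_mode;
	evt->set_next_event = my_clkevt_next_event;
	evt->cpumask = cpumask_of(smp_processor_id());
	clockevents_config_and_register(evt, 25000000, 1, 0xfffffffe);
	return 0;
}

static void my_local_timer_stop(struct clock_event_device *evt)
{
	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
}

static int my_timer_cpu_notify(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	/* CPU_STARTING and CPU_DYING run on the hot-plugged CPU itself. */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		my_local_timer_setup(this_cpu_ptr(my_evt));
		break;
	case CPU_DYING:
		my_local_timer_stop(this_cpu_ptr(my_evt));
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_timer_cpu_nb = {
	.notifier_call = my_timer_cpu_notify,
};

static void __init my_timer_init(void)
{
	my_evt = alloc_percpu(struct clock_event_device);
	register_cpu_notifier(&my_timer_cpu_nb);
	/* The boot CPU never sees CPU_STARTING, so configure it directly. */
	my_local_timer_setup(this_cpu_ptr(my_evt));
}

The boot CPU is configured directly at init time because it never receives a CPU_STARTING notification; the hunks above do the same by calling the setup helper on this_cpu_ptr() right after registering the notifier.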