author	Anup Patel <apatel@ventanamicro.com>	2023-07-10 18:49:01 +0530
committer	Palmer Dabbelt <palmer@rivosinc.com>	2023-10-31 19:15:48 -0700
commit	5d98446f03c622cb917e15a5561601587c64aab2 (patch)
tree	2fe485cdec9a3858c756d7fa704749342a993b64
parent	a9429d5f99bc25885ba5d4c2c58a25467f5d741b (diff)
clocksource: timer-riscv: Don't enable/disable timer interrupt
Currently, we enable/disable the timer interrupt at runtime to start/stop
timer events. This makes the timer interrupt state go out-of-sync with the
Linux interrupt subsystem.

To address this, stop the per-HART timer by programming U64_MAX into the
timecmp CSR (or via sbi_set_timer()) at the time of handling the timer
interrupt.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Conor Dooley <conor.dooley@microchip.com>
Acked-by: Palmer Dabbelt <palmer@rivosinc.com>
Link: https://lore.kernel.org/r/20230710131902.1459180-2-apatel@ventanamicro.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
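[Editor's sketch, not part of the patch] For readers skimming the diff below, this is roughly what the change boils down to: both starting and stopping the per-HART timer reduce to programming the comparator, so the sie.STIE bit is left under the control of the Linux interrupt subsystem. The helper name riscv_sketch_program_timecmp() and the sketch_* wrappers are hypothetical; csr_write(), CSR_STIMECMP/CSR_STIMECMPH, sbi_set_timer(), U64_MAX and riscv_sstc_available are the kernel symbols the patch itself uses (riscv_sstc_available is file-local to timer-riscv.c, so this would only build inside that file).

/*
 * Illustrative sketch only; mirrors the CSR/SBI writes that the diff
 * below adds to / keeps in drivers/clocksource/timer-riscv.c.
 */
#include <linux/jump_label.h>
#include <linux/limits.h>
#include <asm/csr.h>
#include <asm/sbi.h>

static void riscv_sketch_program_timecmp(u64 tval)
{
	if (static_branch_likely(&riscv_sstc_available)) {
		/* Sstc available: write the stimecmp CSR(s) directly. */
#if defined(CONFIG_32BIT)
		csr_write(CSR_STIMECMP, tval & 0xFFFFFFFF);
		csr_write(CSR_STIMECMPH, tval >> 32);
#else
		csr_write(CSR_STIMECMP, tval);
#endif
	} else {
		/* No Sstc: ask the SBI implementation to program the timer. */
		sbi_set_timer(tval);
	}
}

/* "Stop" becomes a deadline that can never be reached... */
static void riscv_sketch_stop_timer(void)
{
	riscv_sketch_program_timecmp(U64_MAX);
}

/* ...and "start" is the next absolute deadline, as riscv_clock_next_event() does. */
static void riscv_sketch_start_timer(u64 next_deadline)
{
	riscv_sketch_program_timecmp(next_deadline);
}

In other words, "stop" is simply an unreachable deadline, which is why the patch writes U64_MAX (ULONG_MAX per 32-bit half on rv32) rather than touching CSR_IE.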
-rw-r--r--	drivers/clocksource/timer-riscv.c	15	+++++++++++++--
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index da3071b387eb..f2ea2b3d2d43 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -22,6 +22,7 @@
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/interrupt.h>
 #include <linux/of_irq.h>
+#include <linux/limits.h>
 #include <clocksource/timer-riscv.h>
 #include <asm/smp.h>
 #include <asm/hwcap.h>
@@ -31,12 +32,22 @@
 static DEFINE_STATIC_KEY_FALSE(riscv_sstc_available);
 static bool riscv_timer_cannot_wake_cpu;
 
+static void riscv_clock_event_stop(void)
+{
+	if (static_branch_likely(&riscv_sstc_available)) {
+		csr_write(CSR_STIMECMP, ULONG_MAX);
+		if (IS_ENABLED(CONFIG_32BIT))
+			csr_write(CSR_STIMECMPH, ULONG_MAX);
+	} else {
+		sbi_set_timer(U64_MAX);
+	}
+}
+
 static int riscv_clock_next_event(unsigned long delta,
 		struct clock_event_device *ce)
 {
 	u64 next_tval = get_cycles64() + delta;
 
-	csr_set(CSR_IE, IE_TIE);
 	if (static_branch_likely(&riscv_sstc_available)) {
 #if defined(CONFIG_32BIT)
 		csr_write(CSR_STIMECMP, next_tval & 0xFFFFFFFF);
@@ -119,7 +130,7 @@ static irqreturn_t riscv_timer_interrupt(int irq, void *dev_id)
 {
 	struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);
 
-	csr_clear(CSR_IE, IE_TIE);
+	riscv_clock_event_stop();
 	evdev->event_handler(evdev);
 
 	return IRQ_HANDLED;
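
[Editor's note] Reassembling the hunks above, the interrupt path after this change reads roughly as follows; only the riscv_clock_event_stop() call is new, the rest is the pre-existing handler visible in the context lines (closing brace added for readability):

static irqreturn_t riscv_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);

	/* Park the comparator at U64_MAX instead of clearing sie.STIE. */
	riscv_clock_event_stop();
	/* The clockevents core re-arms via riscv_clock_next_event() as needed. */
	evdev->event_handler(evdev);

	return IRQ_HANDLED;
}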