// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
 *
 * Most of the M-mode (i.e. NoMMU) RISC-V systems usually have a
 * CLINT MMIO timer device.
 */
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #define pr_fmt(fmt) "clint: " fmt
10*4882a593Smuzhiyun #include <linux/bitops.h>
11*4882a593Smuzhiyun #include <linux/clocksource.h>
12*4882a593Smuzhiyun #include <linux/clockchips.h>
13*4882a593Smuzhiyun #include <linux/cpu.h>
14*4882a593Smuzhiyun #include <linux/delay.h>
15*4882a593Smuzhiyun #include <linux/module.h>
16*4882a593Smuzhiyun #include <linux/of_address.h>
17*4882a593Smuzhiyun #include <linux/sched_clock.h>
18*4882a593Smuzhiyun #include <linux/io-64-nonatomic-lo-hi.h>
19*4882a593Smuzhiyun #include <linux/interrupt.h>
20*4882a593Smuzhiyun #include <linux/of_irq.h>
21*4882a593Smuzhiyun #include <linux/smp.h>
22*4882a593Smuzhiyun #include <linux/timex.h>
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun #ifndef CONFIG_RISCV_M_MODE
25*4882a593Smuzhiyun #include <asm/clint.h>
26*4882a593Smuzhiyun #endif
27*4882a593Smuzhiyun
/* Offsets of the IPI, timer-compare and timer-value registers within the
 * single CLINT MMIO region mapped in clint_timer_init_dt(). */
#define CLINT_IPI_OFF		0
#define CLINT_TIMER_CMP_OFF	0x4000
#define CLINT_TIMER_VAL_OFF	0xbff8

/* CLINT manages IPI and Timer for RISC-V M-mode */
static u32 __iomem *clint_ipi_base;	/* base of per-hart IPI registers */
static u64 __iomem *clint_timer_cmp;	/* base of per-hart compare registers */
static u64 __iomem *clint_timer_val;	/* free-running timer value register */
static unsigned long clint_timer_freq;	/* ticks/sec; set from riscv_timebase */
static unsigned int clint_timer_irq;	/* Linux irq mapped for RV_IRQ_TIMER */

#ifdef CONFIG_RISCV_M_MODE
/*
 * Exported pointer to the timer value register, aliased to clint_timer_val
 * during init. Presumably consumed by arch M-mode time-read code — see the
 * "odd naming scheme" note at the assignment site in clint_timer_init_dt().
 */
u64 __iomem *clint_time_val;
EXPORT_SYMBOL(clint_time_val);
#endif
43*4882a593Smuzhiyun
clint_send_ipi(const struct cpumask * target)44*4882a593Smuzhiyun static void clint_send_ipi(const struct cpumask *target)
45*4882a593Smuzhiyun {
46*4882a593Smuzhiyun unsigned int cpu;
47*4882a593Smuzhiyun
48*4882a593Smuzhiyun for_each_cpu(cpu, target)
49*4882a593Smuzhiyun writel(1, clint_ipi_base + cpuid_to_hartid_map(cpu));
50*4882a593Smuzhiyun }
51*4882a593Smuzhiyun
clint_clear_ipi(void)52*4882a593Smuzhiyun static void clint_clear_ipi(void)
53*4882a593Smuzhiyun {
54*4882a593Smuzhiyun writel(0, clint_ipi_base + cpuid_to_hartid_map(smp_processor_id()));
55*4882a593Smuzhiyun }
56*4882a593Smuzhiyun
/* IPI callbacks handed to the core via riscv_set_ipi_ops() during init. */
static struct riscv_ipi_ops clint_ipi_ops = {
	.ipi_inject = clint_send_ipi,
	.ipi_clear = clint_clear_ipi,
};
61*4882a593Smuzhiyun
#ifdef CONFIG_64BIT
/* 64-bit: the whole timer value can be read in a single MMIO access. */
#define clint_get_cycles()	readq_relaxed(clint_timer_val)
#else
/* 32-bit: read the timer value in two halves; this is the low word ... */
#define clint_get_cycles()	readl_relaxed(clint_timer_val)
/* ... and this is the high word of the 64-bit timer value register. */
#define clint_get_cycles_hi()	readl_relaxed(((u32 *)clint_timer_val) + 1)
#endif
68*4882a593Smuzhiyun
#ifdef CONFIG_64BIT
/*
 * Return the current 64-bit timer value (single atomic read).
 * notrace: this is also registered as the sched_clock read callback.
 */
static u64 notrace clint_get_cycles64(void)
{
	return clint_get_cycles();
}
#else /* CONFIG_64BIT */
/*
 * Return the current 64-bit timer value on a 32-bit system. The two
 * halves cannot be read atomically, so re-read the high word until it
 * is stable, guarding against a low-word rollover between the reads.
 * notrace: this is also registered as the sched_clock read callback.
 */
static u64 notrace clint_get_cycles64(void)
{
	u32 hi, lo;

	do {
		hi = clint_get_cycles_hi();
		lo = clint_get_cycles();
	} while (hi != clint_get_cycles_hi());

	return ((u64)hi << 32) | lo;
}
#endif /* CONFIG_64BIT */
87*4882a593Smuzhiyun
/* Clocksource read callback: return the current 64-bit timer value. */
static u64 clint_rdtime(struct clocksource *cs)
{
	return clint_get_cycles64();
}
92*4882a593Smuzhiyun
/* The free-running CLINT counter, registered in clint_timer_init_dt(). */
static struct clocksource clint_clocksource = {
	.name		= "clint_clocksource",
	.rating		= 300,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.read		= clint_rdtime,
};
100*4882a593Smuzhiyun
clint_clock_next_event(unsigned long delta,struct clock_event_device * ce)101*4882a593Smuzhiyun static int clint_clock_next_event(unsigned long delta,
102*4882a593Smuzhiyun struct clock_event_device *ce)
103*4882a593Smuzhiyun {
104*4882a593Smuzhiyun void __iomem *r = clint_timer_cmp +
105*4882a593Smuzhiyun cpuid_to_hartid_map(smp_processor_id());
106*4882a593Smuzhiyun
107*4882a593Smuzhiyun csr_set(CSR_IE, IE_TIE);
108*4882a593Smuzhiyun writeq_relaxed(clint_get_cycles64() + delta, r);
109*4882a593Smuzhiyun return 0;
110*4882a593Smuzhiyun }
111*4882a593Smuzhiyun
/*
 * Per-CPU clockevent device, oneshot only; events are programmed via
 * clint_clock_next_event() and configured per CPU at hotplug-online time.
 */
static DEFINE_PER_CPU(struct clock_event_device, clint_clock_event) = {
	.name		= "clint_clockevent",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 100,
	.set_next_event	= clint_clock_next_event,
};
118*4882a593Smuzhiyun
clint_timer_starting_cpu(unsigned int cpu)119*4882a593Smuzhiyun static int clint_timer_starting_cpu(unsigned int cpu)
120*4882a593Smuzhiyun {
121*4882a593Smuzhiyun struct clock_event_device *ce = per_cpu_ptr(&clint_clock_event, cpu);
122*4882a593Smuzhiyun
123*4882a593Smuzhiyun ce->cpumask = cpumask_of(cpu);
124*4882a593Smuzhiyun clockevents_config_and_register(ce, clint_timer_freq, 100, 0x7fffffff);
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun enable_percpu_irq(clint_timer_irq,
127*4882a593Smuzhiyun irq_get_trigger_type(clint_timer_irq));
128*4882a593Smuzhiyun return 0;
129*4882a593Smuzhiyun }
130*4882a593Smuzhiyun
/* CPU hotplug "dying" callback: mask the per-CPU timer irq on this CPU. */
static int clint_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(clint_timer_irq);
	return 0;
}
136*4882a593Smuzhiyun
clint_timer_interrupt(int irq,void * dev_id)137*4882a593Smuzhiyun static irqreturn_t clint_timer_interrupt(int irq, void *dev_id)
138*4882a593Smuzhiyun {
139*4882a593Smuzhiyun struct clock_event_device *evdev = this_cpu_ptr(&clint_clock_event);
140*4882a593Smuzhiyun
141*4882a593Smuzhiyun csr_clear(CSR_IE, IE_TIE);
142*4882a593Smuzhiyun evdev->event_handler(evdev);
143*4882a593Smuzhiyun
144*4882a593Smuzhiyun return IRQ_HANDLED;
145*4882a593Smuzhiyun }
146*4882a593Smuzhiyun
clint_timer_init_dt(struct device_node * np)147*4882a593Smuzhiyun static int __init clint_timer_init_dt(struct device_node *np)
148*4882a593Smuzhiyun {
149*4882a593Smuzhiyun int rc;
150*4882a593Smuzhiyun u32 i, nr_irqs;
151*4882a593Smuzhiyun void __iomem *base;
152*4882a593Smuzhiyun struct of_phandle_args oirq;
153*4882a593Smuzhiyun
154*4882a593Smuzhiyun /*
155*4882a593Smuzhiyun * Ensure that CLINT device interrupts are either RV_IRQ_TIMER or
156*4882a593Smuzhiyun * RV_IRQ_SOFT. If it's anything else then we ignore the device.
157*4882a593Smuzhiyun */
158*4882a593Smuzhiyun nr_irqs = of_irq_count(np);
159*4882a593Smuzhiyun for (i = 0; i < nr_irqs; i++) {
160*4882a593Smuzhiyun if (of_irq_parse_one(np, i, &oirq)) {
161*4882a593Smuzhiyun pr_err("%pOFP: failed to parse irq %d.\n", np, i);
162*4882a593Smuzhiyun continue;
163*4882a593Smuzhiyun }
164*4882a593Smuzhiyun
165*4882a593Smuzhiyun if ((oirq.args_count != 1) ||
166*4882a593Smuzhiyun (oirq.args[0] != RV_IRQ_TIMER &&
167*4882a593Smuzhiyun oirq.args[0] != RV_IRQ_SOFT)) {
168*4882a593Smuzhiyun pr_err("%pOFP: invalid irq %d (hwirq %d)\n",
169*4882a593Smuzhiyun np, i, oirq.args[0]);
170*4882a593Smuzhiyun return -ENODEV;
171*4882a593Smuzhiyun }
172*4882a593Smuzhiyun
173*4882a593Smuzhiyun /* Find parent irq domain and map timer irq */
174*4882a593Smuzhiyun if (!clint_timer_irq &&
175*4882a593Smuzhiyun oirq.args[0] == RV_IRQ_TIMER &&
176*4882a593Smuzhiyun irq_find_host(oirq.np))
177*4882a593Smuzhiyun clint_timer_irq = irq_of_parse_and_map(np, i);
178*4882a593Smuzhiyun }
179*4882a593Smuzhiyun
180*4882a593Smuzhiyun /* If CLINT timer irq not found then fail */
181*4882a593Smuzhiyun if (!clint_timer_irq) {
182*4882a593Smuzhiyun pr_err("%pOFP: timer irq not found\n", np);
183*4882a593Smuzhiyun return -ENODEV;
184*4882a593Smuzhiyun }
185*4882a593Smuzhiyun
186*4882a593Smuzhiyun base = of_iomap(np, 0);
187*4882a593Smuzhiyun if (!base) {
188*4882a593Smuzhiyun pr_err("%pOFP: could not map registers\n", np);
189*4882a593Smuzhiyun return -ENODEV;
190*4882a593Smuzhiyun }
191*4882a593Smuzhiyun
192*4882a593Smuzhiyun clint_ipi_base = base + CLINT_IPI_OFF;
193*4882a593Smuzhiyun clint_timer_cmp = base + CLINT_TIMER_CMP_OFF;
194*4882a593Smuzhiyun clint_timer_val = base + CLINT_TIMER_VAL_OFF;
195*4882a593Smuzhiyun clint_timer_freq = riscv_timebase;
196*4882a593Smuzhiyun
197*4882a593Smuzhiyun #ifdef CONFIG_RISCV_M_MODE
198*4882a593Smuzhiyun /*
199*4882a593Smuzhiyun * Yes, that's an odd naming scheme. time_val is public, but hopefully
200*4882a593Smuzhiyun * will die in favor of something cleaner.
201*4882a593Smuzhiyun */
202*4882a593Smuzhiyun clint_time_val = clint_timer_val;
203*4882a593Smuzhiyun #endif
204*4882a593Smuzhiyun
205*4882a593Smuzhiyun pr_info("%pOFP: timer running at %ld Hz\n", np, clint_timer_freq);
206*4882a593Smuzhiyun
207*4882a593Smuzhiyun rc = clocksource_register_hz(&clint_clocksource, clint_timer_freq);
208*4882a593Smuzhiyun if (rc) {
209*4882a593Smuzhiyun pr_err("%pOFP: clocksource register failed [%d]\n", np, rc);
210*4882a593Smuzhiyun goto fail_iounmap;
211*4882a593Smuzhiyun }
212*4882a593Smuzhiyun
213*4882a593Smuzhiyun sched_clock_register(clint_get_cycles64, 64, clint_timer_freq);
214*4882a593Smuzhiyun
215*4882a593Smuzhiyun rc = request_percpu_irq(clint_timer_irq, clint_timer_interrupt,
216*4882a593Smuzhiyun "clint-timer", &clint_clock_event);
217*4882a593Smuzhiyun if (rc) {
218*4882a593Smuzhiyun pr_err("registering percpu irq failed [%d]\n", rc);
219*4882a593Smuzhiyun goto fail_iounmap;
220*4882a593Smuzhiyun }
221*4882a593Smuzhiyun
222*4882a593Smuzhiyun rc = cpuhp_setup_state(CPUHP_AP_CLINT_TIMER_STARTING,
223*4882a593Smuzhiyun "clockevents/clint/timer:starting",
224*4882a593Smuzhiyun clint_timer_starting_cpu,
225*4882a593Smuzhiyun clint_timer_dying_cpu);
226*4882a593Smuzhiyun if (rc) {
227*4882a593Smuzhiyun pr_err("%pOFP: cpuhp setup state failed [%d]\n", np, rc);
228*4882a593Smuzhiyun goto fail_free_irq;
229*4882a593Smuzhiyun }
230*4882a593Smuzhiyun
231*4882a593Smuzhiyun riscv_set_ipi_ops(&clint_ipi_ops);
232*4882a593Smuzhiyun clint_clear_ipi();
233*4882a593Smuzhiyun
234*4882a593Smuzhiyun return 0;
235*4882a593Smuzhiyun
236*4882a593Smuzhiyun fail_free_irq:
237*4882a593Smuzhiyun free_irq(clint_timer_irq, &clint_clock_event);
238*4882a593Smuzhiyun fail_iounmap:
239*4882a593Smuzhiyun iounmap(base);
240*4882a593Smuzhiyun return rc;
241*4882a593Smuzhiyun }
242*4882a593Smuzhiyun
/* Match both the generic and the SiFive CLINT device-tree compatibles. */
TIMER_OF_DECLARE(clint_timer, "riscv,clint0", clint_timer_init_dt);
TIMER_OF_DECLARE(clint_timer1, "sifive,clint0", clint_timer_init_dt);
245