1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * SuperH Timer Support - TMU
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (C) 2009 Magnus Damm
6*4882a593Smuzhiyun */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #include <linux/clk.h>
9*4882a593Smuzhiyun #include <linux/clockchips.h>
10*4882a593Smuzhiyun #include <linux/clocksource.h>
11*4882a593Smuzhiyun #include <linux/delay.h>
12*4882a593Smuzhiyun #include <linux/err.h>
13*4882a593Smuzhiyun #include <linux/init.h>
14*4882a593Smuzhiyun #include <linux/interrupt.h>
15*4882a593Smuzhiyun #include <linux/io.h>
16*4882a593Smuzhiyun #include <linux/ioport.h>
17*4882a593Smuzhiyun #include <linux/irq.h>
18*4882a593Smuzhiyun #include <linux/module.h>
19*4882a593Smuzhiyun #include <linux/of.h>
20*4882a593Smuzhiyun #include <linux/platform_device.h>
21*4882a593Smuzhiyun #include <linux/pm_domain.h>
22*4882a593Smuzhiyun #include <linux/pm_runtime.h>
23*4882a593Smuzhiyun #include <linux/sh_timer.h>
24*4882a593Smuzhiyun #include <linux/slab.h>
25*4882a593Smuzhiyun #include <linux/spinlock.h>
26*4882a593Smuzhiyun
27*4882a593Smuzhiyun #ifdef CONFIG_SUPERH
28*4882a593Smuzhiyun #include <asm/platform_early.h>
29*4882a593Smuzhiyun #endif
30*4882a593Smuzhiyun
/* TMU hardware variants: the SH3 layout differs in register offsets. */
enum sh_tmu_model {
	SH_TMU,
	SH_TMU_SH3,
};

struct sh_tmu_device;

/*
 * Per-channel state. Each channel can serve as a clock event device or a
 * clocksource (not both); enable_count refcounts users of the hardware.
 */
struct sh_tmu_channel {
	struct sh_tmu_device *tmu;	/* parent device */
	unsigned int index;		/* channel number within the TMU block */

	void __iomem *base;		/* channel register base */
	int irq;			/* underflow interrupt */

	unsigned long periodic;		/* reload value for periodic mode */
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;		/* clocksource currently enabled? */
	unsigned int enable_count;	/* nested enable refcount */
};

/* One TMU block: shared clock, shared start/stop register, N channels. */
struct sh_tmu_device {
	struct platform_device *pdev;

	void __iomem *mapbase;		/* block register base */
	struct clk *clk;		/* functional clock "fck" */
	unsigned long rate;		/* counting rate (fck / 4) */

	enum sh_tmu_model model;

	raw_spinlock_t lock; /* Protect the shared start/stop register */

	struct sh_tmu_channel *channels;
	unsigned int num_channels;

	bool has_clockevent;
	bool has_clocksource;
};

/* Register indices passed to sh_tmu_read()/sh_tmu_write(). */
#define TSTR -1 /* shared register */
#define TCOR 0 /* channel register */
#define TCNT 1 /* channel register */
#define TCR 2 /* channel register */

/* TCR bits: underflow flag, underflow interrupt enable, prescaler select. */
#define TCR_UNF (1 << 8)
#define TCR_UNIE (1 << 5)
#define TCR_TPSC_CLK4 (0 << 0)
#define TCR_TPSC_CLK16 (1 << 0)
#define TCR_TPSC_CLK64 (2 << 0)
#define TCR_TPSC_CLK256 (3 << 0)
#define TCR_TPSC_CLK1024 (4 << 0)
#define TCR_TPSC_MASK (7 << 0)
83*4882a593Smuzhiyun
/*
 * Read a TMU register.
 *
 * TSTR is the 8-bit start/stop register shared by all channels; its offset
 * from the block base depends on the model (2 on SH3, 4 otherwise). All
 * other registers are per-channel, spaced 4 bytes apart from ch->base, and
 * 32 bits wide except for the 16-bit TCR.
 */
static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return ioread8(ch->tmu->mapbase + 2);
		case SH_TMU:
			return ioread8(ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		return ioread16(ch->base + offs);
	else
		return ioread32(ch->base + offs);
}
104*4882a593Smuzhiyun
/*
 * Write a TMU register. Mirror of sh_tmu_read(): the shared TSTR register
 * is an 8-bit access at a model-dependent offset, TCR is a 16-bit access,
 * everything else is 32 bits wide.
 */
static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
				unsigned long value)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return iowrite8(value, ch->tmu->mapbase + 2);
		case SH_TMU:
			return iowrite8(value, ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		iowrite16(value, ch->base + offs);
	else
		iowrite32(value, ch->base + offs);
}
126*4882a593Smuzhiyun
sh_tmu_start_stop_ch(struct sh_tmu_channel * ch,int start)127*4882a593Smuzhiyun static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
128*4882a593Smuzhiyun {
129*4882a593Smuzhiyun unsigned long flags, value;
130*4882a593Smuzhiyun
131*4882a593Smuzhiyun /* start stop register shared by multiple timer channels */
132*4882a593Smuzhiyun raw_spin_lock_irqsave(&ch->tmu->lock, flags);
133*4882a593Smuzhiyun value = sh_tmu_read(ch, TSTR);
134*4882a593Smuzhiyun
135*4882a593Smuzhiyun if (start)
136*4882a593Smuzhiyun value |= 1 << ch->index;
137*4882a593Smuzhiyun else
138*4882a593Smuzhiyun value &= ~(1 << ch->index);
139*4882a593Smuzhiyun
140*4882a593Smuzhiyun sh_tmu_write(ch, TSTR, value);
141*4882a593Smuzhiyun raw_spin_unlock_irqrestore(&ch->tmu->lock, flags);
142*4882a593Smuzhiyun }
143*4882a593Smuzhiyun
__sh_tmu_enable(struct sh_tmu_channel * ch)144*4882a593Smuzhiyun static int __sh_tmu_enable(struct sh_tmu_channel *ch)
145*4882a593Smuzhiyun {
146*4882a593Smuzhiyun int ret;
147*4882a593Smuzhiyun
148*4882a593Smuzhiyun /* enable clock */
149*4882a593Smuzhiyun ret = clk_enable(ch->tmu->clk);
150*4882a593Smuzhiyun if (ret) {
151*4882a593Smuzhiyun dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
152*4882a593Smuzhiyun ch->index);
153*4882a593Smuzhiyun return ret;
154*4882a593Smuzhiyun }
155*4882a593Smuzhiyun
156*4882a593Smuzhiyun /* make sure channel is disabled */
157*4882a593Smuzhiyun sh_tmu_start_stop_ch(ch, 0);
158*4882a593Smuzhiyun
159*4882a593Smuzhiyun /* maximum timeout */
160*4882a593Smuzhiyun sh_tmu_write(ch, TCOR, 0xffffffff);
161*4882a593Smuzhiyun sh_tmu_write(ch, TCNT, 0xffffffff);
162*4882a593Smuzhiyun
163*4882a593Smuzhiyun /* configure channel to parent clock / 4, irq off */
164*4882a593Smuzhiyun sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
165*4882a593Smuzhiyun
166*4882a593Smuzhiyun /* enable channel */
167*4882a593Smuzhiyun sh_tmu_start_stop_ch(ch, 1);
168*4882a593Smuzhiyun
169*4882a593Smuzhiyun return 0;
170*4882a593Smuzhiyun }
171*4882a593Smuzhiyun
sh_tmu_enable(struct sh_tmu_channel * ch)172*4882a593Smuzhiyun static int sh_tmu_enable(struct sh_tmu_channel *ch)
173*4882a593Smuzhiyun {
174*4882a593Smuzhiyun if (ch->enable_count++ > 0)
175*4882a593Smuzhiyun return 0;
176*4882a593Smuzhiyun
177*4882a593Smuzhiyun pm_runtime_get_sync(&ch->tmu->pdev->dev);
178*4882a593Smuzhiyun dev_pm_syscore_device(&ch->tmu->pdev->dev, true);
179*4882a593Smuzhiyun
180*4882a593Smuzhiyun return __sh_tmu_enable(ch);
181*4882a593Smuzhiyun }
182*4882a593Smuzhiyun
/*
 * Power down one channel; counterpart of __sh_tmu_enable(). Stops the
 * counter, disables the underflow interrupt and gates the functional
 * clock, in that order.
 */
static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
	/* disable channel */
	sh_tmu_start_stop_ch(ch, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* stop clock */
	clk_disable(ch->tmu->clk);
}
194*4882a593Smuzhiyun
sh_tmu_disable(struct sh_tmu_channel * ch)195*4882a593Smuzhiyun static void sh_tmu_disable(struct sh_tmu_channel *ch)
196*4882a593Smuzhiyun {
197*4882a593Smuzhiyun if (WARN_ON(ch->enable_count == 0))
198*4882a593Smuzhiyun return;
199*4882a593Smuzhiyun
200*4882a593Smuzhiyun if (--ch->enable_count > 0)
201*4882a593Smuzhiyun return;
202*4882a593Smuzhiyun
203*4882a593Smuzhiyun __sh_tmu_disable(ch);
204*4882a593Smuzhiyun
205*4882a593Smuzhiyun dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
206*4882a593Smuzhiyun pm_runtime_put(&ch->tmu->pdev->dev);
207*4882a593Smuzhiyun }
208*4882a593Smuzhiyun
/*
 * Program the channel to expire after @delta input clocks. In periodic
 * mode TCOR is set to @delta so the counter auto-reloads on underflow;
 * in oneshot mode TCOR is left at the maximum so the (disabled-on-ack)
 * interrupt effectively fires once. The register write order matters:
 * the counter must be stopped before TCR/TCOR/TCNT are reprogrammed.
 */
static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
			    int periodic)
{
	/* stop timer */
	sh_tmu_start_stop_ch(ch, 0);

	/* acknowledge interrupt (reading TCR clears the pending underflow) */
	sh_tmu_read(ch, TCR);

	/* enable interrupt */
	sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* reload delta value in case of periodic timer */
	if (periodic)
		sh_tmu_write(ch, TCOR, delta);
	else
		sh_tmu_write(ch, TCOR, 0xffffffff);

	sh_tmu_write(ch, TCNT, delta);

	/* start timer */
	sh_tmu_start_stop_ch(ch, 1);
}
232*4882a593Smuzhiyun
sh_tmu_interrupt(int irq,void * dev_id)233*4882a593Smuzhiyun static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
234*4882a593Smuzhiyun {
235*4882a593Smuzhiyun struct sh_tmu_channel *ch = dev_id;
236*4882a593Smuzhiyun
237*4882a593Smuzhiyun /* disable or acknowledge interrupt */
238*4882a593Smuzhiyun if (clockevent_state_oneshot(&ch->ced))
239*4882a593Smuzhiyun sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
240*4882a593Smuzhiyun else
241*4882a593Smuzhiyun sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);
242*4882a593Smuzhiyun
243*4882a593Smuzhiyun /* notify clockevent layer */
244*4882a593Smuzhiyun ch->ced.event_handler(&ch->ced);
245*4882a593Smuzhiyun return IRQ_HANDLED;
246*4882a593Smuzhiyun }
247*4882a593Smuzhiyun
/* Retrieve the channel that embeds the given clocksource. */
static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_channel, cs);
}
252*4882a593Smuzhiyun
sh_tmu_clocksource_read(struct clocksource * cs)253*4882a593Smuzhiyun static u64 sh_tmu_clocksource_read(struct clocksource *cs)
254*4882a593Smuzhiyun {
255*4882a593Smuzhiyun struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
256*4882a593Smuzhiyun
257*4882a593Smuzhiyun return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
258*4882a593Smuzhiyun }
259*4882a593Smuzhiyun
sh_tmu_clocksource_enable(struct clocksource * cs)260*4882a593Smuzhiyun static int sh_tmu_clocksource_enable(struct clocksource *cs)
261*4882a593Smuzhiyun {
262*4882a593Smuzhiyun struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
263*4882a593Smuzhiyun int ret;
264*4882a593Smuzhiyun
265*4882a593Smuzhiyun if (WARN_ON(ch->cs_enabled))
266*4882a593Smuzhiyun return 0;
267*4882a593Smuzhiyun
268*4882a593Smuzhiyun ret = sh_tmu_enable(ch);
269*4882a593Smuzhiyun if (!ret)
270*4882a593Smuzhiyun ch->cs_enabled = true;
271*4882a593Smuzhiyun
272*4882a593Smuzhiyun return ret;
273*4882a593Smuzhiyun }
274*4882a593Smuzhiyun
sh_tmu_clocksource_disable(struct clocksource * cs)275*4882a593Smuzhiyun static void sh_tmu_clocksource_disable(struct clocksource *cs)
276*4882a593Smuzhiyun {
277*4882a593Smuzhiyun struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
278*4882a593Smuzhiyun
279*4882a593Smuzhiyun if (WARN_ON(!ch->cs_enabled))
280*4882a593Smuzhiyun return;
281*4882a593Smuzhiyun
282*4882a593Smuzhiyun sh_tmu_disable(ch);
283*4882a593Smuzhiyun ch->cs_enabled = false;
284*4882a593Smuzhiyun }
285*4882a593Smuzhiyun
/*
 * System suspend hook for the clocksource. Drops this channel's
 * enable_count reference directly (rather than via sh_tmu_disable())
 * so the runtime PM state is left untouched across suspend; the
 * matching resume hook re-takes the reference.
 */
static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	/* Power down only when this was the last active user. */
	if (--ch->enable_count == 0) {
		__sh_tmu_disable(ch);
		dev_pm_genpd_suspend(&ch->tmu->pdev->dev);
	}
}
298*4882a593Smuzhiyun
/*
 * System resume hook for the clocksource. Re-takes the enable_count
 * reference dropped at suspend time and reprograms the hardware when
 * this is the first user coming back.
 */
static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	/* First user back: restore power and reprogram the channel. */
	if (ch->enable_count++ == 0) {
		dev_pm_genpd_resume(&ch->tmu->pdev->dev);
		__sh_tmu_enable(ch);
	}
}
311*4882a593Smuzhiyun
sh_tmu_register_clocksource(struct sh_tmu_channel * ch,const char * name)312*4882a593Smuzhiyun static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
313*4882a593Smuzhiyun const char *name)
314*4882a593Smuzhiyun {
315*4882a593Smuzhiyun struct clocksource *cs = &ch->cs;
316*4882a593Smuzhiyun
317*4882a593Smuzhiyun cs->name = name;
318*4882a593Smuzhiyun cs->rating = 200;
319*4882a593Smuzhiyun cs->read = sh_tmu_clocksource_read;
320*4882a593Smuzhiyun cs->enable = sh_tmu_clocksource_enable;
321*4882a593Smuzhiyun cs->disable = sh_tmu_clocksource_disable;
322*4882a593Smuzhiyun cs->suspend = sh_tmu_clocksource_suspend;
323*4882a593Smuzhiyun cs->resume = sh_tmu_clocksource_resume;
324*4882a593Smuzhiyun cs->mask = CLOCKSOURCE_MASK(32);
325*4882a593Smuzhiyun cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
326*4882a593Smuzhiyun
327*4882a593Smuzhiyun dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
328*4882a593Smuzhiyun ch->index);
329*4882a593Smuzhiyun
330*4882a593Smuzhiyun clocksource_register_hz(cs, ch->tmu->rate);
331*4882a593Smuzhiyun return 0;
332*4882a593Smuzhiyun }
333*4882a593Smuzhiyun
/* Retrieve the channel that embeds the given clock event device. */
static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_channel, ced);
}
338*4882a593Smuzhiyun
sh_tmu_clock_event_start(struct sh_tmu_channel * ch,int periodic)339*4882a593Smuzhiyun static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
340*4882a593Smuzhiyun {
341*4882a593Smuzhiyun sh_tmu_enable(ch);
342*4882a593Smuzhiyun
343*4882a593Smuzhiyun if (periodic) {
344*4882a593Smuzhiyun ch->periodic = (ch->tmu->rate + HZ/2) / HZ;
345*4882a593Smuzhiyun sh_tmu_set_next(ch, ch->periodic, 1);
346*4882a593Smuzhiyun }
347*4882a593Smuzhiyun }
348*4882a593Smuzhiyun
/*
 * Clock event shutdown callback: release the channel, but only if a
 * periodic or oneshot mode had actually been programmed.
 */
static int sh_tmu_clock_event_shutdown(struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	if (clockevent_state_periodic(ced) || clockevent_state_oneshot(ced))
		sh_tmu_disable(ch);

	return 0;
}
357*4882a593Smuzhiyun
sh_tmu_clock_event_set_state(struct clock_event_device * ced,int periodic)358*4882a593Smuzhiyun static int sh_tmu_clock_event_set_state(struct clock_event_device *ced,
359*4882a593Smuzhiyun int periodic)
360*4882a593Smuzhiyun {
361*4882a593Smuzhiyun struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
362*4882a593Smuzhiyun
363*4882a593Smuzhiyun /* deal with old setting first */
364*4882a593Smuzhiyun if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
365*4882a593Smuzhiyun sh_tmu_disable(ch);
366*4882a593Smuzhiyun
367*4882a593Smuzhiyun dev_info(&ch->tmu->pdev->dev, "ch%u: used for %s clock events\n",
368*4882a593Smuzhiyun ch->index, periodic ? "periodic" : "oneshot");
369*4882a593Smuzhiyun sh_tmu_clock_event_start(ch, periodic);
370*4882a593Smuzhiyun return 0;
371*4882a593Smuzhiyun }
372*4882a593Smuzhiyun
/* set_state_oneshot callback: thin wrapper over the common state switch. */
static int sh_tmu_clock_event_set_oneshot(struct clock_event_device *ced)
{
	return sh_tmu_clock_event_set_state(ced, 0);
}
377*4882a593Smuzhiyun
/* set_state_periodic callback: thin wrapper over the common state switch. */
static int sh_tmu_clock_event_set_periodic(struct clock_event_device *ced)
{
	return sh_tmu_clock_event_set_state(ced, 1);
}
382*4882a593Smuzhiyun
/*
 * set_next_event callback: reprogram the channel to fire after @delta
 * input clocks. Only valid in oneshot mode. Always returns 0.
 */
static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	BUG_ON(!clockevent_state_oneshot(ced));

	/* program new delta value */
	sh_tmu_set_next(ch, delta, 0);
	return 0;
}
394*4882a593Smuzhiyun
/* Clock event suspend hook: forward to the generic PM domain. */
static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
	dev_pm_genpd_suspend(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}
399*4882a593Smuzhiyun
/* Clock event resume hook: forward to the generic PM domain. */
static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
	dev_pm_genpd_resume(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}
404*4882a593Smuzhiyun
/*
 * Fill in and register the channel's clock event device, then request
 * its underflow interrupt. A failed request_irq() is only reported; the
 * already registered clock event device is not torn down.
 */
static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int ret;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 200;
	ced->cpumask = cpu_possible_mask;
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_state_shutdown = sh_tmu_clock_event_shutdown;
	ced->set_state_periodic = sh_tmu_clock_event_set_periodic;
	ced->set_state_oneshot = sh_tmu_clock_event_set_oneshot;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);

	/* Minimum delta 0x300 clocks, maximum the full 32-bit counter. */
	clockevents_config_and_register(ced, ch->tmu->rate, 0x300, 0xffffffff);

	ret = request_irq(ch->irq, sh_tmu_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->tmu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, ch->irq);
		return;
	}
}
437*4882a593Smuzhiyun
/*
 * Register the channel in its assigned role. A channel flagged for both
 * roles becomes a clock event device only (clockevent takes precedence).
 * Always returns 0.
 */
static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
			   bool clockevent, bool clocksource)
{
	if (clockevent) {
		ch->tmu->has_clockevent = true;
		sh_tmu_register_clockevent(ch, name);
		return 0;
	}

	if (clocksource) {
		ch->tmu->has_clocksource = true;
		sh_tmu_register_clocksource(ch, name);
	}

	return 0;
}
451*4882a593Smuzhiyun
/*
 * Initialize one channel and register it in its role. Channels with no
 * assigned role are left untouched. Returns 0 on success or a negative
 * error code from platform_get_irq().
 */
static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
				bool clockevent, bool clocksource,
				struct sh_tmu_device *tmu)
{
	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->tmu = tmu;
	ch->index = index;
	ch->cs_enabled = false;
	ch->enable_count = 0;

	/*
	 * Channel register banks are 12 bytes apart. The SH3 variant puts
	 * the first bank at offset 4 from the block base, the others at 8.
	 */
	ch->base = tmu->mapbase + (tmu->model == SH_TMU_SH3 ? 4 : 8) +
		   ch->index * 12;

	ch->irq = platform_get_irq(tmu->pdev, index);
	if (ch->irq < 0)
		return ch->irq;

	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
			       clockevent, clocksource);
}
478*4882a593Smuzhiyun
sh_tmu_map_memory(struct sh_tmu_device * tmu)479*4882a593Smuzhiyun static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
480*4882a593Smuzhiyun {
481*4882a593Smuzhiyun struct resource *res;
482*4882a593Smuzhiyun
483*4882a593Smuzhiyun res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
484*4882a593Smuzhiyun if (!res) {
485*4882a593Smuzhiyun dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
486*4882a593Smuzhiyun return -ENXIO;
487*4882a593Smuzhiyun }
488*4882a593Smuzhiyun
489*4882a593Smuzhiyun tmu->mapbase = ioremap(res->start, resource_size(res));
490*4882a593Smuzhiyun if (tmu->mapbase == NULL)
491*4882a593Smuzhiyun return -ENXIO;
492*4882a593Smuzhiyun
493*4882a593Smuzhiyun return 0;
494*4882a593Smuzhiyun }
495*4882a593Smuzhiyun
/*
 * Parse device tree configuration. The model is always SH_TMU in the DT
 * case; the channel count defaults to 3 and may be overridden by the
 * (oddly '#'-prefixed, kept for compatibility) "#renesas,channels"
 * property. Only 2 or 3 channels are accepted.
 */
static int sh_tmu_parse_dt(struct sh_tmu_device *tmu)
{
	struct device_node *np = tmu->pdev->dev.of_node;

	tmu->model = SH_TMU;
	tmu->num_channels = 3;

	of_property_read_u32(np, "#renesas,channels", &tmu->num_channels);

	if (tmu->num_channels != 2 && tmu->num_channels != 3) {
		dev_err(&tmu->pdev->dev, "invalid number of channels %u\n",
			tmu->num_channels);
		return -EINVAL;
	}

	return 0;
}
513*4882a593Smuzhiyun
/*
 * One-time device setup: parse configuration (DT or platform data),
 * acquire and rate-probe the functional clock, map the registers and
 * set up all channels. On failure every resource acquired so far is
 * released via the goto cleanup chain. Returns 0 or a negative errno.
 */
static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
	unsigned int i;
	int ret;

	tmu->pdev = pdev;

	raw_spin_lock_init(&tmu->lock);

	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		ret = sh_tmu_parse_dt(tmu);
		if (ret < 0)
			return ret;
	} else if (pdev->dev.platform_data) {
		const struct platform_device_id *id = pdev->id_entry;
		struct sh_timer_config *cfg = pdev->dev.platform_data;

		tmu->model = id->driver_data;
		tmu->num_channels = hweight8(cfg->channels_mask);
	} else {
		dev_err(&tmu->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	/* Get hold of clock. */
	tmu->clk = clk_get(&tmu->pdev->dev, "fck");
	if (IS_ERR(tmu->clk)) {
		dev_err(&tmu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(tmu->clk);
	}

	ret = clk_prepare(tmu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Determine clock rate: briefly enable the clock to read it. */
	ret = clk_enable(tmu->clk);
	if (ret < 0)
		goto err_clk_unprepare;

	/* The counters tick at the input clock divided by 4. */
	tmu->rate = clk_get_rate(tmu->clk) / 4;
	clk_disable(tmu->clk);

	/* Map the memory resource. */
	ret = sh_tmu_map_memory(tmu);
	if (ret < 0) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	tmu->channels = kcalloc(tmu->num_channels, sizeof(*tmu->channels),
				GFP_KERNEL);
	if (tmu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * Use the first channel as a clock event device and the second channel
	 * as a clock source.
	 */
	for (i = 0; i < tmu->num_channels; ++i) {
		ret = sh_tmu_channel_setup(&tmu->channels[i], i,
					   i == 0, i == 1, tmu);
		if (ret < 0)
			goto err_unmap;
	}

	platform_set_drvdata(pdev, tmu);

	return 0;

err_unmap:
	kfree(tmu->channels);
	iounmap(tmu->mapbase);
err_clk_unprepare:
	clk_unprepare(tmu->clk);
err_clk_put:
	clk_put(tmu->clk);
	return ret;
}
596*4882a593Smuzhiyun
/*
 * Platform driver probe. On SuperH the device may already have been set
 * up as an early platform device ("earlytimer"); in that case drvdata is
 * non-NULL and only the runtime PM state is (re)established. Otherwise
 * the device state is allocated and initialized here.
 */
static int sh_tmu_probe(struct platform_device *pdev)
{
	/* Non-NULL when the early platform probe already ran for this device. */
	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
	int ret;

	if (!is_sh_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (tmu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
	if (tmu == NULL)
		return -ENOMEM;

	ret = sh_tmu_setup(tmu, pdev);
	if (ret) {
		kfree(tmu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}

	/* Early probe: runtime PM is not available yet, stop here. */
	if (is_sh_early_platform_device(pdev))
		return 0;

 out:
	/*
	 * Timers used as clockevent/clocksource are accessed from interrupt
	 * context, so their runtime PM callbacks must be IRQ safe.
	 */
	if (tmu->has_clockevent || tmu->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
634*4882a593Smuzhiyun
/* Removal is refused: registered clockevents/clocksources cannot be torn down. */
static int sh_tmu_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}
639*4882a593Smuzhiyun
/* Platform bus IDs; driver_data selects the hardware model. */
static const struct platform_device_id sh_tmu_id_table[] = {
	{ "sh-tmu", SH_TMU },
	{ "sh-tmu-sh3", SH_TMU_SH3 },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);

/* Device tree match table (used only when CONFIG_OF is enabled). */
static const struct of_device_id sh_tmu_of_table[] __maybe_unused = {
	{ .compatible = "renesas,tmu" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_tmu_of_table);
652*4882a593Smuzhiyun
/* Platform driver glue; matched by name, platform ID table or DT table. */
static struct platform_driver sh_tmu_device_driver = {
	.probe		= sh_tmu_probe,
	.remove		= sh_tmu_remove,
	.driver		= {
		.name	= "sh_tmu",
		.of_match_table = of_match_ptr(sh_tmu_of_table),
	},
	.id_table	= sh_tmu_id_table,
};
662*4882a593Smuzhiyun
/* Module init: register the platform driver. */
static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}

/* Module exit: unregister the platform driver. */
static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}
672*4882a593Smuzhiyun
#ifdef CONFIG_SUPERH
/* On SuperH, also probe as an early platform device for earlytimer use. */
sh_early_platform_init("earlytimer", &sh_tmu_device_driver);
#endif

/* Register early (subsys_initcall) so timers are available to consumers. */
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");
683