// SPDX-License-Identifier: GPL-2.0
/*
 * Ingenic SoCs TCU IRQ driver
 * Copyright (C) 2019 Paul Cercueil <paul@crapouillou.net>
 * Copyright (C) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
 */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #include <linux/bitops.h>
9*4882a593Smuzhiyun #include <linux/clk.h>
10*4882a593Smuzhiyun #include <linux/clockchips.h>
11*4882a593Smuzhiyun #include <linux/clocksource.h>
12*4882a593Smuzhiyun #include <linux/interrupt.h>
13*4882a593Smuzhiyun #include <linux/mfd/ingenic-tcu.h>
14*4882a593Smuzhiyun #include <linux/mfd/syscon.h>
15*4882a593Smuzhiyun #include <linux/of.h>
16*4882a593Smuzhiyun #include <linux/of_address.h>
17*4882a593Smuzhiyun #include <linux/of_irq.h>
18*4882a593Smuzhiyun #include <linux/of_platform.h>
19*4882a593Smuzhiyun #include <linux/overflow.h>
20*4882a593Smuzhiyun #include <linux/platform_device.h>
21*4882a593Smuzhiyun #include <linux/regmap.h>
22*4882a593Smuzhiyun #include <linux/sched_clock.h>
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun #include <dt-bindings/clock/ingenic,tcu.h>
25*4882a593Smuzhiyun
/* Per-CPU call-single-data used to bounce the clockevent callback to the owning CPU. */
static DEFINE_PER_CPU(call_single_data_t, ingenic_cevt_csd);

/* Per-SoC description: how many TCU channels the hardware provides. */
struct ingenic_soc_info {
	unsigned int num_channels;
};

/*
 * One per-CPU clock-event timer.
 * @cpu: CPU this timer delivers events to
 * @channel: TCU hardware channel backing this timer
 * @cevt: the registered clock_event_device
 * @clk: clock feeding this TCU channel
 * @name: "TCUn" label used for both the cevt and its IRQ
 */
struct ingenic_tcu_timer {
	unsigned int cpu;
	unsigned int channel;
	struct clock_event_device cevt;
	struct clk *clk;
	char name[8];
};

/*
 * Driver state.
 * @map: regmap over the TCU register block
 * @np: TCU device-tree node
 * @cs_clk: clock of the clocksource channel
 * @cs_channel: TCU channel used as clocksource / sched_clock
 * @cs: the registered clocksource
 * @pwm_channels_mask: channels reserved for PWM use (skipped by this driver)
 * @timers: per-CPU clock-event timers (flex array, num_possible_cpus() entries)
 */
struct ingenic_tcu {
	struct regmap *map;
	struct device_node *np;
	struct clk *cs_clk;
	unsigned int cs_channel;
	struct clocksource cs;
	unsigned long pwm_channels_mask;
	struct ingenic_tcu_timer timers[];
};

/* Singleton; also read from the notrace sched_clock path below. */
static struct ingenic_tcu *ingenic_tcu;
51*4882a593Smuzhiyun
ingenic_tcu_timer_read(void)52*4882a593Smuzhiyun static u64 notrace ingenic_tcu_timer_read(void)
53*4882a593Smuzhiyun {
54*4882a593Smuzhiyun struct ingenic_tcu *tcu = ingenic_tcu;
55*4882a593Smuzhiyun unsigned int count;
56*4882a593Smuzhiyun
57*4882a593Smuzhiyun regmap_read(tcu->map, TCU_REG_TCNTc(tcu->cs_channel), &count);
58*4882a593Smuzhiyun
59*4882a593Smuzhiyun return count;
60*4882a593Smuzhiyun }
61*4882a593Smuzhiyun
/* clocksource ->read callback: delegate to the shared counter read. */
static u64 notrace ingenic_tcu_timer_cs_read(struct clocksource *cs)
{
	return ingenic_tcu_timer_read();
}
66*4882a593Smuzhiyun
67*4882a593Smuzhiyun static inline struct ingenic_tcu *
to_ingenic_tcu(struct ingenic_tcu_timer * timer)68*4882a593Smuzhiyun to_ingenic_tcu(struct ingenic_tcu_timer *timer)
69*4882a593Smuzhiyun {
70*4882a593Smuzhiyun return container_of(timer, struct ingenic_tcu, timers[timer->cpu]);
71*4882a593Smuzhiyun }
72*4882a593Smuzhiyun
73*4882a593Smuzhiyun static inline struct ingenic_tcu_timer *
to_ingenic_tcu_timer(struct clock_event_device * evt)74*4882a593Smuzhiyun to_ingenic_tcu_timer(struct clock_event_device *evt)
75*4882a593Smuzhiyun {
76*4882a593Smuzhiyun return container_of(evt, struct ingenic_tcu_timer, cevt);
77*4882a593Smuzhiyun }
78*4882a593Smuzhiyun
ingenic_tcu_cevt_set_state_shutdown(struct clock_event_device * evt)79*4882a593Smuzhiyun static int ingenic_tcu_cevt_set_state_shutdown(struct clock_event_device *evt)
80*4882a593Smuzhiyun {
81*4882a593Smuzhiyun struct ingenic_tcu_timer *timer = to_ingenic_tcu_timer(evt);
82*4882a593Smuzhiyun struct ingenic_tcu *tcu = to_ingenic_tcu(timer);
83*4882a593Smuzhiyun
84*4882a593Smuzhiyun regmap_write(tcu->map, TCU_REG_TECR, BIT(timer->channel));
85*4882a593Smuzhiyun
86*4882a593Smuzhiyun return 0;
87*4882a593Smuzhiyun }
88*4882a593Smuzhiyun
ingenic_tcu_cevt_set_next(unsigned long next,struct clock_event_device * evt)89*4882a593Smuzhiyun static int ingenic_tcu_cevt_set_next(unsigned long next,
90*4882a593Smuzhiyun struct clock_event_device *evt)
91*4882a593Smuzhiyun {
92*4882a593Smuzhiyun struct ingenic_tcu_timer *timer = to_ingenic_tcu_timer(evt);
93*4882a593Smuzhiyun struct ingenic_tcu *tcu = to_ingenic_tcu(timer);
94*4882a593Smuzhiyun
95*4882a593Smuzhiyun if (next > 0xffff)
96*4882a593Smuzhiyun return -EINVAL;
97*4882a593Smuzhiyun
98*4882a593Smuzhiyun regmap_write(tcu->map, TCU_REG_TDFRc(timer->channel), next);
99*4882a593Smuzhiyun regmap_write(tcu->map, TCU_REG_TCNTc(timer->channel), 0);
100*4882a593Smuzhiyun regmap_write(tcu->map, TCU_REG_TESR, BIT(timer->channel));
101*4882a593Smuzhiyun
102*4882a593Smuzhiyun return 0;
103*4882a593Smuzhiyun }
104*4882a593Smuzhiyun
ingenic_per_cpu_event_handler(void * info)105*4882a593Smuzhiyun static void ingenic_per_cpu_event_handler(void *info)
106*4882a593Smuzhiyun {
107*4882a593Smuzhiyun struct clock_event_device *cevt = (struct clock_event_device *) info;
108*4882a593Smuzhiyun
109*4882a593Smuzhiyun cevt->event_handler(cevt);
110*4882a593Smuzhiyun }
111*4882a593Smuzhiyun
ingenic_tcu_cevt_cb(int irq,void * dev_id)112*4882a593Smuzhiyun static irqreturn_t ingenic_tcu_cevt_cb(int irq, void *dev_id)
113*4882a593Smuzhiyun {
114*4882a593Smuzhiyun struct ingenic_tcu_timer *timer = dev_id;
115*4882a593Smuzhiyun struct ingenic_tcu *tcu = to_ingenic_tcu(timer);
116*4882a593Smuzhiyun call_single_data_t *csd;
117*4882a593Smuzhiyun
118*4882a593Smuzhiyun regmap_write(tcu->map, TCU_REG_TECR, BIT(timer->channel));
119*4882a593Smuzhiyun
120*4882a593Smuzhiyun if (timer->cevt.event_handler) {
121*4882a593Smuzhiyun csd = &per_cpu(ingenic_cevt_csd, timer->cpu);
122*4882a593Smuzhiyun csd->info = (void *) &timer->cevt;
123*4882a593Smuzhiyun csd->func = ingenic_per_cpu_event_handler;
124*4882a593Smuzhiyun smp_call_function_single_async(timer->cpu, csd);
125*4882a593Smuzhiyun }
126*4882a593Smuzhiyun
127*4882a593Smuzhiyun return IRQ_HANDLED;
128*4882a593Smuzhiyun }
129*4882a593Smuzhiyun
ingenic_tcu_get_clock(struct device_node * np,int id)130*4882a593Smuzhiyun static struct clk *ingenic_tcu_get_clock(struct device_node *np, int id)
131*4882a593Smuzhiyun {
132*4882a593Smuzhiyun struct of_phandle_args args;
133*4882a593Smuzhiyun
134*4882a593Smuzhiyun args.np = np;
135*4882a593Smuzhiyun args.args_count = 1;
136*4882a593Smuzhiyun args.args[0] = id;
137*4882a593Smuzhiyun
138*4882a593Smuzhiyun return of_clk_get_from_provider(&args);
139*4882a593Smuzhiyun }
140*4882a593Smuzhiyun
/*
 * cpuhp "online" callback: give @cpu its own TCU channel as a one-shot
 * clock_event_device.  Runs on the CPU being brought online.
 *
 * Returns 0 on success or a negative errno; on failure every resource
 * acquired so far (clock, IRQ mapping) is released via the goto chain.
 */
static int ingenic_tcu_setup_cevt(unsigned int cpu)
{
	struct ingenic_tcu *tcu = ingenic_tcu;
	struct ingenic_tcu_timer *timer = &tcu->timers[cpu];
	unsigned int timer_virq;
	struct irq_domain *domain;
	unsigned long rate;
	int err;

	timer->clk = ingenic_tcu_get_clock(tcu->np, timer->channel);
	if (IS_ERR(timer->clk))
		return PTR_ERR(timer->clk);

	err = clk_prepare_enable(timer->clk);
	if (err)
		goto err_clk_put;

	/* A zero rate would make the clockevent configuration meaningless */
	rate = clk_get_rate(timer->clk);
	if (!rate) {
		err = -EINVAL;
		goto err_clk_disable;
	}

	/* The TCU node is also an IRQ controller; map this channel's IRQ */
	domain = irq_find_host(tcu->np);
	if (!domain) {
		err = -ENODEV;
		goto err_clk_disable;
	}

	timer_virq = irq_create_mapping(domain, timer->channel);
	if (!timer_virq) {
		err = -EINVAL;
		goto err_clk_disable;
	}

	snprintf(timer->name, sizeof(timer->name), "TCU%u", timer->channel);

	err = request_irq(timer_virq, ingenic_tcu_cevt_cb, IRQF_TIMER,
			  timer->name, timer);
	if (err)
		goto err_irq_dispose_mapping;

	/* We run on the CPU being onlined, so smp_processor_id() == cpu */
	timer->cpu = smp_processor_id();
	timer->cevt.cpumask = cpumask_of(smp_processor_id());
	timer->cevt.features = CLOCK_EVT_FEAT_ONESHOT;
	timer->cevt.name = timer->name;
	timer->cevt.rating = 200;
	timer->cevt.set_state_shutdown = ingenic_tcu_cevt_set_state_shutdown;
	timer->cevt.set_next_event = ingenic_tcu_cevt_set_next;

	/* 16-bit counter: max programmable delta is 0xffff ticks */
	clockevents_config_and_register(&timer->cevt, rate, 10, 0xffff);

	return 0;

err_irq_dispose_mapping:
	irq_dispose_mapping(timer_virq);
err_clk_disable:
	clk_disable_unprepare(timer->clk);
err_clk_put:
	clk_put(timer->clk);
	return err;
}
203*4882a593Smuzhiyun
/*
 * Register the TCU channel reserved for the clocksource.
 *
 * The channel is reset, programmed to count over the full 16-bit range
 * (TDFR = 0xffff), started, and registered as a continuous clocksource
 * at the channel clock's rate.
 *
 * Returns 0 on success or a negative errno; the channel clock is
 * released again on failure.
 */
static int __init ingenic_tcu_clocksource_init(struct device_node *np,
					       struct ingenic_tcu *tcu)
{
	unsigned int channel = tcu->cs_channel;
	struct clocksource *cs = &tcu->cs;
	unsigned long rate;
	int err;

	tcu->cs_clk = ingenic_tcu_get_clock(np, channel);
	if (IS_ERR(tcu->cs_clk))
		return PTR_ERR(tcu->cs_clk);

	err = clk_prepare_enable(tcu->cs_clk);
	if (err)
		goto err_clk_put;

	/* A zero rate cannot be registered with clocksource_register_hz() */
	rate = clk_get_rate(tcu->cs_clk);
	if (!rate) {
		err = -EINVAL;
		goto err_clk_disable;
	}

	/* Reset channel: clear all writable TCSR bits */
	regmap_update_bits(tcu->map, TCU_REG_TCSRc(channel),
			   0xffff & ~TCU_TCSR_RESERVED_BITS, 0);

	/* Reset counter: full-range compare value, count from zero */
	regmap_write(tcu->map, TCU_REG_TDFRc(channel), 0xffff);
	regmap_write(tcu->map, TCU_REG_TCNTc(channel), 0);

	/* Enable channel */
	regmap_write(tcu->map, TCU_REG_TESR, BIT(channel));

	cs->name = "ingenic-timer";
	cs->rating = 200;
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
	cs->mask = CLOCKSOURCE_MASK(16);
	cs->read = ingenic_tcu_timer_cs_read;

	err = clocksource_register_hz(cs, rate);
	if (err)
		goto err_clk_disable;

	return 0;

err_clk_disable:
	clk_disable_unprepare(tcu->cs_clk);
err_clk_put:
	clk_put(tcu->cs_clk);
	return err;
}
255*4882a593Smuzhiyun
/* JZ4740-class SoCs (JZ4740, JZ4770, X1000) provide 8 TCU channels. */
static const struct ingenic_soc_info jz4740_soc_info = {
	.num_channels = 8,
};

/* The JZ4725B only provides 6 TCU channels. */
static const struct ingenic_soc_info jz4725b_soc_info = {
	.num_channels = 6,
};

static const struct of_device_id ingenic_tcu_of_match[] = {
	{ .compatible = "ingenic,jz4740-tcu", .data = &jz4740_soc_info, },
	{ .compatible = "ingenic,jz4725b-tcu", .data = &jz4725b_soc_info, },
	{ .compatible = "ingenic,jz4770-tcu", .data = &jz4740_soc_info, },
	{ .compatible = "ingenic,x1000-tcu", .data = &jz4740_soc_info, },
	{ /* sentinel */ }
};
271*4882a593Smuzhiyun
ingenic_tcu_init(struct device_node * np)272*4882a593Smuzhiyun static int __init ingenic_tcu_init(struct device_node *np)
273*4882a593Smuzhiyun {
274*4882a593Smuzhiyun const struct of_device_id *id = of_match_node(ingenic_tcu_of_match, np);
275*4882a593Smuzhiyun const struct ingenic_soc_info *soc_info = id->data;
276*4882a593Smuzhiyun struct ingenic_tcu_timer *timer;
277*4882a593Smuzhiyun struct ingenic_tcu *tcu;
278*4882a593Smuzhiyun struct regmap *map;
279*4882a593Smuzhiyun unsigned int cpu;
280*4882a593Smuzhiyun int ret, last_bit = -1;
281*4882a593Smuzhiyun long rate;
282*4882a593Smuzhiyun
283*4882a593Smuzhiyun of_node_clear_flag(np, OF_POPULATED);
284*4882a593Smuzhiyun
285*4882a593Smuzhiyun map = device_node_to_regmap(np);
286*4882a593Smuzhiyun if (IS_ERR(map))
287*4882a593Smuzhiyun return PTR_ERR(map);
288*4882a593Smuzhiyun
289*4882a593Smuzhiyun tcu = kzalloc(struct_size(tcu, timers, num_possible_cpus()),
290*4882a593Smuzhiyun GFP_KERNEL);
291*4882a593Smuzhiyun if (!tcu)
292*4882a593Smuzhiyun return -ENOMEM;
293*4882a593Smuzhiyun
294*4882a593Smuzhiyun /*
295*4882a593Smuzhiyun * Enable all TCU channels for PWM use by default except channels 0/1,
296*4882a593Smuzhiyun * and channel 2 if target CPU is JZ4780/X2000 and SMP is selected.
297*4882a593Smuzhiyun */
298*4882a593Smuzhiyun tcu->pwm_channels_mask = GENMASK(soc_info->num_channels - 1,
299*4882a593Smuzhiyun num_possible_cpus() + 1);
300*4882a593Smuzhiyun of_property_read_u32(np, "ingenic,pwm-channels-mask",
301*4882a593Smuzhiyun (u32 *)&tcu->pwm_channels_mask);
302*4882a593Smuzhiyun
303*4882a593Smuzhiyun /* Verify that we have at least num_possible_cpus() + 1 free channels */
304*4882a593Smuzhiyun if (hweight8(tcu->pwm_channels_mask) >
305*4882a593Smuzhiyun soc_info->num_channels - num_possible_cpus() + 1) {
306*4882a593Smuzhiyun pr_crit("%s: Invalid PWM channel mask: 0x%02lx\n", __func__,
307*4882a593Smuzhiyun tcu->pwm_channels_mask);
308*4882a593Smuzhiyun ret = -EINVAL;
309*4882a593Smuzhiyun goto err_free_ingenic_tcu;
310*4882a593Smuzhiyun }
311*4882a593Smuzhiyun
312*4882a593Smuzhiyun tcu->map = map;
313*4882a593Smuzhiyun tcu->np = np;
314*4882a593Smuzhiyun ingenic_tcu = tcu;
315*4882a593Smuzhiyun
316*4882a593Smuzhiyun for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
317*4882a593Smuzhiyun timer = &tcu->timers[cpu];
318*4882a593Smuzhiyun
319*4882a593Smuzhiyun timer->cpu = cpu;
320*4882a593Smuzhiyun timer->channel = find_next_zero_bit(&tcu->pwm_channels_mask,
321*4882a593Smuzhiyun soc_info->num_channels,
322*4882a593Smuzhiyun last_bit + 1);
323*4882a593Smuzhiyun last_bit = timer->channel;
324*4882a593Smuzhiyun }
325*4882a593Smuzhiyun
326*4882a593Smuzhiyun tcu->cs_channel = find_next_zero_bit(&tcu->pwm_channels_mask,
327*4882a593Smuzhiyun soc_info->num_channels,
328*4882a593Smuzhiyun last_bit + 1);
329*4882a593Smuzhiyun
330*4882a593Smuzhiyun ret = ingenic_tcu_clocksource_init(np, tcu);
331*4882a593Smuzhiyun if (ret) {
332*4882a593Smuzhiyun pr_crit("%s: Unable to init clocksource: %d\n", __func__, ret);
333*4882a593Smuzhiyun goto err_free_ingenic_tcu;
334*4882a593Smuzhiyun }
335*4882a593Smuzhiyun
336*4882a593Smuzhiyun /* Setup clock events on each CPU core */
337*4882a593Smuzhiyun ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "Ingenic XBurst: online",
338*4882a593Smuzhiyun ingenic_tcu_setup_cevt, NULL);
339*4882a593Smuzhiyun if (ret < 0) {
340*4882a593Smuzhiyun pr_crit("%s: Unable to start CPU timers: %d\n", __func__, ret);
341*4882a593Smuzhiyun goto err_tcu_clocksource_cleanup;
342*4882a593Smuzhiyun }
343*4882a593Smuzhiyun
344*4882a593Smuzhiyun /* Register the sched_clock at the end as there's no way to undo it */
345*4882a593Smuzhiyun rate = clk_get_rate(tcu->cs_clk);
346*4882a593Smuzhiyun sched_clock_register(ingenic_tcu_timer_read, 16, rate);
347*4882a593Smuzhiyun
348*4882a593Smuzhiyun return 0;
349*4882a593Smuzhiyun
350*4882a593Smuzhiyun err_tcu_clocksource_cleanup:
351*4882a593Smuzhiyun clocksource_unregister(&tcu->cs);
352*4882a593Smuzhiyun clk_disable_unprepare(tcu->cs_clk);
353*4882a593Smuzhiyun clk_put(tcu->cs_clk);
354*4882a593Smuzhiyun err_free_ingenic_tcu:
355*4882a593Smuzhiyun kfree(tcu);
356*4882a593Smuzhiyun return ret;
357*4882a593Smuzhiyun }
358*4882a593Smuzhiyun
/* Probed early via the timer-of framework, before platform devices exist. */
TIMER_OF_DECLARE(jz4740_tcu_intc, "ingenic,jz4740-tcu", ingenic_tcu_init);
TIMER_OF_DECLARE(jz4725b_tcu_intc, "ingenic,jz4725b-tcu", ingenic_tcu_init);
TIMER_OF_DECLARE(jz4770_tcu_intc, "ingenic,jz4770-tcu", ingenic_tcu_init);
TIMER_OF_DECLARE(x1000_tcu_intc, "ingenic,x1000-tcu", ingenic_tcu_init);
363*4882a593Smuzhiyun
ingenic_tcu_probe(struct platform_device * pdev)364*4882a593Smuzhiyun static int __init ingenic_tcu_probe(struct platform_device *pdev)
365*4882a593Smuzhiyun {
366*4882a593Smuzhiyun platform_set_drvdata(pdev, ingenic_tcu);
367*4882a593Smuzhiyun
368*4882a593Smuzhiyun return 0;
369*4882a593Smuzhiyun }
370*4882a593Smuzhiyun
ingenic_tcu_suspend(struct device * dev)371*4882a593Smuzhiyun static int __maybe_unused ingenic_tcu_suspend(struct device *dev)
372*4882a593Smuzhiyun {
373*4882a593Smuzhiyun struct ingenic_tcu *tcu = dev_get_drvdata(dev);
374*4882a593Smuzhiyun unsigned int cpu;
375*4882a593Smuzhiyun
376*4882a593Smuzhiyun clk_disable(tcu->cs_clk);
377*4882a593Smuzhiyun
378*4882a593Smuzhiyun for (cpu = 0; cpu < num_online_cpus(); cpu++)
379*4882a593Smuzhiyun clk_disable(tcu->timers[cpu].clk);
380*4882a593Smuzhiyun
381*4882a593Smuzhiyun return 0;
382*4882a593Smuzhiyun }
383*4882a593Smuzhiyun
ingenic_tcu_resume(struct device * dev)384*4882a593Smuzhiyun static int __maybe_unused ingenic_tcu_resume(struct device *dev)
385*4882a593Smuzhiyun {
386*4882a593Smuzhiyun struct ingenic_tcu *tcu = dev_get_drvdata(dev);
387*4882a593Smuzhiyun unsigned int cpu;
388*4882a593Smuzhiyun int ret;
389*4882a593Smuzhiyun
390*4882a593Smuzhiyun for (cpu = 0; cpu < num_online_cpus(); cpu++) {
391*4882a593Smuzhiyun ret = clk_enable(tcu->timers[cpu].clk);
392*4882a593Smuzhiyun if (ret)
393*4882a593Smuzhiyun goto err_timer_clk_disable;
394*4882a593Smuzhiyun }
395*4882a593Smuzhiyun
396*4882a593Smuzhiyun ret = clk_enable(tcu->cs_clk);
397*4882a593Smuzhiyun if (ret)
398*4882a593Smuzhiyun goto err_timer_clk_disable;
399*4882a593Smuzhiyun
400*4882a593Smuzhiyun return 0;
401*4882a593Smuzhiyun
402*4882a593Smuzhiyun err_timer_clk_disable:
403*4882a593Smuzhiyun for (; cpu > 0; cpu--)
404*4882a593Smuzhiyun clk_disable(tcu->timers[cpu - 1].clk);
405*4882a593Smuzhiyun return ret;
406*4882a593Smuzhiyun }
407*4882a593Smuzhiyun
static const struct dev_pm_ops __maybe_unused ingenic_tcu_pm_ops = {
	/* _noirq: We want the TCU clocks to be gated last / ungated first */
	.suspend_noirq = ingenic_tcu_suspend,
	.resume_noirq = ingenic_tcu_resume,
};

static struct platform_driver ingenic_tcu_driver = {
	.driver = {
		.name = "ingenic-tcu-timer",
#ifdef CONFIG_PM_SLEEP
		.pm = &ingenic_tcu_pm_ops,
#endif
		.of_match_table = ingenic_tcu_of_match,
	},
};
/* Bind the platform driver so the PM callbacks get the state attached above. */
builtin_platform_driver_probe(ingenic_tcu_driver, ingenic_tcu_probe);
424