1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * Copyright (C) 2007 Google, Inc.
5*4882a593Smuzhiyun * Copyright (c) 2009-2012,2014, The Linux Foundation. All rights reserved.
6*4882a593Smuzhiyun */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #include <linux/clocksource.h>
9*4882a593Smuzhiyun #include <linux/clockchips.h>
10*4882a593Smuzhiyun #include <linux/cpu.h>
11*4882a593Smuzhiyun #include <linux/init.h>
12*4882a593Smuzhiyun #include <linux/interrupt.h>
13*4882a593Smuzhiyun #include <linux/irq.h>
14*4882a593Smuzhiyun #include <linux/io.h>
15*4882a593Smuzhiyun #include <linux/of.h>
16*4882a593Smuzhiyun #include <linux/of_address.h>
17*4882a593Smuzhiyun #include <linux/of_irq.h>
18*4882a593Smuzhiyun #include <linux/sched_clock.h>
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun #include <asm/delay.h>
21*4882a593Smuzhiyun
/* Register offsets, relative to each timer's register block */
#define TIMER_MATCH_VAL 0x0000
#define TIMER_COUNT_VAL 0x0004
#define TIMER_ENABLE 0x0008
#define TIMER_ENABLE_CLR_ON_MATCH_EN BIT(1)
#define TIMER_ENABLE_EN BIT(0)
#define TIMER_CLEAR 0x000C
#define DGT_CLK_CTL 0x10
#define DGT_CLK_CTL_DIV_4 0x3
/* Status bit: a GPT0 counter clear is still propagating through the HW */
#define TIMER_STS_GPT0_CLR_PEND BIT(10)

/* GPT (general purpose timer) tick rate — 32.768 kHz sleep clock */
#define GPT_HZ 32768

static void __iomem *event_base;	/* GPT0 registers, used for the clockevent */
static void __iomem *sts_base;		/* status register base; NULL when absent */
36*4882a593Smuzhiyun
msm_timer_interrupt(int irq,void * dev_id)37*4882a593Smuzhiyun static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
38*4882a593Smuzhiyun {
39*4882a593Smuzhiyun struct clock_event_device *evt = dev_id;
40*4882a593Smuzhiyun /* Stop the timer tick */
41*4882a593Smuzhiyun if (clockevent_state_oneshot(evt)) {
42*4882a593Smuzhiyun u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
43*4882a593Smuzhiyun ctrl &= ~TIMER_ENABLE_EN;
44*4882a593Smuzhiyun writel_relaxed(ctrl, event_base + TIMER_ENABLE);
45*4882a593Smuzhiyun }
46*4882a593Smuzhiyun evt->event_handler(evt);
47*4882a593Smuzhiyun return IRQ_HANDLED;
48*4882a593Smuzhiyun }
49*4882a593Smuzhiyun
/*
 * Program the next clockevent expiry, 'cycles' GPT ticks from now.
 *
 * The hardware sequence is order-sensitive: disable the timer, clear the
 * counter, write the new match value, wait for any pending counter clear
 * to drain (on SoCs exposing a status register), and only then re-enable.
 */
static int msm_timer_set_next_event(unsigned long cycles,
				    struct clock_event_device *evt)
{
	u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);

	ctrl &= ~TIMER_ENABLE_EN;
	writel_relaxed(ctrl, event_base + TIMER_ENABLE);

	/* Any write clears the counter; the value written appears to be
	 * ignored by the hardware — TODO confirm against the TRM. */
	writel_relaxed(ctrl, event_base + TIMER_CLEAR);
	writel_relaxed(cycles, event_base + TIMER_MATCH_VAL);

	/* Wait for the clear to complete before re-arming, otherwise the
	 * counter could be wiped after the timer is enabled. */
	if (sts_base)
		while (readl_relaxed(sts_base) & TIMER_STS_GPT0_CLR_PEND)
			cpu_relax();

	writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE);
	return 0;
}
68*4882a593Smuzhiyun
msm_timer_shutdown(struct clock_event_device * evt)69*4882a593Smuzhiyun static int msm_timer_shutdown(struct clock_event_device *evt)
70*4882a593Smuzhiyun {
71*4882a593Smuzhiyun u32 ctrl;
72*4882a593Smuzhiyun
73*4882a593Smuzhiyun ctrl = readl_relaxed(event_base + TIMER_ENABLE);
74*4882a593Smuzhiyun ctrl &= ~(TIMER_ENABLE_EN | TIMER_ENABLE_CLR_ON_MATCH_EN);
75*4882a593Smuzhiyun writel_relaxed(ctrl, event_base + TIMER_ENABLE);
76*4882a593Smuzhiyun return 0;
77*4882a593Smuzhiyun }
78*4882a593Smuzhiyun
/* Per-cpu clockevent devices, allocated in msm_timer_init() */
static struct clock_event_device __percpu *msm_evt;

/* CPU0's DGT register block, used for the clocksource */
static void __iomem *source_base;
82*4882a593Smuzhiyun
msm_read_timer_count(struct clocksource * cs)83*4882a593Smuzhiyun static notrace u64 msm_read_timer_count(struct clocksource *cs)
84*4882a593Smuzhiyun {
85*4882a593Smuzhiyun return readl_relaxed(source_base + TIMER_COUNT_VAL);
86*4882a593Smuzhiyun }
87*4882a593Smuzhiyun
/* DGT free-running 32-bit counter, registered as the system clocksource */
static struct clocksource msm_clocksource = {
	.name = "dg_timer",
	.rating = 300,
	.read = msm_read_timer_count,
	.mask = CLOCKSOURCE_MASK(32),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

static int msm_timer_irq;	/* clockevent interrupt number */
static int msm_timer_has_ppi;	/* non-zero when the irq is a per-cpu PPI */
98*4882a593Smuzhiyun
msm_local_timer_starting_cpu(unsigned int cpu)99*4882a593Smuzhiyun static int msm_local_timer_starting_cpu(unsigned int cpu)
100*4882a593Smuzhiyun {
101*4882a593Smuzhiyun struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);
102*4882a593Smuzhiyun int err;
103*4882a593Smuzhiyun
104*4882a593Smuzhiyun evt->irq = msm_timer_irq;
105*4882a593Smuzhiyun evt->name = "msm_timer";
106*4882a593Smuzhiyun evt->features = CLOCK_EVT_FEAT_ONESHOT;
107*4882a593Smuzhiyun evt->rating = 200;
108*4882a593Smuzhiyun evt->set_state_shutdown = msm_timer_shutdown;
109*4882a593Smuzhiyun evt->set_state_oneshot = msm_timer_shutdown;
110*4882a593Smuzhiyun evt->tick_resume = msm_timer_shutdown;
111*4882a593Smuzhiyun evt->set_next_event = msm_timer_set_next_event;
112*4882a593Smuzhiyun evt->cpumask = cpumask_of(cpu);
113*4882a593Smuzhiyun
114*4882a593Smuzhiyun clockevents_config_and_register(evt, GPT_HZ, 4, 0xffffffff);
115*4882a593Smuzhiyun
116*4882a593Smuzhiyun if (msm_timer_has_ppi) {
117*4882a593Smuzhiyun enable_percpu_irq(evt->irq, IRQ_TYPE_EDGE_RISING);
118*4882a593Smuzhiyun } else {
119*4882a593Smuzhiyun err = request_irq(evt->irq, msm_timer_interrupt,
120*4882a593Smuzhiyun IRQF_TIMER | IRQF_NOBALANCING |
121*4882a593Smuzhiyun IRQF_TRIGGER_RISING, "gp_timer", evt);
122*4882a593Smuzhiyun if (err)
123*4882a593Smuzhiyun pr_err("request_irq failed\n");
124*4882a593Smuzhiyun }
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun return 0;
127*4882a593Smuzhiyun }
128*4882a593Smuzhiyun
msm_local_timer_dying_cpu(unsigned int cpu)129*4882a593Smuzhiyun static int msm_local_timer_dying_cpu(unsigned int cpu)
130*4882a593Smuzhiyun {
131*4882a593Smuzhiyun struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);
132*4882a593Smuzhiyun
133*4882a593Smuzhiyun evt->set_state_shutdown(evt);
134*4882a593Smuzhiyun disable_percpu_irq(evt->irq);
135*4882a593Smuzhiyun return 0;
136*4882a593Smuzhiyun }
137*4882a593Smuzhiyun
msm_sched_clock_read(void)138*4882a593Smuzhiyun static u64 notrace msm_sched_clock_read(void)
139*4882a593Smuzhiyun {
140*4882a593Smuzhiyun return msm_clocksource.read(&msm_clocksource);
141*4882a593Smuzhiyun }
142*4882a593Smuzhiyun
msm_read_current_timer(void)143*4882a593Smuzhiyun static unsigned long msm_read_current_timer(void)
144*4882a593Smuzhiyun {
145*4882a593Smuzhiyun return msm_clocksource.read(&msm_clocksource);
146*4882a593Smuzhiyun }
147*4882a593Smuzhiyun
/* Timer-based udelay backend; .freq is filled in by msm_timer_init() */
static struct delay_timer msm_delay_timer = {
	.read_current_timer = msm_read_current_timer,
};
151*4882a593Smuzhiyun
/*
 * Common init: set up the per-cpu GPT clockevents (via cpuhp callbacks)
 * and register the DGT as clocksource, sched_clock and udelay timebase.
 *
 * Error flow is deliberate: if clockevent setup fails (allocation, irq
 * request or cpuhp registration), we still fall through to 'err' and
 * bring up the DGT clocksource so the system keeps a usable timebase;
 * the return value then reflects only the clocksource registration.
 */
static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
				 bool percpu)
{
	struct clocksource *cs = &msm_clocksource;
	int res = 0;

	msm_timer_irq = irq;
	msm_timer_has_ppi = percpu;

	msm_evt = alloc_percpu(struct clock_event_device);
	if (!msm_evt) {
		pr_err("memory allocation failed for clockevents\n");
		goto err;
	}

	/* PPI: one global request here; per-cpu unmasking happens in the
	 * cpuhp starting callback.  SPI: requested per-cpu there instead. */
	if (percpu)
		res = request_percpu_irq(irq, msm_timer_interrupt,
					 "gp_timer", msm_evt);

	if (res) {
		pr_err("request_percpu_irq failed\n");
	} else {
		/* Install and invoke hotplug callbacks */
		res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING,
					"clockevents/qcom/timer:starting",
					msm_local_timer_starting_cpu,
					msm_local_timer_dying_cpu);
		if (res) {
			free_percpu_irq(irq, msm_evt);
			goto err;
		}
	}

err:
	/* Start the free-running DGT, then register it everywhere */
	writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE);
	res = clocksource_register_hz(cs, dgt_hz);
	if (res)
		pr_err("clocksource_register failed\n");
	sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz);
	msm_delay_timer.freq = dgt_hz;
	register_current_timer_delay(&msm_delay_timer);

	return res;
}
196*4882a593Smuzhiyun
msm_dt_timer_init(struct device_node * np)197*4882a593Smuzhiyun static int __init msm_dt_timer_init(struct device_node *np)
198*4882a593Smuzhiyun {
199*4882a593Smuzhiyun u32 freq;
200*4882a593Smuzhiyun int irq, ret;
201*4882a593Smuzhiyun struct resource res;
202*4882a593Smuzhiyun u32 percpu_offset;
203*4882a593Smuzhiyun void __iomem *base;
204*4882a593Smuzhiyun void __iomem *cpu0_base;
205*4882a593Smuzhiyun
206*4882a593Smuzhiyun base = of_iomap(np, 0);
207*4882a593Smuzhiyun if (!base) {
208*4882a593Smuzhiyun pr_err("Failed to map event base\n");
209*4882a593Smuzhiyun return -ENXIO;
210*4882a593Smuzhiyun }
211*4882a593Smuzhiyun
212*4882a593Smuzhiyun /* We use GPT0 for the clockevent */
213*4882a593Smuzhiyun irq = irq_of_parse_and_map(np, 1);
214*4882a593Smuzhiyun if (irq <= 0) {
215*4882a593Smuzhiyun pr_err("Can't get irq\n");
216*4882a593Smuzhiyun return -EINVAL;
217*4882a593Smuzhiyun }
218*4882a593Smuzhiyun
219*4882a593Smuzhiyun /* We use CPU0's DGT for the clocksource */
220*4882a593Smuzhiyun if (of_property_read_u32(np, "cpu-offset", &percpu_offset))
221*4882a593Smuzhiyun percpu_offset = 0;
222*4882a593Smuzhiyun
223*4882a593Smuzhiyun ret = of_address_to_resource(np, 0, &res);
224*4882a593Smuzhiyun if (ret) {
225*4882a593Smuzhiyun pr_err("Failed to parse DGT resource\n");
226*4882a593Smuzhiyun return ret;
227*4882a593Smuzhiyun }
228*4882a593Smuzhiyun
229*4882a593Smuzhiyun cpu0_base = ioremap(res.start + percpu_offset, resource_size(&res));
230*4882a593Smuzhiyun if (!cpu0_base) {
231*4882a593Smuzhiyun pr_err("Failed to map source base\n");
232*4882a593Smuzhiyun return -EINVAL;
233*4882a593Smuzhiyun }
234*4882a593Smuzhiyun
235*4882a593Smuzhiyun if (of_property_read_u32(np, "clock-frequency", &freq)) {
236*4882a593Smuzhiyun pr_err("Unknown frequency\n");
237*4882a593Smuzhiyun return -EINVAL;
238*4882a593Smuzhiyun }
239*4882a593Smuzhiyun
240*4882a593Smuzhiyun event_base = base + 0x4;
241*4882a593Smuzhiyun sts_base = base + 0x88;
242*4882a593Smuzhiyun source_base = cpu0_base + 0x24;
243*4882a593Smuzhiyun freq /= 4;
244*4882a593Smuzhiyun writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL);
245*4882a593Smuzhiyun
246*4882a593Smuzhiyun return msm_timer_init(freq, 32, irq, !!percpu_offset);
247*4882a593Smuzhiyun }
/* KPSS and SCSS timer blocks share the same programming model */
TIMER_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init);
TIMER_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init);
250