xref: /OK3568_Linux_fs/kernel/drivers/clocksource/timer-mp-csky.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
3*4882a593Smuzhiyun 
4*4882a593Smuzhiyun #include <linux/init.h>
5*4882a593Smuzhiyun #include <linux/interrupt.h>
6*4882a593Smuzhiyun #include <linux/sched_clock.h>
7*4882a593Smuzhiyun #include <linux/cpu.h>
8*4882a593Smuzhiyun #include <linux/of_irq.h>
9*4882a593Smuzhiyun #include <asm/reg_ops.h>
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #include "timer-of.h"
12*4882a593Smuzhiyun 
/*
 * C-SKY per-cpu private timer control registers, accessed via the
 * mfcr/mtcr coprocessor instructions (asm/reg_ops.h) rather than MMIO:
 *   CCVR - current counter value (read by clocksource/sched_clock)
 *   CTLR - control register; this driver writes 1 to start, 0 to stop
 *   LVR  - limit value; loaded with the delta for the next event
 *   TSR  - status register; written 0 in the irq handler, presumably
 *          to acknowledge the pending timer interrupt (per C-SKY docs)
 */
#define PTIM_CCVR	"cr<3, 14>"
#define PTIM_CTLR	"cr<0, 14>"
#define PTIM_LVR	"cr<6, 14>"
#define PTIM_TSR	"cr<1, 14>"

/* Private per-cpu irq number, identical on every core (parsed from DT). */
static int csky_mptimer_irq;
19*4882a593Smuzhiyun 
/*
 * Program the next event: load @delta (counter ticks) into the limit
 * value register of this cpu's private timer.  Cannot fail.
 */
static int csky_mptimer_set_next_event(unsigned long delta,
				       struct clock_event_device *ce)
{
	mtcr(PTIM_LVR, delta);

	return 0;
}
27*4882a593Smuzhiyun 
/* Stop this cpu's private timer by clearing the control register. */
static int csky_mptimer_shutdown(struct clock_event_device *ce)
{
	mtcr(PTIM_CTLR, 0);

	return 0;
}
34*4882a593Smuzhiyun 
/* Enter oneshot mode: enable this cpu's private timer (CTLR = 1). */
static int csky_mptimer_oneshot(struct clock_event_device *ce)
{
	mtcr(PTIM_CTLR, 1);

	return 0;
}
41*4882a593Smuzhiyun 
/* Oneshot-stopped state: disable the timer, same as shutdown. */
static int csky_mptimer_oneshot_stopped(struct clock_event_device *ce)
{
	mtcr(PTIM_CTLR, 0);

	return 0;
}
48*4882a593Smuzhiyun 
/*
 * One timer_of instance per cpu.  Only the clock is taken from DT
 * (TIMER_OF_CLOCK); the registers are coprocessor-based and the irq is
 * requested once as a per-cpu irq in csky_mptimer_init().  The cpumask
 * of each clkevt is filled in by csky_mptimer_starting_cpu().
 */
static DEFINE_PER_CPU(struct timer_of, csky_to) = {
	.flags					= TIMER_OF_CLOCK,
	.clkevt = {
		.rating				= 300,
		.features			= CLOCK_EVT_FEAT_PERCPU |
						  CLOCK_EVT_FEAT_ONESHOT,
		.set_state_shutdown		= csky_mptimer_shutdown,
		.set_state_oneshot		= csky_mptimer_oneshot,
		.set_state_oneshot_stopped	= csky_mptimer_oneshot_stopped,
		.set_next_event			= csky_mptimer_set_next_event,
	},
};
61*4882a593Smuzhiyun 
csky_timer_interrupt(int irq,void * dev)62*4882a593Smuzhiyun static irqreturn_t csky_timer_interrupt(int irq, void *dev)
63*4882a593Smuzhiyun {
64*4882a593Smuzhiyun 	struct timer_of *to = this_cpu_ptr(&csky_to);
65*4882a593Smuzhiyun 
66*4882a593Smuzhiyun 	mtcr(PTIM_TSR, 0);
67*4882a593Smuzhiyun 
68*4882a593Smuzhiyun 	to->clkevt.event_handler(&to->clkevt);
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun 	return IRQ_HANDLED;
71*4882a593Smuzhiyun }
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun /*
74*4882a593Smuzhiyun  * clock event for percpu
75*4882a593Smuzhiyun  */
csky_mptimer_starting_cpu(unsigned int cpu)76*4882a593Smuzhiyun static int csky_mptimer_starting_cpu(unsigned int cpu)
77*4882a593Smuzhiyun {
78*4882a593Smuzhiyun 	struct timer_of *to = per_cpu_ptr(&csky_to, cpu);
79*4882a593Smuzhiyun 
80*4882a593Smuzhiyun 	to->clkevt.cpumask = cpumask_of(cpu);
81*4882a593Smuzhiyun 
82*4882a593Smuzhiyun 	enable_percpu_irq(csky_mptimer_irq, 0);
83*4882a593Smuzhiyun 
84*4882a593Smuzhiyun 	clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
85*4882a593Smuzhiyun 					2, ULONG_MAX);
86*4882a593Smuzhiyun 
87*4882a593Smuzhiyun 	return 0;
88*4882a593Smuzhiyun }
89*4882a593Smuzhiyun 
/* CPU-hotplug "dying" callback: mask the per-cpu timer irq on this cpu. */
static int csky_mptimer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(csky_mptimer_irq);

	return 0;
}
96*4882a593Smuzhiyun 
/*
 * clock source
 */

/*
 * sched_clock backend: raw 32-bit counter value.  notrace because it
 * may be called from tracing/scheduler paths.
 */
static u64 notrace sched_clock_read(void)
{
	return (u64)mfcr(PTIM_CCVR);
}
104*4882a593Smuzhiyun 
/* Clocksource read callback: current 32-bit counter value. */
static u64 clksrc_read(struct clocksource *c)
{
	return (u64)mfcr(PTIM_CCVR);
}
109*4882a593Smuzhiyun 
/*
 * 32-bit free-running clocksource backed by the private timer counter.
 * NOTE(review): not referenced outside this file as far as visible here;
 * could likely be made static — confirm no external users before changing
 * linkage.
 */
struct clocksource csky_clocksource = {
	.name	= "csky",
	.rating	= 400,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
	.read	= clksrc_read,
};
117*4882a593Smuzhiyun 
csky_mptimer_init(struct device_node * np)118*4882a593Smuzhiyun static int __init csky_mptimer_init(struct device_node *np)
119*4882a593Smuzhiyun {
120*4882a593Smuzhiyun 	int ret, cpu, cpu_rollback;
121*4882a593Smuzhiyun 	struct timer_of *to = NULL;
122*4882a593Smuzhiyun 
123*4882a593Smuzhiyun 	/*
124*4882a593Smuzhiyun 	 * Csky_mptimer is designed for C-SKY SMP multi-processors and
125*4882a593Smuzhiyun 	 * every core has it's own private irq and regs for clkevt and
126*4882a593Smuzhiyun 	 * clksrc.
127*4882a593Smuzhiyun 	 *
128*4882a593Smuzhiyun 	 * The regs is accessed by cpu instruction: mfcr/mtcr instead of
129*4882a593Smuzhiyun 	 * mmio map style. So we needn't mmio-address in dts, but we still
130*4882a593Smuzhiyun 	 * need to give clk and irq number.
131*4882a593Smuzhiyun 	 *
132*4882a593Smuzhiyun 	 * We use private irq for the mptimer and irq number is the same
133*4882a593Smuzhiyun 	 * for every core. So we use request_percpu_irq() in timer_of_init.
134*4882a593Smuzhiyun 	 */
135*4882a593Smuzhiyun 	csky_mptimer_irq = irq_of_parse_and_map(np, 0);
136*4882a593Smuzhiyun 	if (csky_mptimer_irq <= 0)
137*4882a593Smuzhiyun 		return -EINVAL;
138*4882a593Smuzhiyun 
139*4882a593Smuzhiyun 	ret = request_percpu_irq(csky_mptimer_irq, csky_timer_interrupt,
140*4882a593Smuzhiyun 				 "csky_mp_timer", &csky_to);
141*4882a593Smuzhiyun 	if (ret)
142*4882a593Smuzhiyun 		return -EINVAL;
143*4882a593Smuzhiyun 
144*4882a593Smuzhiyun 	for_each_possible_cpu(cpu) {
145*4882a593Smuzhiyun 		to = per_cpu_ptr(&csky_to, cpu);
146*4882a593Smuzhiyun 		ret = timer_of_init(np, to);
147*4882a593Smuzhiyun 		if (ret)
148*4882a593Smuzhiyun 			goto rollback;
149*4882a593Smuzhiyun 	}
150*4882a593Smuzhiyun 
151*4882a593Smuzhiyun 	clocksource_register_hz(&csky_clocksource, timer_of_rate(to));
152*4882a593Smuzhiyun 	sched_clock_register(sched_clock_read, 32, timer_of_rate(to));
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun 	ret = cpuhp_setup_state(CPUHP_AP_CSKY_TIMER_STARTING,
155*4882a593Smuzhiyun 				"clockevents/csky/timer:starting",
156*4882a593Smuzhiyun 				csky_mptimer_starting_cpu,
157*4882a593Smuzhiyun 				csky_mptimer_dying_cpu);
158*4882a593Smuzhiyun 	if (ret)
159*4882a593Smuzhiyun 		return -EINVAL;
160*4882a593Smuzhiyun 
161*4882a593Smuzhiyun 	return 0;
162*4882a593Smuzhiyun 
163*4882a593Smuzhiyun rollback:
164*4882a593Smuzhiyun 	for_each_possible_cpu(cpu_rollback) {
165*4882a593Smuzhiyun 		if (cpu_rollback == cpu)
166*4882a593Smuzhiyun 			break;
167*4882a593Smuzhiyun 
168*4882a593Smuzhiyun 		to = per_cpu_ptr(&csky_to, cpu_rollback);
169*4882a593Smuzhiyun 		timer_of_cleanup(to);
170*4882a593Smuzhiyun 	}
171*4882a593Smuzhiyun 	return -EINVAL;
172*4882a593Smuzhiyun }
/* Register the init routine for DT nodes with compatible "csky,mptimer". */
TIMER_OF_DECLARE(csky_mptimer, "csky,mptimer", csky_mptimer_init);
174