xref: /OK3568_Linux_fs/kernel/arch/mips/cavium-octeon/csrc-octeon.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * This file is subject to the terms and conditions of the GNU General Public
3*4882a593Smuzhiyun  * License.  See the file "COPYING" in the main directory of this archive
4*4882a593Smuzhiyun  * for more details.
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Copyright (C) 2007 by Ralf Baechle
7*4882a593Smuzhiyun  * Copyright (C) 2009, 2012 Cavium, Inc.
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun #include <linux/clocksource.h>
10*4882a593Smuzhiyun #include <linux/sched/clock.h>
11*4882a593Smuzhiyun #include <linux/export.h>
12*4882a593Smuzhiyun #include <linux/init.h>
13*4882a593Smuzhiyun #include <linux/smp.h>
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun #include <asm/cpu-info.h>
16*4882a593Smuzhiyun #include <asm/cpu-type.h>
17*4882a593Smuzhiyun #include <asm/time.h>
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun #include <asm/octeon/octeon.h>
20*4882a593Smuzhiyun #include <asm/octeon/cvmx-ipd-defs.h>
21*4882a593Smuzhiyun #include <asm/octeon/cvmx-mio-defs.h>
22*4882a593Smuzhiyun #include <asm/octeon/cvmx-rst-defs.h>
23*4882a593Smuzhiyun #include <asm/octeon/cvmx-fpa-defs.h>
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun static u64 f;
26*4882a593Smuzhiyun static u64 rdiv;
27*4882a593Smuzhiyun static u64 sdiv;
28*4882a593Smuzhiyun static u64 octeon_udelay_factor;
29*4882a593Smuzhiyun static u64 octeon_ndelay_factor;
30*4882a593Smuzhiyun 
octeon_setup_delays(void)31*4882a593Smuzhiyun void __init octeon_setup_delays(void)
32*4882a593Smuzhiyun {
33*4882a593Smuzhiyun 	octeon_udelay_factor = octeon_get_clock_rate() / 1000000;
34*4882a593Smuzhiyun 	/*
35*4882a593Smuzhiyun 	 * For __ndelay we divide by 2^16, so the factor is multiplied
36*4882a593Smuzhiyun 	 * by the same amount.
37*4882a593Smuzhiyun 	 */
38*4882a593Smuzhiyun 	octeon_ndelay_factor = (octeon_udelay_factor * 0x10000ull) / 1000ull;
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun 	preset_lpj = octeon_get_clock_rate() / HZ;
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun 	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
43*4882a593Smuzhiyun 		union cvmx_mio_rst_boot rst_boot;
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun 		rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
46*4882a593Smuzhiyun 		rdiv = rst_boot.s.c_mul;	/* CPU clock */
47*4882a593Smuzhiyun 		sdiv = rst_boot.s.pnr_mul;	/* I/O clock */
48*4882a593Smuzhiyun 		f = (0x8000000000000000ull / sdiv) * 2;
49*4882a593Smuzhiyun 	} else if (current_cpu_type() == CPU_CAVIUM_OCTEON3) {
50*4882a593Smuzhiyun 		union cvmx_rst_boot rst_boot;
51*4882a593Smuzhiyun 
52*4882a593Smuzhiyun 		rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
53*4882a593Smuzhiyun 		rdiv = rst_boot.s.c_mul;	/* CPU clock */
54*4882a593Smuzhiyun 		sdiv = rst_boot.s.pnr_mul;	/* I/O clock */
55*4882a593Smuzhiyun 		f = (0x8000000000000000ull / sdiv) * 2;
56*4882a593Smuzhiyun 	}
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun }
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun /*
61*4882a593Smuzhiyun  * Set the current core's cvmcount counter to the value of the
62*4882a593Smuzhiyun  * IPD_CLK_COUNT.  We do this on all cores as they are brought
63*4882a593Smuzhiyun  * on-line.  This allows for a read from a local cpu register to
64*4882a593Smuzhiyun  * access a synchronized counter.
65*4882a593Smuzhiyun  *
66*4882a593Smuzhiyun  * On CPU_CAVIUM_OCTEON2 the IPD_CLK_COUNT is scaled by rdiv/sdiv.
67*4882a593Smuzhiyun  */
octeon_init_cvmcount(void)68*4882a593Smuzhiyun void octeon_init_cvmcount(void)
69*4882a593Smuzhiyun {
70*4882a593Smuzhiyun 	u64 clk_reg;
71*4882a593Smuzhiyun 	unsigned long flags;
72*4882a593Smuzhiyun 	unsigned loops = 2;
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun 	clk_reg = octeon_has_feature(OCTEON_FEATURE_FPA3) ?
75*4882a593Smuzhiyun 		CVMX_FPA_CLK_COUNT : CVMX_IPD_CLK_COUNT;
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun 	/* Clobber loops so GCC will not unroll the following while loop. */
78*4882a593Smuzhiyun 	asm("" : "+r" (loops));
79*4882a593Smuzhiyun 
80*4882a593Smuzhiyun 	local_irq_save(flags);
81*4882a593Smuzhiyun 	/*
82*4882a593Smuzhiyun 	 * Loop several times so we are executing from the cache,
83*4882a593Smuzhiyun 	 * which should give more deterministic timing.
84*4882a593Smuzhiyun 	 */
85*4882a593Smuzhiyun 	while (loops--) {
86*4882a593Smuzhiyun 		u64 clk_count = cvmx_read_csr(clk_reg);
87*4882a593Smuzhiyun 		if (rdiv != 0) {
88*4882a593Smuzhiyun 			clk_count *= rdiv;
89*4882a593Smuzhiyun 			if (f != 0) {
90*4882a593Smuzhiyun 				asm("dmultu\t%[cnt],%[f]\n\t"
91*4882a593Smuzhiyun 				    "mfhi\t%[cnt]"
92*4882a593Smuzhiyun 				    : [cnt] "+r" (clk_count)
93*4882a593Smuzhiyun 				    : [f] "r" (f)
94*4882a593Smuzhiyun 				    : "hi", "lo");
95*4882a593Smuzhiyun 			}
96*4882a593Smuzhiyun 		}
97*4882a593Smuzhiyun 		write_c0_cvmcount(clk_count);
98*4882a593Smuzhiyun 	}
99*4882a593Smuzhiyun 	local_irq_restore(flags);
100*4882a593Smuzhiyun }
101*4882a593Smuzhiyun 
octeon_cvmcount_read(struct clocksource * cs)102*4882a593Smuzhiyun static u64 octeon_cvmcount_read(struct clocksource *cs)
103*4882a593Smuzhiyun {
104*4882a593Smuzhiyun 	return read_c0_cvmcount();
105*4882a593Smuzhiyun }
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun static struct clocksource clocksource_mips = {
108*4882a593Smuzhiyun 	.name		= "OCTEON_CVMCOUNT",
109*4882a593Smuzhiyun 	.read		= octeon_cvmcount_read,
110*4882a593Smuzhiyun 	.mask		= CLOCKSOURCE_MASK(64),
111*4882a593Smuzhiyun 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
112*4882a593Smuzhiyun };
113*4882a593Smuzhiyun 
/*
 * Scheduler clock in nanoseconds: (cvmcount * mult) >> shift, using the
 * mult/shift pair computed for clocksource_mips at registration time.
 */
unsigned long long notrace sched_clock(void)
{
	/* 64-bit arithmetic can overflow, so use 128-bit.  */
	u64 t1, t2, t3;
	unsigned long long rv;
	u64 mult = clocksource_mips.mult;
	u64 shift = clocksource_mips.shift;
	u64 cnt = read_c0_cvmcount();

	/*
	 * Form the 128-bit product cnt*mult in hi:lo (t2:t3), then
	 * right-shift it by 'shift':
	 *   rv = (lo >> shift) | (hi << (64 - shift))
	 * The nor computes ~shift, and dsllv by (~shift & 63) together
	 * with the 1-bit pre-shift of hi yields the (64 - shift) shift.
	 */
	asm (
		"dmultu\t%[cnt],%[mult]\n\t"
		"nor\t%[t1],$0,%[shift]\n\t"
		"mfhi\t%[t2]\n\t"
		"mflo\t%[t3]\n\t"
		"dsll\t%[t2],%[t2],1\n\t"
		"dsrlv\t%[rv],%[t3],%[shift]\n\t"
		"dsllv\t%[t1],%[t2],%[t1]\n\t"
		"or\t%[rv],%[t1],%[rv]\n\t"
		: [rv] "=&r" (rv), [t1] "=&r" (t1), [t2] "=&r" (t2), [t3] "=&r" (t3)
		: [cnt] "r" (cnt), [mult] "r" (mult), [shift] "r" (shift)
		: "hi", "lo");
	return rv;
}
137*4882a593Smuzhiyun 
plat_time_init(void)138*4882a593Smuzhiyun void __init plat_time_init(void)
139*4882a593Smuzhiyun {
140*4882a593Smuzhiyun 	clocksource_mips.rating = 300;
141*4882a593Smuzhiyun 	clocksource_register_hz(&clocksource_mips, octeon_get_clock_rate());
142*4882a593Smuzhiyun }
143*4882a593Smuzhiyun 
__udelay(unsigned long us)144*4882a593Smuzhiyun void __udelay(unsigned long us)
145*4882a593Smuzhiyun {
146*4882a593Smuzhiyun 	u64 cur, end, inc;
147*4882a593Smuzhiyun 
148*4882a593Smuzhiyun 	cur = read_c0_cvmcount();
149*4882a593Smuzhiyun 
150*4882a593Smuzhiyun 	inc = us * octeon_udelay_factor;
151*4882a593Smuzhiyun 	end = cur + inc;
152*4882a593Smuzhiyun 
153*4882a593Smuzhiyun 	while (end > cur)
154*4882a593Smuzhiyun 		cur = read_c0_cvmcount();
155*4882a593Smuzhiyun }
156*4882a593Smuzhiyun EXPORT_SYMBOL(__udelay);
157*4882a593Smuzhiyun 
__ndelay(unsigned long ns)158*4882a593Smuzhiyun void __ndelay(unsigned long ns)
159*4882a593Smuzhiyun {
160*4882a593Smuzhiyun 	u64 cur, end, inc;
161*4882a593Smuzhiyun 
162*4882a593Smuzhiyun 	cur = read_c0_cvmcount();
163*4882a593Smuzhiyun 
164*4882a593Smuzhiyun 	inc = ((ns * octeon_ndelay_factor) >> 16);
165*4882a593Smuzhiyun 	end = cur + inc;
166*4882a593Smuzhiyun 
167*4882a593Smuzhiyun 	while (end > cur)
168*4882a593Smuzhiyun 		cur = read_c0_cvmcount();
169*4882a593Smuzhiyun }
170*4882a593Smuzhiyun EXPORT_SYMBOL(__ndelay);
171*4882a593Smuzhiyun 
__delay(unsigned long loops)172*4882a593Smuzhiyun void __delay(unsigned long loops)
173*4882a593Smuzhiyun {
174*4882a593Smuzhiyun 	u64 cur, end;
175*4882a593Smuzhiyun 
176*4882a593Smuzhiyun 	cur = read_c0_cvmcount();
177*4882a593Smuzhiyun 	end = cur + loops;
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun 	while (end > cur)
180*4882a593Smuzhiyun 		cur = read_c0_cvmcount();
181*4882a593Smuzhiyun }
182*4882a593Smuzhiyun EXPORT_SYMBOL(__delay);
183*4882a593Smuzhiyun 
184*4882a593Smuzhiyun 
185*4882a593Smuzhiyun /**
186*4882a593Smuzhiyun  * octeon_io_clk_delay - wait for a given number of io clock cycles to pass.
187*4882a593Smuzhiyun  *
188*4882a593Smuzhiyun  * We scale the wait by the clock ratio, and then wait for the
189*4882a593Smuzhiyun  * corresponding number of core clocks.
190*4882a593Smuzhiyun  *
191*4882a593Smuzhiyun  * @count: The number of clocks to wait.
192*4882a593Smuzhiyun  */
octeon_io_clk_delay(unsigned long count)193*4882a593Smuzhiyun void octeon_io_clk_delay(unsigned long count)
194*4882a593Smuzhiyun {
195*4882a593Smuzhiyun 	u64 cur, end;
196*4882a593Smuzhiyun 
197*4882a593Smuzhiyun 	cur = read_c0_cvmcount();
198*4882a593Smuzhiyun 	if (rdiv != 0) {
199*4882a593Smuzhiyun 		end = count * rdiv;
200*4882a593Smuzhiyun 		if (f != 0) {
201*4882a593Smuzhiyun 			asm("dmultu\t%[cnt],%[f]\n\t"
202*4882a593Smuzhiyun 				"mfhi\t%[cnt]"
203*4882a593Smuzhiyun 				: [cnt] "+r" (end)
204*4882a593Smuzhiyun 				: [f] "r" (f)
205*4882a593Smuzhiyun 				: "hi", "lo");
206*4882a593Smuzhiyun 		}
207*4882a593Smuzhiyun 		end = cur + end;
208*4882a593Smuzhiyun 	} else {
209*4882a593Smuzhiyun 		end = cur + count;
210*4882a593Smuzhiyun 	}
211*4882a593Smuzhiyun 	while (end > cur)
212*4882a593Smuzhiyun 		cur = read_c0_cvmcount();
213*4882a593Smuzhiyun }
214*4882a593Smuzhiyun EXPORT_SYMBOL(octeon_io_clk_delay);
215