1*4882a593Smuzhiyun /***********************license start***************
2*4882a593Smuzhiyun * Author: Cavium Networks
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * Contact: support@caviumnetworks.com
5*4882a593Smuzhiyun * This file is part of the OCTEON SDK
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * Copyright (c) 2003-2017 Cavium, Inc.
8*4882a593Smuzhiyun *
9*4882a593Smuzhiyun * This file is free software; you can redistribute it and/or modify
10*4882a593Smuzhiyun * it under the terms of the GNU General Public License, Version 2, as
11*4882a593Smuzhiyun * published by the Free Software Foundation.
12*4882a593Smuzhiyun *
13*4882a593Smuzhiyun * This file is distributed in the hope that it will be useful, but
14*4882a593Smuzhiyun * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15*4882a593Smuzhiyun * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16*4882a593Smuzhiyun * NONINFRINGEMENT. See the GNU General Public License for more
17*4882a593Smuzhiyun * details.
18*4882a593Smuzhiyun *
19*4882a593Smuzhiyun * You should have received a copy of the GNU General Public License
20*4882a593Smuzhiyun * along with this file; if not, write to the Free Software
21*4882a593Smuzhiyun * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22*4882a593Smuzhiyun * or visit http://www.gnu.org/licenses/.
23*4882a593Smuzhiyun *
24*4882a593Smuzhiyun * This file may also be available under a different license from Cavium.
25*4882a593Smuzhiyun * Contact Cavium Networks for more information
26*4882a593Smuzhiyun ***********************license end**************************************/
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun #ifndef __CVMX_H__
29*4882a593Smuzhiyun #define __CVMX_H__
30*4882a593Smuzhiyun
31*4882a593Smuzhiyun #include <linux/kernel.h>
32*4882a593Smuzhiyun #include <linux/string.h>
33*4882a593Smuzhiyun #include <linux/delay.h>
34*4882a593Smuzhiyun
/*
 * MIPS64 virtual address segment selectors; the value occupies the top
 * two address bits (see CVMX_ADD_SEG, which shifts it left by 62).
 */
enum cvmx_mips_space {
	CVMX_MIPS_SPACE_XKSEG = 3LL,	/* kernel mapped space */
	CVMX_MIPS_SPACE_XKPHYS = 2LL,	/* unmapped physical window */
	CVMX_MIPS_SPACE_XSSEG = 1LL,	/* supervisor mapped space */
	CVMX_MIPS_SPACE_XUSEG = 0LL	/* user mapped space */
};
41*4882a593Smuzhiyun
/* These macros for use when using 32 bit pointers. */
#define CVMX_MIPS32_SPACE_KSEG0 1l
#define CVMX_ADD_SEG32(segment, add) \
	(((int32_t)segment << 31) | (int32_t)(add))

/* I/O addresses are built in the unmapped XKPHYS window */
#define CVMX_IO_SEG CVMX_MIPS_SPACE_XKPHYS

/* These macros simplify the process of creating common IO addresses.
 * The segment selector occupies address bits 63:62. */
#define CVMX_ADD_SEG(segment, add) \
	((((uint64_t)segment) << 62) | (add))
#ifndef CVMX_ADD_IO_SEG
#define CVMX_ADD_IO_SEG(add) CVMX_ADD_SEG(CVMX_IO_SEG, (add))
#endif
55*4882a593Smuzhiyun
56*4882a593Smuzhiyun #include <asm/octeon/cvmx-asm.h>
57*4882a593Smuzhiyun #include <asm/octeon/cvmx-packet.h>
58*4882a593Smuzhiyun #include <asm/octeon/cvmx-sysinfo.h>
59*4882a593Smuzhiyun
60*4882a593Smuzhiyun #include <asm/octeon/cvmx-ciu-defs.h>
61*4882a593Smuzhiyun #include <asm/octeon/cvmx-ciu3-defs.h>
62*4882a593Smuzhiyun #include <asm/octeon/cvmx-gpio-defs.h>
63*4882a593Smuzhiyun #include <asm/octeon/cvmx-iob-defs.h>
64*4882a593Smuzhiyun #include <asm/octeon/cvmx-ipd-defs.h>
65*4882a593Smuzhiyun #include <asm/octeon/cvmx-l2c-defs.h>
66*4882a593Smuzhiyun #include <asm/octeon/cvmx-l2d-defs.h>
67*4882a593Smuzhiyun #include <asm/octeon/cvmx-l2t-defs.h>
68*4882a593Smuzhiyun #include <asm/octeon/cvmx-led-defs.h>
69*4882a593Smuzhiyun #include <asm/octeon/cvmx-mio-defs.h>
70*4882a593Smuzhiyun #include <asm/octeon/cvmx-pow-defs.h>
71*4882a593Smuzhiyun
72*4882a593Smuzhiyun #include <asm/octeon/cvmx-bootinfo.h>
73*4882a593Smuzhiyun #include <asm/octeon/cvmx-bootmem.h>
74*4882a593Smuzhiyun #include <asm/octeon/cvmx-l2c.h>
75*4882a593Smuzhiyun
/* Debug printing is compiled in by default; define
 * CVMX_ENABLE_DEBUG_PRINTS to 0 before including this header to
 * compile cvmx_dprintf() calls out entirely. */
#ifndef CVMX_ENABLE_DEBUG_PRINTS
#define CVMX_ENABLE_DEBUG_PRINTS 1
#endif

#if CVMX_ENABLE_DEBUG_PRINTS
#define cvmx_dprintf printk
#else
#define cvmx_dprintf(...) {}
#endif

#define CVMX_MAX_CORES (16)
#define CVMX_CACHE_LINE_SIZE (128)	/* In bytes */
#define CVMX_CACHE_LINE_MASK (CVMX_CACHE_LINE_SIZE - 1)	/* In bytes */
#define CVMX_CACHE_LINE_ALIGNED __attribute__ ((aligned(CVMX_CACHE_LINE_SIZE)))
/* Convert a pointer to a 64 bit value (sign-extended via long) */
#define CAST64(v) ((long long)(long)(v))
/* Convert a 64 bit value back to a typed pointer */
#define CASTPTR(type, v) ((type *)(long)(v))
92*4882a593Smuzhiyun
93*4882a593Smuzhiyun /*
94*4882a593Smuzhiyun * Returns processor ID, different Linux and simple exec versions
95*4882a593Smuzhiyun * provided in the cvmx-app-init*.c files.
96*4882a593Smuzhiyun */
/*
 * Returns processor ID, different Linux and simple exec versions
 * provided in the cvmx-app-init*.c files.
 */
static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure));
static inline uint32_t cvmx_get_proc_id(void)
{
	uint32_t id;
	/* Read CP0 register 15, select 0 (the processor ID register). */
	asm("mfc0 %0, $15,0" : "=r"(id));
	return id;
}
104*4882a593Smuzhiyun
/* turn the variable name into a string; the two-level expansion makes
 * sure macro arguments are expanded before stringification */
#define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
#define CVMX_TMP_STR2(x) #x
108*4882a593Smuzhiyun
/**
 * Builds a bit mask given the required size in bits.
 *
 * @bits: Number of bits in the mask, 0-64 inclusive
 * Returns The mask
 */
static inline uint64_t cvmx_build_mask(uint64_t bits)
{
	/*
	 * Shifting a 64-bit value by 64 or more is undefined behavior
	 * (C11 6.5.7), so handle the full-width mask explicitly.
	 * Reachable e.g. via cvmx_build_bits(63, 0, v).
	 */
	if (bits >= 64)
		return ~0x0ull;
	return ~((~0x0ull) << bits);
}
118*4882a593Smuzhiyun
/**
 * Builds a memory address for I/O based on the Major and Sub DID.
 *
 * @major_did: 5 bit major did
 * @sub_did: 3 bit sub did
 * Returns I/O base address
 */
static inline uint64_t cvmx_build_io_address(uint64_t major_did,
					     uint64_t sub_did)
{
	uint64_t addr = 0x1ull << 48;	/* I/O space indicator bit */

	addr |= major_did << 43;
	addr |= sub_did << 40;
	return addr;
}
131*4882a593Smuzhiyun
/**
 * Perform mask and shift to place the supplied value into
 * the supplied bit range.
 *
 * Example: cvmx_build_bits(39,24,value)
 * <pre>
 * 6       5       4       3       3       2       1
 * 3       5       7       9       1       3       5       7      0
 * +-------+-------+-------+-------+-------+-------+-------+------+
 * 000000000000000000000000___________value000000000000000000000000
 * </pre>
 *
 * @high_bit: Highest bit value can occupy (inclusive) 0-63
 * @low_bit: Lowest bit value can occupy inclusive 0-high_bit
 * @value: Value to use
 * Returns Value masked and shifted
 */
static inline uint64_t cvmx_build_bits(uint64_t high_bit,
				       uint64_t low_bit, uint64_t value)
{
	uint64_t field_width = high_bit - low_bit + 1;
	uint64_t masked = value & cvmx_build_mask(field_width);

	return masked << low_bit;
}
154*4882a593Smuzhiyun
/**
 * Convert a memory pointer (void*) into a hardware compatible
 * memory address (uint64_t). Octeon hardware widgets don't
 * understand logical addresses.
 *
 * @ptr: C style memory pointer
 * Returns Hardware physical address
 */
static inline uint64_t cvmx_ptr_to_phys(void *ptr)
{
	uint64_t addr;

	if (sizeof(void *) != 8)
		/* 32 bit mode: strip the KSEG bits. */
		return (long)(ptr) & 0x1fffffff;

	/*
	 * We're running in 64 bit mode. Normally this means that we
	 * can use 40 bits of address space (the hardware limit).
	 * Unfortunately there is one case where we need to limit this
	 * to 30 bits, sign extended 32 bit. Although these are 64
	 * bits wide, only 30 bits can be used.
	 */
	addr = CAST64(ptr);
	if ((addr >> 62) == 3)
		return addr & cvmx_build_mask(30);
	return addr & cvmx_build_mask(40);
}
182*4882a593Smuzhiyun
183*4882a593Smuzhiyun /**
184*4882a593Smuzhiyun * Convert a hardware physical address (uint64_t) into a
185*4882a593Smuzhiyun * memory pointer (void *).
186*4882a593Smuzhiyun *
187*4882a593Smuzhiyun * @physical_address:
188*4882a593Smuzhiyun * Hardware physical address to memory
189*4882a593Smuzhiyun * Returns Pointer to memory
190*4882a593Smuzhiyun */
cvmx_phys_to_ptr(uint64_t physical_address)191*4882a593Smuzhiyun static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
192*4882a593Smuzhiyun {
193*4882a593Smuzhiyun if (sizeof(void *) == 8) {
194*4882a593Smuzhiyun /* Just set the top bit, avoiding any TLB ugliness */
195*4882a593Smuzhiyun return CASTPTR(void,
196*4882a593Smuzhiyun CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
197*4882a593Smuzhiyun physical_address));
198*4882a593Smuzhiyun } else {
199*4882a593Smuzhiyun return CASTPTR(void,
200*4882a593Smuzhiyun CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0,
201*4882a593Smuzhiyun physical_address));
202*4882a593Smuzhiyun }
203*4882a593Smuzhiyun }
204*4882a593Smuzhiyun
/* The following #if controls the definition of the macro
    CVMX_BUILD_WRITE64. This macro is used to build a store operation to
    a full 64bit address. With a 64bit ABI, this can be done with a simple
    pointer access. 32bit ABIs require more complicated assembly */

/* We have a full 64bit ABI. Writing to a 64bit address can be done with
    a simple volatile pointer */
/*
 * @TYPE: integer type stem, e.g. uint32 -> cvmx_write64_uint32(addr, val)
 * @ST: store instruction mnemonic; unused by this 64bit-ABI variant
 */
#define CVMX_BUILD_WRITE64(TYPE, ST)					\
static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val)	\
{									\
	*CASTPTR(volatile TYPE##_t, addr) = val;			\
}
217*4882a593Smuzhiyun
218*4882a593Smuzhiyun
/* The following #if controls the definition of the macro
    CVMX_BUILD_READ64. This macro is used to build a load operation from
    a full 64bit address. With a 64bit ABI, this can be done with a simple
    pointer access. 32bit ABIs require more complicated assembly */

/* We have a full 64bit ABI. Writing to a 64bit address can be done with
    a simple volatile pointer */
/*
 * @TYPE: integer type stem, e.g. uint32 -> cvmx_read64_uint32(addr)
 * @LT: load instruction mnemonic; unused by this 64bit-ABI variant
 */
#define CVMX_BUILD_READ64(TYPE, LT)					\
static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr)		\
{									\
	return *CASTPTR(volatile TYPE##_t, addr);			\
}
231*4882a593Smuzhiyun
232*4882a593Smuzhiyun
/* The following defines 8 functions for writing to a 64bit address. Each
    takes two arguments, the address and the value to write.
    cvmx_write64_int64      cvmx_write64_uint64
    cvmx_write64_int32      cvmx_write64_uint32
    cvmx_write64_int16      cvmx_write64_uint16
    cvmx_write64_int8       cvmx_write64_uint8 */
CVMX_BUILD_WRITE64(int64, "sd");
CVMX_BUILD_WRITE64(int32, "sw");
CVMX_BUILD_WRITE64(int16, "sh");
CVMX_BUILD_WRITE64(int8, "sb");
CVMX_BUILD_WRITE64(uint64, "sd");
CVMX_BUILD_WRITE64(uint32, "sw");
CVMX_BUILD_WRITE64(uint16, "sh");
CVMX_BUILD_WRITE64(uint8, "sb");
/* Default 64-bit write is the full-width unsigned store */
#define cvmx_write64 cvmx_write64_uint64

/* The following defines 8 functions for reading from a 64bit address. Each
    takes the address as the only argument
    cvmx_read64_int64       cvmx_read64_uint64
    cvmx_read64_int32       cvmx_read64_uint32
    cvmx_read64_int16       cvmx_read64_uint16
    cvmx_read64_int8        cvmx_read64_uint8 */
CVMX_BUILD_READ64(int64, "ld");
CVMX_BUILD_READ64(int32, "lw");
CVMX_BUILD_READ64(int16, "lh");
CVMX_BUILD_READ64(int8, "lb");
CVMX_BUILD_READ64(uint64, "ld");
CVMX_BUILD_READ64(uint32, "lw");
CVMX_BUILD_READ64(uint16, "lhu");
CVMX_BUILD_READ64(uint8, "lbu");
/* Default 64-bit read is the full-width unsigned load */
#define cvmx_read64 cvmx_read64_uint64
264*4882a593Smuzhiyun
265*4882a593Smuzhiyun
cvmx_write_csr(uint64_t csr_addr,uint64_t val)266*4882a593Smuzhiyun static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val)
267*4882a593Smuzhiyun {
268*4882a593Smuzhiyun cvmx_write64(csr_addr, val);
269*4882a593Smuzhiyun
270*4882a593Smuzhiyun /*
271*4882a593Smuzhiyun * Perform an immediate read after every write to an RSL
272*4882a593Smuzhiyun * register to force the write to complete. It doesn't matter
273*4882a593Smuzhiyun * what RSL read we do, so we choose CVMX_MIO_BOOT_BIST_STAT
274*4882a593Smuzhiyun * because it is fast and harmless.
275*4882a593Smuzhiyun */
276*4882a593Smuzhiyun if (((csr_addr >> 40) & 0x7ffff) == (0x118))
277*4882a593Smuzhiyun cvmx_read64(CVMX_MIO_BOOT_BIST_STAT);
278*4882a593Smuzhiyun }
279*4882a593Smuzhiyun
cvmx_writeq_csr(void __iomem * csr_addr,uint64_t val)280*4882a593Smuzhiyun static inline void cvmx_writeq_csr(void __iomem *csr_addr, uint64_t val)
281*4882a593Smuzhiyun {
282*4882a593Smuzhiyun cvmx_write_csr((__force uint64_t)csr_addr, val);
283*4882a593Smuzhiyun }
284*4882a593Smuzhiyun
cvmx_write_io(uint64_t io_addr,uint64_t val)285*4882a593Smuzhiyun static inline void cvmx_write_io(uint64_t io_addr, uint64_t val)
286*4882a593Smuzhiyun {
287*4882a593Smuzhiyun cvmx_write64(io_addr, val);
288*4882a593Smuzhiyun
289*4882a593Smuzhiyun }
290*4882a593Smuzhiyun
cvmx_read_csr(uint64_t csr_addr)291*4882a593Smuzhiyun static inline uint64_t cvmx_read_csr(uint64_t csr_addr)
292*4882a593Smuzhiyun {
293*4882a593Smuzhiyun uint64_t val = cvmx_read64(csr_addr);
294*4882a593Smuzhiyun return val;
295*4882a593Smuzhiyun }
296*4882a593Smuzhiyun
cvmx_readq_csr(void __iomem * csr_addr)297*4882a593Smuzhiyun static inline uint64_t cvmx_readq_csr(void __iomem *csr_addr)
298*4882a593Smuzhiyun {
299*4882a593Smuzhiyun return cvmx_read_csr((__force uint64_t) csr_addr);
300*4882a593Smuzhiyun }
301*4882a593Smuzhiyun
cvmx_send_single(uint64_t data)302*4882a593Smuzhiyun static inline void cvmx_send_single(uint64_t data)
303*4882a593Smuzhiyun {
304*4882a593Smuzhiyun const uint64_t CVMX_IOBDMA_SENDSINGLE = 0xffffffffffffa200ull;
305*4882a593Smuzhiyun cvmx_write64(CVMX_IOBDMA_SENDSINGLE, data);
306*4882a593Smuzhiyun }
307*4882a593Smuzhiyun
cvmx_read_csr_async(uint64_t scraddr,uint64_t csr_addr)308*4882a593Smuzhiyun static inline void cvmx_read_csr_async(uint64_t scraddr, uint64_t csr_addr)
309*4882a593Smuzhiyun {
310*4882a593Smuzhiyun union {
311*4882a593Smuzhiyun uint64_t u64;
312*4882a593Smuzhiyun struct {
313*4882a593Smuzhiyun uint64_t scraddr:8;
314*4882a593Smuzhiyun uint64_t len:8;
315*4882a593Smuzhiyun uint64_t addr:48;
316*4882a593Smuzhiyun } s;
317*4882a593Smuzhiyun } addr;
318*4882a593Smuzhiyun addr.u64 = csr_addr;
319*4882a593Smuzhiyun addr.s.scraddr = scraddr >> 3;
320*4882a593Smuzhiyun addr.s.len = 1;
321*4882a593Smuzhiyun cvmx_send_single(addr.u64);
322*4882a593Smuzhiyun }
323*4882a593Smuzhiyun
/* Return true if Octeon is CN38XX pass 1 */
static inline int cvmx_octeon_is_pass1(void)
{
#if OCTEON_IS_COMMON_BINARY()
	return 0;	/* Pass 1 isn't supported for common binaries */
#else
/* Now that we know we're built for a specific model, only check CN38XX */
#if OCTEON_IS_MODEL(OCTEON_CN38XX)
	/* Compare the CP0 processor ID against the pass-1 revision code */
	return cvmx_get_proc_id() == OCTEON_CN38XX_PASS1;
#else
	return 0;	/* Built for non CN38XX chip, we're not CN38XX pass1 */
#endif
#endif
}
338*4882a593Smuzhiyun
cvmx_get_core_num(void)339*4882a593Smuzhiyun static inline unsigned int cvmx_get_core_num(void)
340*4882a593Smuzhiyun {
341*4882a593Smuzhiyun unsigned int core_num;
342*4882a593Smuzhiyun CVMX_RDHWRNV(core_num, 0);
343*4882a593Smuzhiyun return core_num;
344*4882a593Smuzhiyun }
345*4882a593Smuzhiyun
346*4882a593Smuzhiyun /* Maximum # of bits to define core in node */
347*4882a593Smuzhiyun #define CVMX_NODE_NO_SHIFT 7
348*4882a593Smuzhiyun #define CVMX_NODE_MASK 0x3
cvmx_get_node_num(void)349*4882a593Smuzhiyun static inline unsigned int cvmx_get_node_num(void)
350*4882a593Smuzhiyun {
351*4882a593Smuzhiyun unsigned int core_num = cvmx_get_core_num();
352*4882a593Smuzhiyun
353*4882a593Smuzhiyun return (core_num >> CVMX_NODE_NO_SHIFT) & CVMX_NODE_MASK;
354*4882a593Smuzhiyun }
355*4882a593Smuzhiyun
cvmx_get_local_core_num(void)356*4882a593Smuzhiyun static inline unsigned int cvmx_get_local_core_num(void)
357*4882a593Smuzhiyun {
358*4882a593Smuzhiyun return cvmx_get_core_num() & ((1 << CVMX_NODE_NO_SHIFT) - 1);
359*4882a593Smuzhiyun }
360*4882a593Smuzhiyun
361*4882a593Smuzhiyun #define CVMX_NODE_BITS (2) /* Number of bits to define a node */
362*4882a593Smuzhiyun #define CVMX_MAX_NODES (1 << CVMX_NODE_BITS)
363*4882a593Smuzhiyun #define CVMX_NODE_IO_SHIFT (36)
364*4882a593Smuzhiyun #define CVMX_NODE_MEM_SHIFT (40)
365*4882a593Smuzhiyun #define CVMX_NODE_IO_MASK ((uint64_t)CVMX_NODE_MASK << CVMX_NODE_IO_SHIFT)
366*4882a593Smuzhiyun
cvmx_write_csr_node(uint64_t node,uint64_t csr_addr,uint64_t val)367*4882a593Smuzhiyun static inline void cvmx_write_csr_node(uint64_t node, uint64_t csr_addr,
368*4882a593Smuzhiyun uint64_t val)
369*4882a593Smuzhiyun {
370*4882a593Smuzhiyun uint64_t composite_csr_addr, node_addr;
371*4882a593Smuzhiyun
372*4882a593Smuzhiyun node_addr = (node & CVMX_NODE_MASK) << CVMX_NODE_IO_SHIFT;
373*4882a593Smuzhiyun composite_csr_addr = (csr_addr & ~CVMX_NODE_IO_MASK) | node_addr;
374*4882a593Smuzhiyun
375*4882a593Smuzhiyun cvmx_write64_uint64(composite_csr_addr, val);
376*4882a593Smuzhiyun if (((csr_addr >> 40) & 0x7ffff) == (0x118))
377*4882a593Smuzhiyun cvmx_read64_uint64(CVMX_MIO_BOOT_BIST_STAT | node_addr);
378*4882a593Smuzhiyun }
379*4882a593Smuzhiyun
cvmx_read_csr_node(uint64_t node,uint64_t csr_addr)380*4882a593Smuzhiyun static inline uint64_t cvmx_read_csr_node(uint64_t node, uint64_t csr_addr)
381*4882a593Smuzhiyun {
382*4882a593Smuzhiyun uint64_t node_addr;
383*4882a593Smuzhiyun
384*4882a593Smuzhiyun node_addr = (csr_addr & ~CVMX_NODE_IO_MASK) |
385*4882a593Smuzhiyun (node & CVMX_NODE_MASK) << CVMX_NODE_IO_SHIFT;
386*4882a593Smuzhiyun return cvmx_read_csr(node_addr);
387*4882a593Smuzhiyun }
388*4882a593Smuzhiyun
/**
 * Returns the number of bits set in the provided value.
 * Simple wrapper for POP instruction.
 *
 * @val: 32 bit value to count set bits in
 *
 * Returns Number of bits set
 */
static inline uint32_t cvmx_pop(uint32_t val)
{
	uint32_t count;

	CVMX_POP(count, val);
	return count;
}
403*4882a593Smuzhiyun
/**
 * Returns the number of bits set in the provided value.
 * Simple wrapper for DPOP instruction.
 *
 * @val: 64 bit value to count set bits in
 *
 * Returns Number of bits set
 */
static inline int cvmx_dpop(uint64_t val)
{
	int count;

	CVMX_DPOP(count, val);
	return count;
}
418*4882a593Smuzhiyun
/**
 * Provide current cycle counter as a return value
 *
 * Returns current cycle counter (hardware register 31)
 */
static inline uint64_t cvmx_get_cycle(void)
{
	uint64_t count;

	CVMX_RDHWR(count, 31);
	return count;
}
431*4882a593Smuzhiyun
432*4882a593Smuzhiyun /**
433*4882a593Smuzhiyun * Reads a chip global cycle counter. This counts CPU cycles since
434*4882a593Smuzhiyun * chip reset. The counter is 64 bit.
435*4882a593Smuzhiyun * This register does not exist on CN38XX pass 1 silicion
436*4882a593Smuzhiyun *
437*4882a593Smuzhiyun * Returns Global chip cycle count since chip reset.
438*4882a593Smuzhiyun */
cvmx_get_cycle_global(void)439*4882a593Smuzhiyun static inline uint64_t cvmx_get_cycle_global(void)
440*4882a593Smuzhiyun {
441*4882a593Smuzhiyun if (cvmx_octeon_is_pass1())
442*4882a593Smuzhiyun return 0;
443*4882a593Smuzhiyun else
444*4882a593Smuzhiyun return cvmx_read64(CVMX_IPD_CLK_COUNT);
445*4882a593Smuzhiyun }
446*4882a593Smuzhiyun
/**
 * This macro spins on a field waiting for it to reach a value. It
 * is common in code to need to wait for a specific field in a CSR
 * to match a specific value. Conceptually this macro expands to:
 *
 * 1) read csr at "address" with a csr typedef of "type"
 * 2) Check if ("type".s."field" "op" "value")
 * 3) If #2 isn't true loop to #1 unless too much time has passed.
 *
 * @address: CSR address to poll
 * @type: union typedef used to decode the CSR value (must have .u64
 *	and .s."field" members)
 * @field: field within type.s to compare
 * @op: comparison operator to apply (==, !=, <, ...)
 * @value: value to compare the field against
 * @timeout_usec: timeout in microseconds, converted to cycles using
 *	cvmx_sysinfo_get()->cpu_clock_hz
 *
 * The expression evaluates to 0 on success, -1 on timeout.
 */
#define CVMX_WAIT_FOR_FIELD64(address, type, field, op, value, timeout_usec)\
    (									\
{									\
	int result;							\
	do {								\
		uint64_t done = cvmx_get_cycle() + (uint64_t)timeout_usec * \
			cvmx_sysinfo_get()->cpu_clock_hz / 1000000;	\
		type c;							\
		while (1) {						\
			c.u64 = cvmx_read_csr(address);			\
			if ((c.s.field) op(value)) {			\
				result = 0;				\
				break;					\
			} else if (cvmx_get_cycle() > done) {		\
				result = -1;				\
				break;					\
			} else						\
				__delay(100);				\
		}							\
	} while (0);							\
	result;								\
})
478*4882a593Smuzhiyun
479*4882a593Smuzhiyun /***************************************************************************/
480*4882a593Smuzhiyun
481*4882a593Smuzhiyun /* Return the number of cores available in the chip */
cvmx_octeon_num_cores(void)482*4882a593Smuzhiyun static inline uint32_t cvmx_octeon_num_cores(void)
483*4882a593Smuzhiyun {
484*4882a593Smuzhiyun u64 ciu_fuse_reg;
485*4882a593Smuzhiyun u64 ciu_fuse;
486*4882a593Smuzhiyun
487*4882a593Smuzhiyun if (OCTEON_IS_OCTEON3() && !OCTEON_IS_MODEL(OCTEON_CN70XX))
488*4882a593Smuzhiyun ciu_fuse_reg = CVMX_CIU3_FUSE;
489*4882a593Smuzhiyun else
490*4882a593Smuzhiyun ciu_fuse_reg = CVMX_CIU_FUSE;
491*4882a593Smuzhiyun ciu_fuse = cvmx_read_csr(ciu_fuse_reg);
492*4882a593Smuzhiyun return cvmx_dpop(ciu_fuse);
493*4882a593Smuzhiyun }
494*4882a593Smuzhiyun
495*4882a593Smuzhiyun #endif /* __CVMX_H__ */
496