xref: /OK3568_Linux_fs/kernel/arch/mips/include/asm/octeon/cvmx-fau.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /***********************license start***************
2*4882a593Smuzhiyun  * Author: Cavium Networks
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Contact: support@caviumnetworks.com
5*4882a593Smuzhiyun  * This file is part of the OCTEON SDK
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Copyright (c) 2003-2008 Cavium Networks
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  * This file is free software; you can redistribute it and/or modify
10*4882a593Smuzhiyun  * it under the terms of the GNU General Public License, Version 2, as
11*4882a593Smuzhiyun  * published by the Free Software Foundation.
12*4882a593Smuzhiyun  *
13*4882a593Smuzhiyun  * This file is distributed in the hope that it will be useful, but
14*4882a593Smuzhiyun  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15*4882a593Smuzhiyun  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16*4882a593Smuzhiyun  * NONINFRINGEMENT.  See the GNU General Public License for more
17*4882a593Smuzhiyun  * details.
18*4882a593Smuzhiyun  *
19*4882a593Smuzhiyun  * You should have received a copy of the GNU General Public License
20*4882a593Smuzhiyun  * along with this file; if not, write to the Free Software
21*4882a593Smuzhiyun  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22*4882a593Smuzhiyun  * or visit http://www.gnu.org/licenses/.
23*4882a593Smuzhiyun  *
24*4882a593Smuzhiyun  * This file may also be available under a different license from Cavium.
25*4882a593Smuzhiyun  * Contact Cavium Networks for more information
26*4882a593Smuzhiyun  ***********************license end**************************************/
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun /*
29*4882a593Smuzhiyun  * Interface to the hardware Fetch and Add Unit.
30*4882a593Smuzhiyun  */
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun #ifndef __CVMX_FAU_H__
33*4882a593Smuzhiyun #define __CVMX_FAU_H__
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun /*
36*4882a593Smuzhiyun  * Octeon Fetch and Add Unit (FAU)
37*4882a593Smuzhiyun  */
38*4882a593Smuzhiyun 
/* Base I/O address used for all FAU load/store/IOBDMA operations */
#define CVMX_FAU_LOAD_IO_ADDRESS    cvmx_build_io_address(0x1e, 0)
/*
 * Bit ranges (msb, lsb) passed to cvmx_build_bits() when assembling FAU
 * I/O addresses and IOBDMA data words.
 */
#define CVMX_FAU_BITS_SCRADDR	    63, 56	/* scratch pad address >> 3 (IOBDMA only) */
#define CVMX_FAU_BITS_LEN	    55, 48	/* IOBDMA length field (always 1 here) */
#define CVMX_FAU_BITS_INEVAL	    35, 14	/* signed increment value (22 bits) */
#define CVMX_FAU_BITS_TAGWAIT	    13, 13	/* load: wait for tag switch to complete */
#define CVMX_FAU_BITS_NOADD	    13, 13	/* store: write over instead of adding */
#define CVMX_FAU_BITS_SIZE	    12, 11	/* cvmx_fau_op_size_t (IOBDMA only) */
#define CVMX_FAU_BITS_REGISTER	    10, 0	/* FAU register number, 0 <= reg < 2048 */
47*4882a593Smuzhiyun 
/*
 * Width of an FAU operation, as encoded into the SIZE field
 * (CVMX_FAU_BITS_SIZE) of an IOBDMA data word.
 */
typedef enum {
	CVMX_FAU_OP_SIZE_8 = 0,		/* 8 bit operation */
	CVMX_FAU_OP_SIZE_16 = 1,	/* 16 bit operation */
	CVMX_FAU_OP_SIZE_32 = 2,	/* 32 bit operation */
	CVMX_FAU_OP_SIZE_64 = 3		/* 64 bit operation */
} cvmx_fau_op_size_t;
54*4882a593Smuzhiyun 
55*4882a593Smuzhiyun /**
56*4882a593Smuzhiyun  * Tagwait return definition. If a timeout occurs, the error
57*4882a593Smuzhiyun  * bit will be set. Otherwise the value of the register before
58*4882a593Smuzhiyun  * the update will be returned.
59*4882a593Smuzhiyun  */
typedef struct {
	uint64_t error:1;	/* Set on tag switch timeout; value is then invalid */
	int64_t value:63;	/* Register value before the update (signed) */
} cvmx_fau_tagwait64_t;
64*4882a593Smuzhiyun 
65*4882a593Smuzhiyun /**
66*4882a593Smuzhiyun  * Tagwait return definition. If a timeout occurs, the error
67*4882a593Smuzhiyun  * bit will be set. Otherwise the value of the register before
68*4882a593Smuzhiyun  * the update will be returned.
69*4882a593Smuzhiyun  */
typedef struct {
	uint64_t error:1;	/* Set on tag switch timeout; value is then invalid */
	int32_t value:31;	/* Register value before the update (signed) */
} cvmx_fau_tagwait32_t;
74*4882a593Smuzhiyun 
75*4882a593Smuzhiyun /**
76*4882a593Smuzhiyun  * Tagwait return definition. If a timeout occurs, the error
77*4882a593Smuzhiyun  * bit will be set. Otherwise the value of the register before
78*4882a593Smuzhiyun  * the update will be returned.
79*4882a593Smuzhiyun  */
typedef struct {
	uint64_t error:1;	/* Set on tag switch timeout; value is then invalid */
	int16_t value:15;	/* Register value before the update (signed) */
} cvmx_fau_tagwait16_t;
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun /**
86*4882a593Smuzhiyun  * Tagwait return definition. If a timeout occurs, the error
87*4882a593Smuzhiyun  * bit will be set. Otherwise the value of the register before
88*4882a593Smuzhiyun  * the update will be returned.
89*4882a593Smuzhiyun  */
typedef struct {
	uint64_t error:1;	/* Set on tag switch timeout; value is then invalid */
	int8_t value:7;		/* Register value before the update (signed) */
} cvmx_fau_tagwait8_t;
94*4882a593Smuzhiyun 
95*4882a593Smuzhiyun /**
96*4882a593Smuzhiyun  * Asynchronous tagwait return definition. If a timeout occurs,
97*4882a593Smuzhiyun  * the error bit will be set. Otherwise the value of the
98*4882a593Smuzhiyun  * register before the update will be returned.
99*4882a593Smuzhiyun  */
typedef union {
	uint64_t u64;		/* Raw 64-bit scratch pad response */
	struct {
		uint64_t invalid:1;	/* Set if the tagwait timed out */
		uint64_t data:63;	/* unpredictable if invalid is set */
	} s;
} cvmx_fau_async_tagwait_result_t;
107*4882a593Smuzhiyun 
/*
 * Register-number swizzle applied before sub-64-bit FAU accesses.
 * On big-endian no adjustment is needed; on little-endian the register
 * number is XORed with these masks to address the correct byte lane
 * within the 64-bit word.
 */
#ifdef __BIG_ENDIAN_BITFIELD
#define SWIZZLE_8  0
#define SWIZZLE_16 0
#define SWIZZLE_32 0
#else
#define SWIZZLE_8  0x7
#define SWIZZLE_16 0x6
#define SWIZZLE_32 0x4
#endif
117*4882a593Smuzhiyun 
118*4882a593Smuzhiyun /**
119*4882a593Smuzhiyun  * Builds a store I/O address for writing to the FAU
120*4882a593Smuzhiyun  *
121*4882a593Smuzhiyun  * @noadd:  0 = Store value is atomically added to the current value
122*4882a593Smuzhiyun  *		 1 = Store value is atomically written over the current value
123*4882a593Smuzhiyun  * @reg:    FAU atomic register to access. 0 <= reg < 2048.
124*4882a593Smuzhiyun  *		 - Step by 2 for 16 bit access.
125*4882a593Smuzhiyun  *		 - Step by 4 for 32 bit access.
126*4882a593Smuzhiyun  *		 - Step by 8 for 64 bit access.
127*4882a593Smuzhiyun  * Returns Address to store for atomic update
128*4882a593Smuzhiyun  */
__cvmx_fau_store_address(uint64_t noadd,uint64_t reg)129*4882a593Smuzhiyun static inline uint64_t __cvmx_fau_store_address(uint64_t noadd, uint64_t reg)
130*4882a593Smuzhiyun {
131*4882a593Smuzhiyun 	return CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
132*4882a593Smuzhiyun 	       cvmx_build_bits(CVMX_FAU_BITS_NOADD, noadd) |
133*4882a593Smuzhiyun 	       cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
134*4882a593Smuzhiyun }
135*4882a593Smuzhiyun 
136*4882a593Smuzhiyun /**
137*4882a593Smuzhiyun  * Builds a I/O address for accessing the FAU
138*4882a593Smuzhiyun  *
139*4882a593Smuzhiyun  * @tagwait: Should the atomic add wait for the current tag switch
140*4882a593Smuzhiyun  *		  operation to complete.
141*4882a593Smuzhiyun  *		  - 0 = Don't wait
142*4882a593Smuzhiyun  *		  - 1 = Wait for tag switch to complete
143*4882a593Smuzhiyun  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
144*4882a593Smuzhiyun  *		  - Step by 2 for 16 bit access.
145*4882a593Smuzhiyun  *		  - Step by 4 for 32 bit access.
146*4882a593Smuzhiyun  *		  - Step by 8 for 64 bit access.
147*4882a593Smuzhiyun  * @value:   Signed value to add.
148*4882a593Smuzhiyun  *		  Note: When performing 32 and 64 bit access, only the low
149*4882a593Smuzhiyun  *		  22 bits are available.
150*4882a593Smuzhiyun  * Returns Address to read from for atomic update
151*4882a593Smuzhiyun  */
__cvmx_fau_atomic_address(uint64_t tagwait,uint64_t reg,int64_t value)152*4882a593Smuzhiyun static inline uint64_t __cvmx_fau_atomic_address(uint64_t tagwait, uint64_t reg,
153*4882a593Smuzhiyun 						 int64_t value)
154*4882a593Smuzhiyun {
155*4882a593Smuzhiyun 	return CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
156*4882a593Smuzhiyun 	       cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
157*4882a593Smuzhiyun 	       cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
158*4882a593Smuzhiyun 	       cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
159*4882a593Smuzhiyun }
160*4882a593Smuzhiyun 
161*4882a593Smuzhiyun /**
162*4882a593Smuzhiyun  * Perform an atomic 64 bit add
163*4882a593Smuzhiyun  *
164*4882a593Smuzhiyun  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
165*4882a593Smuzhiyun  *		  - Step by 8 for 64 bit access.
166*4882a593Smuzhiyun  * @value:   Signed value to add.
167*4882a593Smuzhiyun  *		  Note: Only the low 22 bits are available.
168*4882a593Smuzhiyun  * Returns Value of the register before the update
169*4882a593Smuzhiyun  */
static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg,
					       int64_t value)
{
	/* A plain (non-tagwait) load triggers the atomic add. */
	uint64_t address = __cvmx_fau_atomic_address(0, reg, value);

	return cvmx_read64_int64(address);
}
175*4882a593Smuzhiyun 
176*4882a593Smuzhiyun /**
177*4882a593Smuzhiyun  * Perform an atomic 32 bit add
178*4882a593Smuzhiyun  *
179*4882a593Smuzhiyun  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
180*4882a593Smuzhiyun  *		  - Step by 4 for 32 bit access.
181*4882a593Smuzhiyun  * @value:   Signed value to add.
182*4882a593Smuzhiyun  *		  Note: Only the low 22 bits are available.
183*4882a593Smuzhiyun  * Returns Value of the register before the update
184*4882a593Smuzhiyun  */
static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg,
					       int32_t value)
{
	uint64_t address;

	/* Byte-swizzle the register number for little-endian hosts. */
	reg ^= SWIZZLE_32;
	address = __cvmx_fau_atomic_address(0, reg, value);
	return cvmx_read64_int32(address);
}
191*4882a593Smuzhiyun 
192*4882a593Smuzhiyun /**
193*4882a593Smuzhiyun  * Perform an atomic 16 bit add
194*4882a593Smuzhiyun  *
195*4882a593Smuzhiyun  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
196*4882a593Smuzhiyun  *		  - Step by 2 for 16 bit access.
197*4882a593Smuzhiyun  * @value:   Signed value to add.
198*4882a593Smuzhiyun  * Returns Value of the register before the update
199*4882a593Smuzhiyun  */
static inline int16_t cvmx_fau_fetch_and_add16(cvmx_fau_reg_16_t reg,
					       int16_t value)
{
	uint64_t address;

	/* Byte-swizzle the register number for little-endian hosts. */
	reg ^= SWIZZLE_16;
	address = __cvmx_fau_atomic_address(0, reg, value);
	return cvmx_read64_int16(address);
}
206*4882a593Smuzhiyun 
207*4882a593Smuzhiyun /**
208*4882a593Smuzhiyun  * Perform an atomic 8 bit add
209*4882a593Smuzhiyun  *
210*4882a593Smuzhiyun  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
211*4882a593Smuzhiyun  * @value:   Signed value to add.
212*4882a593Smuzhiyun  * Returns Value of the register before the update
213*4882a593Smuzhiyun  */
static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
{
	uint64_t address;

	/* Byte-swizzle the register number for little-endian hosts. */
	reg ^= SWIZZLE_8;
	address = __cvmx_fau_atomic_address(0, reg, value);
	return cvmx_read64_int8(address);
}
219*4882a593Smuzhiyun 
220*4882a593Smuzhiyun /**
221*4882a593Smuzhiyun  * Perform an atomic 64 bit add after the current tag switch
222*4882a593Smuzhiyun  * completes
223*4882a593Smuzhiyun  *
224*4882a593Smuzhiyun  * @reg:    FAU atomic register to access. 0 <= reg < 2048.
225*4882a593Smuzhiyun  *		 - Step by 8 for 64 bit access.
226*4882a593Smuzhiyun  * @value:  Signed value to add.
227*4882a593Smuzhiyun  *		 Note: Only the low 22 bits are available.
228*4882a593Smuzhiyun  * Returns If a timeout occurs, the error bit will be set. Otherwise
229*4882a593Smuzhiyun  *	   the value of the register before the update will be
230*4882a593Smuzhiyun  *	   returned
231*4882a593Smuzhiyun  */
232*4882a593Smuzhiyun static inline cvmx_fau_tagwait64_t
cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg,int64_t value)233*4882a593Smuzhiyun cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
234*4882a593Smuzhiyun {
235*4882a593Smuzhiyun 	union {
236*4882a593Smuzhiyun 		uint64_t i64;
237*4882a593Smuzhiyun 		cvmx_fau_tagwait64_t t;
238*4882a593Smuzhiyun 	} result;
239*4882a593Smuzhiyun 	result.i64 =
240*4882a593Smuzhiyun 	    cvmx_read64_int64(__cvmx_fau_atomic_address(1, reg, value));
241*4882a593Smuzhiyun 	return result.t;
242*4882a593Smuzhiyun }
243*4882a593Smuzhiyun 
244*4882a593Smuzhiyun /**
245*4882a593Smuzhiyun  * Perform an atomic 32 bit add after the current tag switch
246*4882a593Smuzhiyun  * completes
247*4882a593Smuzhiyun  *
248*4882a593Smuzhiyun  * @reg:    FAU atomic register to access. 0 <= reg < 2048.
249*4882a593Smuzhiyun  *		 - Step by 4 for 32 bit access.
250*4882a593Smuzhiyun  * @value:  Signed value to add.
251*4882a593Smuzhiyun  *		 Note: Only the low 22 bits are available.
252*4882a593Smuzhiyun  * Returns If a timeout occurs, the error bit will be set. Otherwise
253*4882a593Smuzhiyun  *	   the value of the register before the update will be
254*4882a593Smuzhiyun  *	   returned
255*4882a593Smuzhiyun  */
256*4882a593Smuzhiyun static inline cvmx_fau_tagwait32_t
cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg,int32_t value)257*4882a593Smuzhiyun cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
258*4882a593Smuzhiyun {
259*4882a593Smuzhiyun 	union {
260*4882a593Smuzhiyun 		uint64_t i32;
261*4882a593Smuzhiyun 		cvmx_fau_tagwait32_t t;
262*4882a593Smuzhiyun 	} result;
263*4882a593Smuzhiyun 	reg ^= SWIZZLE_32;
264*4882a593Smuzhiyun 	result.i32 =
265*4882a593Smuzhiyun 	    cvmx_read64_int32(__cvmx_fau_atomic_address(1, reg, value));
266*4882a593Smuzhiyun 	return result.t;
267*4882a593Smuzhiyun }
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun /**
270*4882a593Smuzhiyun  * Perform an atomic 16 bit add after the current tag switch
271*4882a593Smuzhiyun  * completes
272*4882a593Smuzhiyun  *
273*4882a593Smuzhiyun  * @reg:    FAU atomic register to access. 0 <= reg < 2048.
274*4882a593Smuzhiyun  *		 - Step by 2 for 16 bit access.
275*4882a593Smuzhiyun  * @value:  Signed value to add.
276*4882a593Smuzhiyun  * Returns If a timeout occurs, the error bit will be set. Otherwise
277*4882a593Smuzhiyun  *	   the value of the register before the update will be
278*4882a593Smuzhiyun  *	   returned
279*4882a593Smuzhiyun  */
280*4882a593Smuzhiyun static inline cvmx_fau_tagwait16_t
cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg,int16_t value)281*4882a593Smuzhiyun cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
282*4882a593Smuzhiyun {
283*4882a593Smuzhiyun 	union {
284*4882a593Smuzhiyun 		uint64_t i16;
285*4882a593Smuzhiyun 		cvmx_fau_tagwait16_t t;
286*4882a593Smuzhiyun 	} result;
287*4882a593Smuzhiyun 	reg ^= SWIZZLE_16;
288*4882a593Smuzhiyun 	result.i16 =
289*4882a593Smuzhiyun 	    cvmx_read64_int16(__cvmx_fau_atomic_address(1, reg, value));
290*4882a593Smuzhiyun 	return result.t;
291*4882a593Smuzhiyun }
292*4882a593Smuzhiyun 
293*4882a593Smuzhiyun /**
294*4882a593Smuzhiyun  * Perform an atomic 8 bit add after the current tag switch
295*4882a593Smuzhiyun  * completes
296*4882a593Smuzhiyun  *
297*4882a593Smuzhiyun  * @reg:    FAU atomic register to access. 0 <= reg < 2048.
298*4882a593Smuzhiyun  * @value:  Signed value to add.
299*4882a593Smuzhiyun  * Returns If a timeout occurs, the error bit will be set. Otherwise
300*4882a593Smuzhiyun  *	   the value of the register before the update will be
301*4882a593Smuzhiyun  *	   returned
302*4882a593Smuzhiyun  */
303*4882a593Smuzhiyun static inline cvmx_fau_tagwait8_t
cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg,int8_t value)304*4882a593Smuzhiyun cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
305*4882a593Smuzhiyun {
306*4882a593Smuzhiyun 	union {
307*4882a593Smuzhiyun 		uint64_t i8;
308*4882a593Smuzhiyun 		cvmx_fau_tagwait8_t t;
309*4882a593Smuzhiyun 	} result;
310*4882a593Smuzhiyun 	reg ^= SWIZZLE_8;
311*4882a593Smuzhiyun 	result.i8 = cvmx_read64_int8(__cvmx_fau_atomic_address(1, reg, value));
312*4882a593Smuzhiyun 	return result.t;
313*4882a593Smuzhiyun }
314*4882a593Smuzhiyun 
315*4882a593Smuzhiyun /**
316*4882a593Smuzhiyun  * Builds I/O data for async operations
317*4882a593Smuzhiyun  *
318*4882a593Smuzhiyun  * @scraddr: Scratch pad byte address to write to.  Must be 8 byte aligned
319*4882a593Smuzhiyun  * @value:   Signed value to add.
320*4882a593Smuzhiyun  *		  Note: When performing 32 and 64 bit access, only the low
321*4882a593Smuzhiyun  *		  22 bits are available.
322*4882a593Smuzhiyun  * @tagwait: Should the atomic add wait for the current tag switch
323*4882a593Smuzhiyun  *		  operation to complete.
324*4882a593Smuzhiyun  *		  - 0 = Don't wait
325*4882a593Smuzhiyun  *		  - 1 = Wait for tag switch to complete
326*4882a593Smuzhiyun  * @size:    The size of the operation:
327*4882a593Smuzhiyun  *		  - CVMX_FAU_OP_SIZE_8	(0) = 8 bits
328*4882a593Smuzhiyun  *		  - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
329*4882a593Smuzhiyun  *		  - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
330*4882a593Smuzhiyun  *		  - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
331*4882a593Smuzhiyun  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
332*4882a593Smuzhiyun  *		  - Step by 2 for 16 bit access.
333*4882a593Smuzhiyun  *		  - Step by 4 for 32 bit access.
334*4882a593Smuzhiyun  *		  - Step by 8 for 64 bit access.
335*4882a593Smuzhiyun  * Returns Data to write using cvmx_send_single
336*4882a593Smuzhiyun  */
static inline uint64_t __cvmx_fau_iobdma_data(uint64_t scraddr, int64_t value,
					      uint64_t tagwait,
					      cvmx_fau_op_size_t size,
					      uint64_t reg)
{
	/* Assemble the IOBDMA data word field by field. */
	uint64_t data = CVMX_FAU_LOAD_IO_ADDRESS;

	data |= cvmx_build_bits(CVMX_FAU_BITS_SCRADDR, scraddr >> 3);
	data |= cvmx_build_bits(CVMX_FAU_BITS_LEN, 1);
	data |= cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value);
	data |= cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait);
	data |= cvmx_build_bits(CVMX_FAU_BITS_SIZE, size);
	data |= cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
	return data;
}
350*4882a593Smuzhiyun 
351*4882a593Smuzhiyun /**
352*4882a593Smuzhiyun  * Perform an async atomic 64 bit add. The old value is
353*4882a593Smuzhiyun  * placed in the scratch memory at byte address scraddr.
354*4882a593Smuzhiyun  *
355*4882a593Smuzhiyun  * @scraddr: Scratch memory byte address to put response in.
356*4882a593Smuzhiyun  *		  Must be 8 byte aligned.
357*4882a593Smuzhiyun  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
358*4882a593Smuzhiyun  *		  - Step by 8 for 64 bit access.
359*4882a593Smuzhiyun  * @value:   Signed value to add.
360*4882a593Smuzhiyun  *		  Note: Only the low 22 bits are available.
361*4882a593Smuzhiyun  * Returns Placed in the scratch pad register
362*4882a593Smuzhiyun  */
static inline void cvmx_fau_async_fetch_and_add64(uint64_t scraddr,
						  cvmx_fau_reg_64_t reg,
						  int64_t value)
{
	uint64_t data = __cvmx_fau_iobdma_data(scraddr, value, 0,
					       CVMX_FAU_OP_SIZE_64, reg);

	cvmx_send_single(data);
}
370*4882a593Smuzhiyun 
371*4882a593Smuzhiyun /**
372*4882a593Smuzhiyun  * Perform an async atomic 32 bit add. The old value is
373*4882a593Smuzhiyun  * placed in the scratch memory at byte address scraddr.
374*4882a593Smuzhiyun  *
375*4882a593Smuzhiyun  * @scraddr: Scratch memory byte address to put response in.
376*4882a593Smuzhiyun  *		  Must be 8 byte aligned.
377*4882a593Smuzhiyun  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
378*4882a593Smuzhiyun  *		  - Step by 4 for 32 bit access.
379*4882a593Smuzhiyun  * @value:   Signed value to add.
380*4882a593Smuzhiyun  *		  Note: Only the low 22 bits are available.
381*4882a593Smuzhiyun  * Returns Placed in the scratch pad register
382*4882a593Smuzhiyun  */
static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
						  cvmx_fau_reg_32_t reg,
						  int32_t value)
{
	uint64_t data = __cvmx_fau_iobdma_data(scraddr, value, 0,
					       CVMX_FAU_OP_SIZE_32, reg);

	cvmx_send_single(data);
}
390*4882a593Smuzhiyun 
391*4882a593Smuzhiyun /**
392*4882a593Smuzhiyun  * Perform an async atomic 16 bit add. The old value is
393*4882a593Smuzhiyun  * placed in the scratch memory at byte address scraddr.
394*4882a593Smuzhiyun  *
395*4882a593Smuzhiyun  * @scraddr: Scratch memory byte address to put response in.
396*4882a593Smuzhiyun  *		  Must be 8 byte aligned.
397*4882a593Smuzhiyun  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
398*4882a593Smuzhiyun  *		  - Step by 2 for 16 bit access.
399*4882a593Smuzhiyun  * @value:   Signed value to add.
400*4882a593Smuzhiyun  * Returns Placed in the scratch pad register
401*4882a593Smuzhiyun  */
static inline void cvmx_fau_async_fetch_and_add16(uint64_t scraddr,
						  cvmx_fau_reg_16_t reg,
						  int16_t value)
{
	uint64_t data = __cvmx_fau_iobdma_data(scraddr, value, 0,
					       CVMX_FAU_OP_SIZE_16, reg);

	cvmx_send_single(data);
}
409*4882a593Smuzhiyun 
410*4882a593Smuzhiyun /**
411*4882a593Smuzhiyun  * Perform an async atomic 8 bit add. The old value is
412*4882a593Smuzhiyun  * placed in the scratch memory at byte address scraddr.
413*4882a593Smuzhiyun  *
414*4882a593Smuzhiyun  * @scraddr: Scratch memory byte address to put response in.
415*4882a593Smuzhiyun  *		  Must be 8 byte aligned.
416*4882a593Smuzhiyun  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
417*4882a593Smuzhiyun  * @value:   Signed value to add.
418*4882a593Smuzhiyun  * Returns Placed in the scratch pad register
419*4882a593Smuzhiyun  */
static inline void cvmx_fau_async_fetch_and_add8(uint64_t scraddr,
						 cvmx_fau_reg_8_t reg,
						 int8_t value)
{
	uint64_t data = __cvmx_fau_iobdma_data(scraddr, value, 0,
					       CVMX_FAU_OP_SIZE_8, reg);

	cvmx_send_single(data);
}
427*4882a593Smuzhiyun 
428*4882a593Smuzhiyun /**
429*4882a593Smuzhiyun  * Perform an async atomic 64 bit add after the current tag
430*4882a593Smuzhiyun  * switch completes.
431*4882a593Smuzhiyun  *
432*4882a593Smuzhiyun  * @scraddr: Scratch memory byte address to put response in.  Must be
433*4882a593Smuzhiyun  *	     8 byte aligned.  If a timeout occurs, the error bit (63)
434*4882a593Smuzhiyun  *	     will be set. Otherwise the value of the register before
435*4882a593Smuzhiyun  *	     the update will be returned
436*4882a593Smuzhiyun  *
437*4882a593Smuzhiyun  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
438*4882a593Smuzhiyun  *		  - Step by 8 for 64 bit access.
439*4882a593Smuzhiyun  * @value:   Signed value to add.
440*4882a593Smuzhiyun  *		  Note: Only the low 22 bits are available.
441*4882a593Smuzhiyun  * Returns Placed in the scratch pad register
442*4882a593Smuzhiyun  */
static inline void cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr,
							  cvmx_fau_reg_64_t reg,
							  int64_t value)
{
	uint64_t data = __cvmx_fau_iobdma_data(scraddr, value, 1,
					       CVMX_FAU_OP_SIZE_64, reg);

	cvmx_send_single(data);
}
450*4882a593Smuzhiyun 
451*4882a593Smuzhiyun /**
452*4882a593Smuzhiyun  * Perform an async atomic 32 bit add after the current tag
453*4882a593Smuzhiyun  * switch completes.
454*4882a593Smuzhiyun  *
455*4882a593Smuzhiyun  * @scraddr: Scratch memory byte address to put response in.  Must be
456*4882a593Smuzhiyun  *	     8 byte aligned.  If a timeout occurs, the error bit (63)
457*4882a593Smuzhiyun  *	     will be set. Otherwise the value of the register before
458*4882a593Smuzhiyun  *	     the update will be returned
459*4882a593Smuzhiyun  *
460*4882a593Smuzhiyun  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
461*4882a593Smuzhiyun  *		  - Step by 4 for 32 bit access.
462*4882a593Smuzhiyun  * @value:   Signed value to add.
463*4882a593Smuzhiyun  *		  Note: Only the low 22 bits are available.
464*4882a593Smuzhiyun  * Returns Placed in the scratch pad register
465*4882a593Smuzhiyun  */
static inline void cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr,
							  cvmx_fau_reg_32_t reg,
							  int32_t value)
{
	uint64_t data = __cvmx_fau_iobdma_data(scraddr, value, 1,
					       CVMX_FAU_OP_SIZE_32, reg);

	cvmx_send_single(data);
}
473*4882a593Smuzhiyun 
474*4882a593Smuzhiyun /**
475*4882a593Smuzhiyun  * Perform an async atomic 16 bit add after the current tag
476*4882a593Smuzhiyun  * switch completes.
477*4882a593Smuzhiyun  *
478*4882a593Smuzhiyun  * @scraddr: Scratch memory byte address to put response in.  Must be
479*4882a593Smuzhiyun  *	     8 byte aligned.  If a timeout occurs, the error bit (63)
480*4882a593Smuzhiyun  *	     will be set. Otherwise the value of the register before
481*4882a593Smuzhiyun  *	     the update will be returned
482*4882a593Smuzhiyun  *
483*4882a593Smuzhiyun  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
484*4882a593Smuzhiyun  *		  - Step by 2 for 16 bit access.
485*4882a593Smuzhiyun  * @value:   Signed value to add.
486*4882a593Smuzhiyun  *
487*4882a593Smuzhiyun  * Returns Placed in the scratch pad register
488*4882a593Smuzhiyun  */
static inline void cvmx_fau_async_tagwait_fetch_and_add16(uint64_t scraddr,
							  cvmx_fau_reg_16_t reg,
							  int16_t value)
{
	uint64_t data = __cvmx_fau_iobdma_data(scraddr, value, 1,
					       CVMX_FAU_OP_SIZE_16, reg);

	cvmx_send_single(data);
}
496*4882a593Smuzhiyun 
497*4882a593Smuzhiyun /**
498*4882a593Smuzhiyun  * Perform an async atomic 8 bit add after the current tag
499*4882a593Smuzhiyun  * switch completes.
500*4882a593Smuzhiyun  *
501*4882a593Smuzhiyun  * @scraddr: Scratch memory byte address to put response in.  Must be
502*4882a593Smuzhiyun  *	     8 byte aligned.  If a timeout occurs, the error bit (63)
503*4882a593Smuzhiyun  *	     will be set. Otherwise the value of the register before
504*4882a593Smuzhiyun  *	     the update will be returned
505*4882a593Smuzhiyun  *
506*4882a593Smuzhiyun  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
507*4882a593Smuzhiyun  * @value:   Signed value to add.
508*4882a593Smuzhiyun  *
509*4882a593Smuzhiyun  * Returns Placed in the scratch pad register
510*4882a593Smuzhiyun  */
static inline void cvmx_fau_async_tagwait_fetch_and_add8(uint64_t scraddr,
							 cvmx_fau_reg_8_t reg,
							 int8_t value)
{
	uint64_t data = __cvmx_fau_iobdma_data(scraddr, value, 1,
					       CVMX_FAU_OP_SIZE_8, reg);

	cvmx_send_single(data);
}
518*4882a593Smuzhiyun 
519*4882a593Smuzhiyun /**
520*4882a593Smuzhiyun  * Perform an atomic 64 bit add
521*4882a593Smuzhiyun  *
522*4882a593Smuzhiyun  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
523*4882a593Smuzhiyun  *		  - Step by 8 for 64 bit access.
524*4882a593Smuzhiyun  * @value:   Signed value to add.
525*4882a593Smuzhiyun  */
static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value)
{
	/* Storing with noadd=0 atomically adds value to the register. */
	uint64_t address = __cvmx_fau_store_address(0, reg);

	cvmx_write64_int64(address, value);
}
530*4882a593Smuzhiyun 
531*4882a593Smuzhiyun /**
532*4882a593Smuzhiyun  * Perform an atomic 32 bit add
533*4882a593Smuzhiyun  *
534*4882a593Smuzhiyun  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
535*4882a593Smuzhiyun  *		  - Step by 4 for 32 bit access.
536*4882a593Smuzhiyun  * @value:   Signed value to add.
537*4882a593Smuzhiyun  */
static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
{
	/* Swizzle the register for little-endian; noadd=0 selects add. */
	uint64_t address = __cvmx_fau_store_address(0, reg ^ SWIZZLE_32);

	cvmx_write64_int32(address, value);
}
543*4882a593Smuzhiyun 
544*4882a593Smuzhiyun /**
545*4882a593Smuzhiyun  * Perform an atomic 16 bit add
546*4882a593Smuzhiyun  *
547*4882a593Smuzhiyun  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
548*4882a593Smuzhiyun  *		  - Step by 2 for 16 bit access.
549*4882a593Smuzhiyun  * @value:   Signed value to add.
550*4882a593Smuzhiyun  */
static inline void cvmx_fau_atomic_add16(cvmx_fau_reg_16_t reg, int16_t value)
{
	/* Swizzle the register for little-endian; noadd=0 selects add. */
	uint64_t address = __cvmx_fau_store_address(0, reg ^ SWIZZLE_16);

	cvmx_write64_int16(address, value);
}
556*4882a593Smuzhiyun 
557*4882a593Smuzhiyun /**
558*4882a593Smuzhiyun  * Perform an atomic 8 bit add
559*4882a593Smuzhiyun  *
560*4882a593Smuzhiyun  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
561*4882a593Smuzhiyun  * @value:   Signed value to add.
562*4882a593Smuzhiyun  */
static inline void cvmx_fau_atomic_add8(cvmx_fau_reg_8_t reg, int8_t value)
{
	/* Swizzle the register for little-endian; noadd=0 selects add. */
	uint64_t address = __cvmx_fau_store_address(0, reg ^ SWIZZLE_8);

	cvmx_write64_int8(address, value);
}
568*4882a593Smuzhiyun 
569*4882a593Smuzhiyun /**
570*4882a593Smuzhiyun  * Perform an atomic 64 bit write
571*4882a593Smuzhiyun  *
572*4882a593Smuzhiyun  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
573*4882a593Smuzhiyun  *		  - Step by 8 for 64 bit access.
574*4882a593Smuzhiyun  * @value:   Signed value to write.
575*4882a593Smuzhiyun  */
static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value)
{
	/* Storing with noadd=1 overwrites the register atomically. */
	uint64_t address = __cvmx_fau_store_address(1, reg);

	cvmx_write64_int64(address, value);
}
580*4882a593Smuzhiyun 
581*4882a593Smuzhiyun /**
582*4882a593Smuzhiyun  * Perform an atomic 32 bit write
583*4882a593Smuzhiyun  *
584*4882a593Smuzhiyun  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
585*4882a593Smuzhiyun  *		  - Step by 4 for 32 bit access.
586*4882a593Smuzhiyun  * @value:   Signed value to write.
587*4882a593Smuzhiyun  */
static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
{
	/* Swizzle the register for little-endian; noadd=1 selects write. */
	uint64_t address = __cvmx_fau_store_address(1, reg ^ SWIZZLE_32);

	cvmx_write64_int32(address, value);
}
593*4882a593Smuzhiyun 
594*4882a593Smuzhiyun /**
595*4882a593Smuzhiyun  * Perform an atomic 16 bit write
596*4882a593Smuzhiyun  *
597*4882a593Smuzhiyun  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
598*4882a593Smuzhiyun  *		  - Step by 2 for 16 bit access.
599*4882a593Smuzhiyun  * @value:   Signed value to write.
600*4882a593Smuzhiyun  */
static inline void cvmx_fau_atomic_write16(cvmx_fau_reg_16_t reg, int16_t value)
{
	/* Swizzle the register for little-endian; noadd=1 selects write. */
	uint64_t address = __cvmx_fau_store_address(1, reg ^ SWIZZLE_16);

	cvmx_write64_int16(address, value);
}
606*4882a593Smuzhiyun 
607*4882a593Smuzhiyun /**
608*4882a593Smuzhiyun  * Perform an atomic 8 bit write
609*4882a593Smuzhiyun  *
610*4882a593Smuzhiyun  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
611*4882a593Smuzhiyun  * @value:   Signed value to write.
612*4882a593Smuzhiyun  */
static inline void cvmx_fau_atomic_write8(cvmx_fau_reg_8_t reg, int8_t value)
{
	/* Swizzle the register for little-endian; noadd=1 selects write. */
	uint64_t address = __cvmx_fau_store_address(1, reg ^ SWIZZLE_8);

	cvmx_write64_int8(address, value);
}
618*4882a593Smuzhiyun 
619*4882a593Smuzhiyun #endif /* __CVMX_FAU_H__ */
620