xref: /optee_os/core/include/io.h (revision 5f7f88c6b9d618d1e068166bbf2b07757350791d)
1 /* SPDX-License-Identifier: BSD-2-Clause */
2 /*
3  * Copyright (c) 2014-2019, Linaro Limited
4  */
5 #ifndef __IO_H
6 #define __IO_H
7 
8 #include <compiler.h>
9 #include <kernel/delay.h>
10 #include <stdint.h>
11 #include <types_ext.h>
12 #include <utee_defines.h>
13 
/*
 * Make sure that the compiler reads/writes a given variable only once.
 * This is needed in cases when we have normal shared memory, and this
 * memory can be changed at any moment.  The compiler does not know
 * about this, so it can optimize memory access in any way, including
 * repeated accesses from the same address.  These macros force the
 * compiler to access memory only once.
 */
#define READ_ONCE(p)		__compiler_atomic_load(&(p))
#define WRITE_ONCE(p, v)	__compiler_atomic_store(&(p), (v))
23 
24 static inline void io_write8(vaddr_t addr, uint8_t val)
25 {
26 	*(volatile uint8_t *)addr = val;
27 }
28 
29 static inline void io_write16(vaddr_t addr, uint16_t val)
30 {
31 	*(volatile uint16_t *)addr = val;
32 }
33 
34 static inline void io_write32(vaddr_t addr, uint32_t val)
35 {
36 	*(volatile uint32_t *)addr = val;
37 }
38 
39 static inline void io_write64(vaddr_t addr, uint64_t val)
40 {
41 	*(volatile uint64_t *)addr = val;
42 }
43 
44 static inline uint8_t io_read8(vaddr_t addr)
45 {
46 	return *(volatile uint8_t *)addr;
47 }
48 
49 static inline uint16_t io_read16(vaddr_t addr)
50 {
51 	return *(volatile uint16_t *)addr;
52 }
53 
54 static inline uint32_t io_read32(vaddr_t addr)
55 {
56 	return *(volatile uint32_t *)addr;
57 }
58 
59 static inline uint64_t io_read64(vaddr_t addr)
60 {
61 	return *(volatile uint64_t *)addr;
62 }
63 
64 static inline void io_mask8(vaddr_t addr, uint8_t val, uint8_t mask)
65 {
66 	io_write8(addr, (io_read8(addr) & ~mask) | (val & mask));
67 }
68 
69 static inline void io_mask16(vaddr_t addr, uint16_t val, uint16_t mask)
70 {
71 	io_write16(addr, (io_read16(addr) & ~mask) | (val & mask));
72 }
73 
74 static inline void io_mask32(vaddr_t addr, uint32_t val, uint32_t mask)
75 {
76 	io_write32(addr, (io_read32(addr) & ~mask) | (val & mask));
77 }
78 
/* Load a naturally aligned big-endian 64-bit value from @p */
static inline uint64_t get_be64(const void *p)
{
	const uint64_t *q = p;

	return TEE_U64_FROM_BIG_ENDIAN(*q);
}
83 
/* Store @val at naturally aligned @p as a big-endian 64-bit value */
static inline void put_be64(void *p, uint64_t val)
{
	uint64_t *q = p;

	*q = TEE_U64_TO_BIG_ENDIAN(val);
}
88 
/* Load a naturally aligned big-endian 32-bit value from @p */
static inline uint32_t get_be32(const void *p)
{
	const uint32_t *q = p;

	return TEE_U32_FROM_BIG_ENDIAN(*q);
}
93 
/* Store @val at naturally aligned @p as a big-endian 32-bit value */
static inline void put_be32(void *p, uint32_t val)
{
	uint32_t *q = p;

	*q = TEE_U32_TO_BIG_ENDIAN(val);
}
98 
/* Load a naturally aligned big-endian 16-bit value from @p */
static inline uint16_t get_be16(const void *p)
{
	const uint16_t *q = p;

	return TEE_U16_FROM_BIG_ENDIAN(*q);
}
103 
/* Store @val at naturally aligned @p as a big-endian 16-bit value */
static inline void put_be16(void *p, uint16_t val)
{
	uint16_t *q = p;

	*q = TEE_U16_TO_BIG_ENDIAN(val);
}
108 
/*
 * Store @val at naturally aligned @p as a little-endian 32-bit value
 * (a plain store; assumes a little-endian CPU, matching get_le32()).
 *
 * Fix: the destination parameter was const-qualified although the
 * function writes through it (the cast silently dropped the
 * qualifier). Take a non-const pointer, consistent with put_be32().
 */
static inline void put_le32(void *p, uint32_t val)
{
	*(uint32_t *)p = val;
}
113 
/*
 * Load a naturally aligned little-endian 32-bit value from @p
 * (a plain load; assumes a little-endian CPU).
 */
static inline uint32_t get_le32(const void *p)
{
	const uint32_t *q = p;

	return *q;
}
118 
/*
 * Store @val at naturally aligned @p as a little-endian 64-bit value
 * (a plain store; assumes a little-endian CPU, matching get_le64()).
 *
 * Fix: the destination parameter was const-qualified although the
 * function writes through it (the cast silently dropped the
 * qualifier). Take a non-const pointer, consistent with put_be64().
 */
static inline void put_le64(void *p, uint64_t val)
{
	*(uint64_t *)p = val;
}
123 
/*
 * Load a naturally aligned little-endian 64-bit value from @p
 * (a plain load; assumes a little-endian CPU).
 */
static inline uint64_t get_le64(const void *p)
{
	const uint64_t *q = p;

	return *q;
}
128 
/* Unaligned accesses */

/*
 * Wrapping a scalar in a __packed struct lowers its alignment
 * requirement to 1 byte, so the unaligned accessors below can let the
 * compiler generate whatever access sequence the target supports for
 * unaligned addresses.
 */
struct __unaligned_u16_t { uint16_t x; } __packed;
struct __unaligned_u32_t { uint32_t x; } __packed;
struct __unaligned_u64_t { uint64_t x; } __packed;
134 
135 static inline uint64_t get_unaligned_be64(const void *p)
136 {
137 	const struct __unaligned_u64_t *tmp = p;
138 
139 	return TEE_U64_FROM_BIG_ENDIAN(tmp->x);
140 }
141 
142 static inline void put_unaligned_be64(void *p, uint64_t val)
143 {
144 	struct __unaligned_u64_t *tmp = p;
145 
146 	tmp->x = TEE_U64_TO_BIG_ENDIAN(val);
147 }
148 
149 static inline uint32_t get_unaligned_be32(const void *p)
150 {
151 	const struct __unaligned_u32_t *tmp = p;
152 
153 	return TEE_U32_FROM_BIG_ENDIAN(tmp->x);
154 }
155 
156 static inline void put_unaligned_be32(void *p, uint32_t val)
157 {
158 	struct __unaligned_u32_t *tmp = p;
159 
160 	tmp->x = TEE_U32_TO_BIG_ENDIAN(val);
161 }
162 
163 static inline uint16_t get_unaligned_be16(const void *p)
164 {
165 	const struct __unaligned_u16_t *tmp = p;
166 
167 	return TEE_U16_FROM_BIG_ENDIAN(tmp->x);
168 }
169 
170 static inline void put_unaligned_be16(void *p, uint16_t val)
171 {
172 	struct __unaligned_u16_t *tmp = p;
173 
174 	tmp->x = TEE_U16_TO_BIG_ENDIAN(val);
175 }
176 
177 static inline void put_unaligned_le64(void *p, uint64_t val)
178 {
179 	struct __unaligned_u64_t *tmp = p;
180 
181 	tmp->x = val;
182 }
183 
184 static inline uint64_t get_unaligned_le64(const void *p)
185 {
186 	const struct __unaligned_u64_t *tmp = p;
187 
188 	return tmp->x;
189 }
190 
191 static inline void put_unaligned_le32(void *p, uint32_t val)
192 {
193 	struct __unaligned_u32_t *tmp = p;
194 
195 	tmp->x = val;
196 }
197 
198 static inline uint32_t get_unaligned_le32(const void *p)
199 {
200 	const struct __unaligned_u32_t *tmp = p;
201 
202 	return tmp->x;
203 }
204 
205 static inline void put_unaligned_le16(void *p, uint16_t val)
206 {
207 	struct __unaligned_u16_t *tmp = p;
208 
209 	tmp->x = val;
210 }
211 
212 static inline uint16_t get_unaligned_le16(const void *p)
213 {
214 	const struct __unaligned_u16_t *tmp = p;
215 
216 	return tmp->x;
217 }
218 
219 /*
220  * Set and clear bits helpers.
221  *
222  * @addr is the address of the memory cell accessed
223  * @set_mask represents the bit mask of the bit(s) to set, aka set to 1
224  * @clear_mask represents the bit mask of the bit(s) to clear, aka reset to 0
225  *
226  * io_clrsetbits32() clears then sets the target bits in this order. If a bit
227  * position is defined by both @set_mask and @clear_mask, the bit will be set.
228  */
229 static inline void io_setbits32(vaddr_t addr, uint32_t set_mask)
230 {
231 	io_write32(addr, io_read32(addr) | set_mask);
232 }
233 
234 static inline void io_clrbits32(vaddr_t addr, uint32_t clear_mask)
235 {
236 	io_write32(addr, io_read32(addr) & ~clear_mask);
237 }
238 
239 static inline void io_clrsetbits32(vaddr_t addr, uint32_t clear_mask,
240 				   uint32_t set_mask)
241 {
242 	io_write32(addr, (io_read32(addr) & ~clear_mask) | set_mask);
243 }
244 
245 static inline void io_setbits16(vaddr_t addr, uint16_t set_mask)
246 {
247 	io_write16(addr, io_read16(addr) | set_mask);
248 }
249 
250 static inline void io_clrbits16(vaddr_t addr, uint16_t clear_mask)
251 {
252 	io_write16(addr, io_read16(addr) & ~clear_mask);
253 }
254 
255 static inline void io_clrsetbits16(vaddr_t addr, uint16_t clear_mask,
256 				   uint16_t set_mask)
257 {
258 	io_write16(addr, (io_read16(addr) & ~clear_mask) | set_mask);
259 }
260 
261 static inline void io_setbits8(vaddr_t addr, uint8_t set_mask)
262 {
263 	io_write8(addr, io_read8(addr) | set_mask);
264 }
265 
266 static inline void io_clrbits8(vaddr_t addr, uint8_t clear_mask)
267 {
268 	io_write8(addr, io_read8(addr) & ~clear_mask);
269 }
270 
271 static inline void io_clrsetbits8(vaddr_t addr, uint8_t clear_mask,
272 				  uint8_t set_mask)
273 {
274 	io_write8(addr, (io_read8(addr) & ~clear_mask) | set_mask);
275 }
276 
/*
 * Poll on an IO memory content or timeout
 *
 * @_addr is the address of the memory cell accessed
 * @_val is an lvalue receiving the last value read from @_addr
 * @_cond represents the condition to get the correct value
 * @_delay_us represents the read interval in microseconds
 * @_timeout_us represents the timeout period in microseconds
 *
 * @return nonzero value means timeout, 0 means got right value
 *
 * NOTE(review): @_addr, @_cond and @_timeout_us are evaluated more
 * than once, so side-effecting arguments must be avoided.  The elapsed
 * time is approximated by accumulating @_delay_us between reads (no
 * timer is read), so a zero @_delay_us never advances the timeout —
 * callers should pass a nonzero delay.  @_val is re-read once after
 * the loop so it holds a fresh value when the macro returns.
 */
#define IO_READ32_POLL_TIMEOUT(_addr, _val, _cond, _delay_us, _timeout_us) \
	({ \
		uint32_t __timeout = 0; \
		uint32_t __delay = (_delay_us); \
		\
		while (__timeout < (_timeout_us)) { \
			(_val) = io_read32(_addr); \
			if (_cond) \
				break; \
			__timeout += (__delay); \
			udelay(__delay); \
		} \
		(_val) = io_read32(_addr); \
		!(_cond); \
	})
303 
304 #endif /*__IO_H*/
305