xref: /optee_os/core/include/io.h (revision 98d105a565cef6cc3e861184b7e2a745a985cf49)
1 /* SPDX-License-Identifier: BSD-2-Clause */
2 /*
3  * Copyright (c) 2014-2019, Linaro Limited
4  */
5 #ifndef __IO_H
6 #define __IO_H
7 
8 #include <compiler.h>
9 #include <kernel/delay.h>
10 #include <kernel/delay_arch.h>
11 #include <stdint.h>
12 #include <types_ext.h>
13 #include <utee_defines.h>
14 
/*
 * Make sure that the compiler reads/writes a given variable only once. This
 * is needed when we have normal shared memory that can be changed at any
 * moment. The compiler does not know about this, so it may optimize memory
 * accesses in any way, including merging repeated accesses to the same
 * address. These macros force the compiler to access the memory exactly once.
 */
#define READ_ONCE(p)		__compiler_atomic_load(&(p))
#define WRITE_ONCE(p, v)	__compiler_atomic_store(&(p), (v))
24 
/* Store an 8-bit value to the I/O memory location @addr */
static inline void io_write8(vaddr_t addr, uint8_t val)
{
	volatile uint8_t *reg = (volatile uint8_t *)addr;

	*reg = val;
}

/* Store a 16-bit value to the I/O memory location @addr */
static inline void io_write16(vaddr_t addr, uint16_t val)
{
	volatile uint16_t *reg = (volatile uint16_t *)addr;

	*reg = val;
}

/* Store a 32-bit value to the I/O memory location @addr */
static inline void io_write32(vaddr_t addr, uint32_t val)
{
	volatile uint32_t *reg = (volatile uint32_t *)addr;

	*reg = val;
}

/* Store a 64-bit value to the I/O memory location @addr */
static inline void io_write64(vaddr_t addr, uint64_t val)
{
	volatile uint64_t *reg = (volatile uint64_t *)addr;

	*reg = val;
}
44 
/* Load an 8-bit value from the I/O memory location @addr */
static inline uint8_t io_read8(vaddr_t addr)
{
	volatile uint8_t *reg = (volatile uint8_t *)addr;

	return *reg;
}

/* Load a 16-bit value from the I/O memory location @addr */
static inline uint16_t io_read16(vaddr_t addr)
{
	volatile uint16_t *reg = (volatile uint16_t *)addr;

	return *reg;
}

/* Load a 32-bit value from the I/O memory location @addr */
static inline uint32_t io_read32(vaddr_t addr)
{
	volatile uint32_t *reg = (volatile uint32_t *)addr;

	return *reg;
}

/* Load a 64-bit value from the I/O memory location @addr */
static inline uint64_t io_read64(vaddr_t addr)
{
	volatile uint64_t *reg = (volatile uint64_t *)addr;

	return *reg;
}
64 
/*
 * Read-modify-write: replace the bits selected by @mask at @addr with the
 * corresponding bits of @val. Bits outside @mask are preserved.
 */
static inline void io_mask8(vaddr_t addr, uint8_t val, uint8_t mask)
{
	uint8_t cur = io_read8(addr);

	io_write8(addr, (cur & ~mask) | (val & mask));
}

static inline void io_mask16(vaddr_t addr, uint16_t val, uint16_t mask)
{
	uint16_t cur = io_read16(addr);

	io_write16(addr, (cur & ~mask) | (val & mask));
}

static inline void io_mask32(vaddr_t addr, uint32_t val, uint32_t mask)
{
	uint32_t cur = io_read32(addr);

	io_write32(addr, (cur & ~mask) | (val & mask));
}
79 
/*
 * Big-endian load/store helpers for naturally aligned pointers.
 * Byte swapping (if any) is performed by the TEE_U*_FROM/TO_BIG_ENDIAN
 * macros from <utee_defines.h>.
 */
static inline uint64_t get_be64(const void *p)
{
	const uint64_t *q = p;

	return TEE_U64_FROM_BIG_ENDIAN(*q);
}

static inline void put_be64(void *p, uint64_t val)
{
	uint64_t *q = p;

	*q = TEE_U64_TO_BIG_ENDIAN(val);
}

static inline uint32_t get_be32(const void *p)
{
	const uint32_t *q = p;

	return TEE_U32_FROM_BIG_ENDIAN(*q);
}

static inline void put_be32(void *p, uint32_t val)
{
	uint32_t *q = p;

	*q = TEE_U32_TO_BIG_ENDIAN(val);
}

static inline uint16_t get_be16(const void *p)
{
	const uint16_t *q = p;

	return TEE_U16_FROM_BIG_ENDIAN(*q);
}

static inline void put_be16(void *p, uint16_t val)
{
	uint16_t *q = p;

	*q = TEE_U16_TO_BIG_ENDIAN(val);
}
109 
/*
 * Little-endian load/store helpers for naturally aligned pointers.
 *
 * NOTE(review): no byte swap is performed, i.e. these assume a
 * little-endian CPU — confirm all supported targets are LE.
 *
 * Fix: put_le32()/put_le64() previously declared @p as const void * and
 * then wrote through it, silently casting away const (undefined behavior
 * if the pointed-to object is actually const). Take void * like the
 * put_be*() helpers do.
 */
static inline void put_le32(void *p, uint32_t val)
{
	*(uint32_t *)p = val;
}

static inline uint32_t get_le32(const void *p)
{
	return *(const uint32_t *)p;
}

static inline void put_le64(void *p, uint64_t val)
{
	*(uint64_t *)p = val;
}

static inline uint64_t get_le64(const void *p)
{
	return *(const uint64_t *)p;
}
129 
/* Unaligned accesses */

/*
 * Wrapper structs marked __packed so that accesses through them are emitted
 * in a way that is safe for any alignment of the pointed-to data.
 */
struct __unaligned_u16_t { uint16_t x; } __packed;
struct __unaligned_u32_t { uint32_t x; } __packed;
struct __unaligned_u64_t { uint64_t x; } __packed;
135 
get_unaligned_be64(const void * p)136 static inline uint64_t get_unaligned_be64(const void *p)
137 {
138 	const struct __unaligned_u64_t *tmp = p;
139 
140 	return TEE_U64_FROM_BIG_ENDIAN(tmp->x);
141 }
142 
put_unaligned_be64(void * p,uint64_t val)143 static inline void put_unaligned_be64(void *p, uint64_t val)
144 {
145 	struct __unaligned_u64_t *tmp = p;
146 
147 	tmp->x = TEE_U64_TO_BIG_ENDIAN(val);
148 }
149 
get_unaligned_be32(const void * p)150 static inline uint32_t get_unaligned_be32(const void *p)
151 {
152 	const struct __unaligned_u32_t *tmp = p;
153 
154 	return TEE_U32_FROM_BIG_ENDIAN(tmp->x);
155 }
156 
put_unaligned_be32(void * p,uint32_t val)157 static inline void put_unaligned_be32(void *p, uint32_t val)
158 {
159 	struct __unaligned_u32_t *tmp = p;
160 
161 	tmp->x = TEE_U32_TO_BIG_ENDIAN(val);
162 }
163 
get_unaligned_be16(const void * p)164 static inline uint16_t get_unaligned_be16(const void *p)
165 {
166 	const struct __unaligned_u16_t *tmp = p;
167 
168 	return TEE_U16_FROM_BIG_ENDIAN(tmp->x);
169 }
170 
put_unaligned_be16(void * p,uint16_t val)171 static inline void put_unaligned_be16(void *p, uint16_t val)
172 {
173 	struct __unaligned_u16_t *tmp = p;
174 
175 	tmp->x = TEE_U16_TO_BIG_ENDIAN(val);
176 }
177 
put_unaligned_le64(void * p,uint64_t val)178 static inline void put_unaligned_le64(void *p, uint64_t val)
179 {
180 	struct __unaligned_u64_t *tmp = p;
181 
182 	tmp->x = val;
183 }
184 
get_unaligned_le64(const void * p)185 static inline uint64_t get_unaligned_le64(const void *p)
186 {
187 	const struct __unaligned_u64_t *tmp = p;
188 
189 	return tmp->x;
190 }
191 
put_unaligned_le32(void * p,uint32_t val)192 static inline void put_unaligned_le32(void *p, uint32_t val)
193 {
194 	struct __unaligned_u32_t *tmp = p;
195 
196 	tmp->x = val;
197 }
198 
get_unaligned_le32(const void * p)199 static inline uint32_t get_unaligned_le32(const void *p)
200 {
201 	const struct __unaligned_u32_t *tmp = p;
202 
203 	return tmp->x;
204 }
205 
put_unaligned_le16(void * p,uint16_t val)206 static inline void put_unaligned_le16(void *p, uint16_t val)
207 {
208 	struct __unaligned_u16_t *tmp = p;
209 
210 	tmp->x = val;
211 }
212 
get_unaligned_le16(const void * p)213 static inline uint16_t get_unaligned_le16(const void *p)
214 {
215 	const struct __unaligned_u16_t *tmp = p;
216 
217 	return tmp->x;
218 }
219 
220 /*
221  * Set and clear bits helpers.
222  *
223  * @addr is the address of the memory cell accessed
224  * @set_mask represents the bit mask of the bit(s) to set, aka set to 1
225  * @clear_mask represents the bit mask of the bit(s) to clear, aka reset to 0
226  *
227  * io_clrsetbits32() clears then sets the target bits in this order. If a bit
228  * position is defined by both @set_mask and @clear_mask, the bit will be set.
229  */
io_setbits32(vaddr_t addr,uint32_t set_mask)230 static inline void io_setbits32(vaddr_t addr, uint32_t set_mask)
231 {
232 	io_write32(addr, io_read32(addr) | set_mask);
233 }
234 
io_clrbits32(vaddr_t addr,uint32_t clear_mask)235 static inline void io_clrbits32(vaddr_t addr, uint32_t clear_mask)
236 {
237 	io_write32(addr, io_read32(addr) & ~clear_mask);
238 }
239 
io_clrsetbits32(vaddr_t addr,uint32_t clear_mask,uint32_t set_mask)240 static inline void io_clrsetbits32(vaddr_t addr, uint32_t clear_mask,
241 				   uint32_t set_mask)
242 {
243 	io_write32(addr, (io_read32(addr) & ~clear_mask) | set_mask);
244 }
245 
/* 16-bit variants of the set/clear bits helpers above */
static inline void io_setbits16(vaddr_t addr, uint16_t set_mask)
{
	uint16_t v = io_read16(addr);

	io_write16(addr, v | set_mask);
}

static inline void io_clrbits16(vaddr_t addr, uint16_t clear_mask)
{
	uint16_t v = io_read16(addr);

	io_write16(addr, v & ~clear_mask);
}

static inline void io_clrsetbits16(vaddr_t addr, uint16_t clear_mask,
				   uint16_t set_mask)
{
	uint16_t v = io_read16(addr);

	io_write16(addr, (v & ~clear_mask) | set_mask);
}
261 
/* 8-bit variants of the set/clear bits helpers above */
static inline void io_setbits8(vaddr_t addr, uint8_t set_mask)
{
	uint8_t v = io_read8(addr);

	io_write8(addr, v | set_mask);
}

static inline void io_clrbits8(vaddr_t addr, uint8_t clear_mask)
{
	uint8_t v = io_read8(addr);

	io_write8(addr, v & ~clear_mask);
}

static inline void io_clrsetbits8(vaddr_t addr, uint8_t clear_mask,
				  uint8_t set_mask)
{
	uint8_t v = io_read8(addr);

	io_write8(addr, (v & ~clear_mask) | set_mask);
}
277 
/*
 * Poll an I/O memory location until a condition is met or a timeout expires.
 *
 * @_addr is the address of the memory cell accessed (evaluated repeatedly —
 *	pass an expression without side effects)
 * @_val is an lvalue receiving the last value read from @_addr
 * @_cond is the condition (typically on @_val) that ends the polling
 * @_delay_us is the delay between reads, in microseconds
 * @_timeout_us is the timeout period, in microseconds
 *
 * @return 0 when @_cond became true, nonzero on timeout
 *
 * Note the extra read after the loop: if the thread is preempted between the
 * last in-loop read and the timeout check, @_cond may in fact have become
 * true — the final read avoids falsely reporting a timeout in that case.
 */
#define IO_READ32_POLL_TIMEOUT(_addr, _val, _cond, _delay_us, _timeout_us) \
	({ \
		uint64_t __timeout = timeout_init_us(_timeout_us); \
		uint32_t __delay = (_delay_us); \
		\
		while (!timeout_elapsed(__timeout)) { \
			(_val) = io_read32(_addr); \
			if (_cond) \
				break; \
			udelay(__delay); \
		} \
		(_val) = io_read32(_addr); \
		!(_cond); \
	})
303 
304 #endif /*__IO_H*/
305