/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014-2019, Linaro Limited
 */
#ifndef IO_H
#define IO_H

#include <compiler.h>
#include <stdint.h>
#include <types_ext.h>
#include <utee_defines.h>

/*
 * Make sure that the compiler reads/writes a given variable only once. This
 * is needed when we have normal shared memory that can be changed at any
 * moment. The compiler does not know about this, so it could optimize the
 * memory accesses in any way, including issuing repeated accesses to the
 * same address. These macros force the compiler to access the memory
 * exactly once.
 */
#define READ_ONCE(p)		__compiler_atomic_load(&(p))
#define WRITE_ONCE(p, v)	__compiler_atomic_store(&(p), (v))
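
/*
 * Illustrative sketch only (not part of the upstream header): one way the
 * macros above might be used when exchanging a value through normal shared
 * memory. The function and parameter names are made up for the example, and
 * this is not an atomic exchange, only a guarantee that the compiler emits
 * each access once.
 */
static inline uint32_t example_swap_token(uint32_t *shared, uint32_t token)
{
	/* Exactly one load and one store of the shared variable are emitted */
	uint32_t old = READ_ONCE(*shared);

	WRITE_ONCE(*shared, token);

	return old;
}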

/*
 * Memory-mapped I/O register accessors: each helper performs exactly one
 * volatile access of the stated width at @addr.
 */
static inline void io_write8(vaddr_t addr, uint8_t val)
{
	*(volatile uint8_t *)addr = val;
}

static inline void io_write16(vaddr_t addr, uint16_t val)
{
	*(volatile uint16_t *)addr = val;
}

static inline void io_write32(vaddr_t addr, uint32_t val)
{
	*(volatile uint32_t *)addr = val;
}

static inline uint8_t io_read8(vaddr_t addr)
{
	return *(volatile uint8_t *)addr;
}

static inline uint16_t io_read16(vaddr_t addr)
{
	return *(volatile uint16_t *)addr;
}

static inline uint32_t io_read32(vaddr_t addr)
{
	return *(volatile uint32_t *)addr;
}
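
/*
 * Illustrative sketch only: busy-wait on a hypothetical device status
 * register until its ready bit is set. The register offset and bit position
 * are invented for the example.
 */
static inline void example_wait_ready(vaddr_t base)
{
	/* Each iteration performs one 32-bit volatile read of the register */
	while (!(io_read32(base + 0x4) & 0x1))
		;
}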

/*
 * Read-modify-write helpers: only the bits selected by @mask are updated,
 * taking their new values from the corresponding bits of @val.
 */
static inline void io_mask8(vaddr_t addr, uint8_t val, uint8_t mask)
{
	io_write8(addr, (io_read8(addr) & ~mask) | (val & mask));
}

static inline void io_mask16(vaddr_t addr, uint16_t val, uint16_t mask)
{
	io_write16(addr, (io_read16(addr) & ~mask) | (val & mask));
}

static inline void io_mask32(vaddr_t addr, uint32_t val, uint32_t mask)
{
	io_write32(addr, (io_read32(addr) & ~mask) | (val & mask));
}
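
/*
 * Illustrative sketch only: update one field of a hypothetical control
 * register with io_mask32(). Only the bits selected by the mask change; the
 * register offset and field layout are invented for the example.
 */
static inline void example_set_divider(vaddr_t base, uint32_t div)
{
	/* Replace bits [7:0] with the new divider value, keep the other bits */
	io_mask32(base + 0x10, div, 0xff);
}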

/*
 * Endianness helpers for naturally aligned data: the get_beN()/put_beN()
 * functions convert between CPU byte order and big-endian storage.
 */
static inline uint64_t get_be64(const void *p)
{
	return TEE_U64_FROM_BIG_ENDIAN(*(const uint64_t *)p);
}

static inline void put_be64(void *p, uint64_t val)
{
	*(uint64_t *)p = TEE_U64_TO_BIG_ENDIAN(val);
}

static inline uint32_t get_be32(const void *p)
{
	return TEE_U32_FROM_BIG_ENDIAN(*(const uint32_t *)p);
}

static inline void put_be32(void *p, uint32_t val)
{
	*(uint32_t *)p = TEE_U32_TO_BIG_ENDIAN(val);
}

static inline uint16_t get_be16(const void *p)
{
	return TEE_U16_FROM_BIG_ENDIAN(*(const uint16_t *)p);
}

static inline void put_be16(void *p, uint16_t val)
{
	*(uint16_t *)p = TEE_U16_TO_BIG_ENDIAN(val);
}

/*
 * The little-endian helpers below do no byte swapping: they assume the CPU
 * itself runs little-endian.
 */
static inline void put_le32(void *p, uint32_t val)
{
	*(uint32_t *)p = val;
}

static inline uint32_t get_le32(const void *p)
{
	return *(const uint32_t *)p;
}

static inline void put_le64(void *p, uint64_t val)
{
	*(uint64_t *)p = val;
}

static inline uint64_t get_le64(const void *p)
{
	return *(const uint64_t *)p;
}
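
/*
 * Illustrative sketch only: read a 32-bit big-endian length field from a
 * protocol header. The header layout is invented for the example, and @hdr
 * is assumed to be suitably aligned for a 32-bit access (the unaligned
 * helpers below handle arbitrary alignment).
 */
static inline uint32_t example_header_len(const void *hdr)
{
	/* The length is stored big-endian at byte offset 4 */
	return get_be32((const uint8_t *)hdr + 4);
}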

/*
 * Unaligned accesses: the __packed wrapper structs below make the compiler
 * generate accesses that are valid for any alignment of the pointed-to data.
 */

struct __unaligned_u16_t { uint16_t x; } __packed;
struct __unaligned_u32_t { uint32_t x; } __packed;
struct __unaligned_u64_t { uint64_t x; } __packed;

static inline uint64_t get_unaligned_be64(const void *p)
{
	const struct __unaligned_u64_t *tmp = p;

	return TEE_U64_FROM_BIG_ENDIAN(tmp->x);
}

static inline void put_unaligned_be64(void *p, uint64_t val)
{
	struct __unaligned_u64_t *tmp = p;

	tmp->x = TEE_U64_TO_BIG_ENDIAN(val);
}

static inline uint32_t get_unaligned_be32(const void *p)
{
	const struct __unaligned_u32_t *tmp = p;

	return TEE_U32_FROM_BIG_ENDIAN(tmp->x);
}

static inline void put_unaligned_be32(void *p, uint32_t val)
{
	struct __unaligned_u32_t *tmp = p;

	tmp->x = TEE_U32_TO_BIG_ENDIAN(val);
}

static inline uint16_t get_unaligned_be16(const void *p)
{
	const struct __unaligned_u16_t *tmp = p;

	return TEE_U16_FROM_BIG_ENDIAN(tmp->x);
}

static inline void put_unaligned_be16(void *p, uint16_t val)
{
	struct __unaligned_u16_t *tmp = p;

	tmp->x = TEE_U16_TO_BIG_ENDIAN(val);
}

static inline void put_unaligned_le64(void *p, uint64_t val)
{
	struct __unaligned_u64_t *tmp = p;

	tmp->x = val;
}

static inline uint64_t get_unaligned_le64(const void *p)
{
	const struct __unaligned_u64_t *tmp = p;

	return tmp->x;
}

static inline void put_unaligned_le32(void *p, uint32_t val)
{
	struct __unaligned_u32_t *tmp = p;

	tmp->x = val;
}

static inline uint32_t get_unaligned_le32(const void *p)
{
	const struct __unaligned_u32_t *tmp = p;

	return tmp->x;
}

static inline void put_unaligned_le16(void *p, uint16_t val)
{
	struct __unaligned_u16_t *tmp = p;

	tmp->x = val;
}

static inline uint16_t get_unaligned_le16(const void *p)
{
	const struct __unaligned_u16_t *tmp = p;

	return tmp->x;
}
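
/*
 * Illustrative sketch only: pull a 32-bit big-endian value out of a raw byte
 * buffer at an arbitrary offset, for instance while walking a TLV-encoded
 * blob. Names and the encoding are invented for the example.
 */
static inline uint32_t example_tlv_value(const uint8_t *buf, size_t offs)
{
	/* buf + offs may have any alignment; the __packed access copes with it */
	return get_unaligned_be32(buf + offs);
}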

/*
 * Set and clear bits helpers.
 *
 * @addr is the address of the memory cell accessed
 * @set_mask is the bit mask of the bit(s) to set, i.e. write as 1
 * @clear_mask is the bit mask of the bit(s) to clear, i.e. reset to 0
 *
 * io_clrsetbits32() clears the bits in @clear_mask and then sets the bits in
 * @set_mask, in that order. Hence, if a bit position is covered by both
 * @set_mask and @clear_mask, it ends up set.
 */
static inline void io_setbits32(vaddr_t addr, uint32_t set_mask)
{
	io_write32(addr, io_read32(addr) | set_mask);
}

static inline void io_clrbits32(vaddr_t addr, uint32_t clear_mask)
{
	io_write32(addr, io_read32(addr) & ~clear_mask);
}

static inline void io_clrsetbits32(vaddr_t addr, uint32_t clear_mask,
				   uint32_t set_mask)
{
	io_write32(addr, (io_read32(addr) & ~clear_mask) | set_mask);
}
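
/*
 * Illustrative sketch only: switch a hypothetical 2-bit mode field with
 * io_clrsetbits32(). Clearing the whole field first means the new mode does
 * not need to account for the previously programmed value; the offset and
 * field layout are invented for the example.
 */
static inline void example_set_mode(vaddr_t base, uint32_t mode)
{
	/* Clear bits [5:4], then set them to the requested mode */
	io_clrsetbits32(base + 0x8, 0x3 << 4, (mode & 0x3) << 4);
}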

static inline void io_setbits16(vaddr_t addr, uint16_t set_mask)
{
	io_write16(addr, io_read16(addr) | set_mask);
}

static inline void io_clrbits16(vaddr_t addr, uint16_t clear_mask)
{
	io_write16(addr, io_read16(addr) & ~clear_mask);
}

static inline void io_clrsetbits16(vaddr_t addr, uint16_t clear_mask,
				   uint16_t set_mask)
{
	io_write16(addr, (io_read16(addr) & ~clear_mask) | set_mask);
}

static inline void io_setbits8(vaddr_t addr, uint8_t set_mask)
{
	io_write8(addr, io_read8(addr) | set_mask);
}

static inline void io_clrbits8(vaddr_t addr, uint8_t clear_mask)
{
	io_write8(addr, io_read8(addr) & ~clear_mask);
}

static inline void io_clrsetbits8(vaddr_t addr, uint8_t clear_mask,
				  uint8_t set_mask)
{
	io_write8(addr, (io_read8(addr) & ~clear_mask) | set_mask);
}
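
/*
 * Illustrative sketch only: pair io_clrbits32() and io_setbits32() to mask
 * and later unmask a hypothetical interrupt enable bit. The register offset
 * and bit position are invented for the example.
 */
static inline void example_mask_irq(vaddr_t base)
{
	/* Clear the enable bit; the other bits of the register are preserved */
	io_clrbits32(base + 0xc, 0x1 << 3);
}

static inline void example_unmask_irq(vaddr_t base)
{
	/* Set the enable bit back */
	io_setbits32(base + 0xc, 0x1 << 3);
}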

#endif /*IO_H*/