xref: /optee_os/core/arch/riscv/include/riscv.h (revision 55a4d839310ce46aca79a12015ab8e1da9f110e5)
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright 2022-2023 NXP
 */

#ifndef __RISCV_H
#define __RISCV_H

#include <compiler.h>
#include <encoding.h>
#include <stdbool.h>
#include <stdint.h>
#include <sys/cdefs.h>
#include <util.h>

#define RISCV_XLEN_BITS		(__riscv_xlen)
#define RISCV_XLEN_BYTES	(__riscv_xlen / 8)

/* Bind registers to their ABI names */
#define REG_RA	1
#define REG_SP	2
#define REG_GP	3
#define REG_TP	4
#define REG_T0	5
#define REG_T2	7
#define REG_S0	8
#define REG_S1	9
#define REG_A0	10
#define REG_A1	11
#define REG_A2	12
#define REG_A3	13
#define REG_A5	15
#define REG_A7	17
#define REG_S2	18
#define REG_S11	27
#define REG_T3	28
#define REG_T6	31

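/*
 * The indices above are the hardware register numbers of GPRs x1..x31;
 * they are used, e.g., to locate a saved register in a trap frame when
 * decoding a trapping instruction.
 */

/*
 * Depending on the privilege mode OP-TEE is built for, the CSR aliases
 * and XRET below resolve to either the machine-level (mret, mstatus,
 * ...) or the supervisor-level (sret, sstatus, ...) flavor of each CSR
 * and trap-return instruction.
 */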
#if defined(CFG_RISCV_M_MODE)
#define CSR_MODE_OFFSET	PRV_M
#define XRET			mret
#elif defined(CFG_RISCV_S_MODE)
#define CSR_MODE_OFFSET	PRV_S
#define XRET			sret
#endif

#define CSR_MODE_BITS		SHIFT_U64(CSR_MODE_OFFSET, 8)

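/*
 * CSR addresses encode the lowest privilege level allowed to access the
 * register in bits [9:8], so OR-ing CSR_MODE_BITS into the offsets below
 * yields the mode-specific address: e.g. CSR_XSTATUS is 0x100 (sstatus)
 * when CSR_MODE_OFFSET is PRV_S and 0x300 (mstatus) when it is PRV_M.
 */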
#define CSR_XSTATUS		(CSR_MODE_BITS | 0x000)
#define CSR_XIE			(CSR_MODE_BITS | 0x004)
#define CSR_XTVEC		(CSR_MODE_BITS | 0x005)
#define CSR_XSCRATCH		(CSR_MODE_BITS | 0x040)
#define CSR_XEPC		(CSR_MODE_BITS | 0x041)
#define CSR_XCAUSE		(CSR_MODE_BITS | 0x042)
#define CSR_XTVAL		(CSR_MODE_BITS | 0x043)
#define CSR_XIP			(CSR_MODE_BITS | 0x044)

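/*
 * Per-mode interrupt numbers: with CSR_MODE_OFFSET of PRV_S these are
 * SSIP=1, STIP=5 and SEIP=9; with PRV_M they are MSIP=3, MTIP=7 and
 * MEIP=11. The same numbers give the bit positions in xie/xip below.
 */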
#define IRQ_XSOFT		(CSR_MODE_OFFSET + 0)
#define IRQ_XTIMER		(CSR_MODE_OFFSET + 4)
#define IRQ_XEXT		(CSR_MODE_OFFSET + 8)

#define CSR_XIE_SIE		BIT64(IRQ_XSOFT)
#define CSR_XIE_TIE		BIT64(IRQ_XTIMER)
#define CSR_XIE_EIE		BIT64(IRQ_XEXT)

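/*
 * xstatus bits: IE/PIE are the (previous) interrupt-enable bits of the
 * current mode; SPP records the privilege level a trap was taken from,
 * SUM permits S-mode access to U-mode pages and MXR makes execute-only
 * pages readable.
 */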
#define CSR_XSTATUS_IE		BIT(CSR_MODE_OFFSET + 0)
#define CSR_XSTATUS_PIE		BIT(CSR_MODE_OFFSET + 4)
#define CSR_XSTATUS_SPP		BIT(8)
#define CSR_XSTATUS_SUM		BIT(18)
#define CSR_XSTATUS_MXR		BIT(19)

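/*
 * The MSB of xcause distinguishes interrupts (set) from synchronous
 * exceptions (clear); the remaining bits hold the cause code.
 */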
#define CSR_XCAUSE_INTR_FLAG	BIT64(__riscv_xlen - 1)

#ifndef __ASSEMBLER__

#define read_csr(csr)							\
	({								\
		unsigned long __tmp;					\
		asm volatile ("csrr %0, %1" : "=r"(__tmp) : "i"(csr));	\
		__tmp;							\
	})

#define write_csr(csr, val)						\
	({								\
		asm volatile ("csrw %0, %1" : : "i"(csr), "rK"(val));	\
	})

#define swap_csr(csr, val)						\
	({								\
		unsigned long __tmp;					\
		asm volatile ("csrrw %0, %1, %2"			\
			      : "=r"(__tmp) : "i"(csr), "rK"(val));	\
		__tmp;							\
	})

#define set_csr(csr, bit)						\
	({								\
		unsigned long __tmp;					\
		asm volatile ("csrrs %0, %1, %2"			\
			      : "=r"(__tmp) : "i"(csr), "rK"(bit));	\
		__tmp;							\
	})

#define clear_csr(csr, bit)						\
	({								\
		unsigned long __tmp;					\
		asm volatile ("csrrc %0, %1, %2"			\
			      : "=r"(__tmp) : "i"(csr), "rK"(bit));	\
		__tmp;							\
	})

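/*
 * Usage sketch for the accessors above, e.g. to enable the timer
 * interrupt and read the trap cause:
 *
 *	set_csr(CSR_XIE, CSR_XIE_TIE);
 *	unsigned long cause = read_csr(CSR_XCAUSE);
 *
 * The "rK" constraint lets the compiler pick the immediate forms
 * (csrrwi/csrrsi/csrrci) when the value fits in a 5-bit immediate.
 */

/*
 * rdtime/rdcycle/rdinstret read the unprivileged counter CSRs; on RV32
 * they return only the low 32 bits of each counter.
 */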
#define rdtime() read_csr(CSR_TIME)
#define rdcycle() read_csr(CSR_CYCLE)
#define rdinstret() read_csr(CSR_INSTRET)

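/* A bare "fence" assembles to "fence iorw, iorw", i.e. a full barrier. */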
static inline __noprof void mb(void)
{
	asm volatile ("fence" : : : "memory");
}

static inline __noprof unsigned long read_gp(void)
{
	unsigned long gp = 0;

	asm volatile("mv %0, gp" : "=&r"(gp));
	return gp;
}

static inline __noprof unsigned long read_tp(void)
{
	unsigned long tp = 0;

	asm volatile("mv %0, tp" : "=&r"(tp));
	return tp;
}

static inline __noprof unsigned long read_fp(void)
{
	unsigned long fp = 0;

	asm volatile ("mv %0, s0" : "=r" (fp));

	return fp;
}

static inline __noprof unsigned long read_pc(void)
{
	unsigned long pc = 0;

	asm volatile ("auipc %0, 0" : "=r" (pc));

	return pc;
}

static inline __noprof void wfi(void)
{
	asm volatile ("wfi");
}

static inline __noprof void riscv_cpu_pause(void)
{
	unsigned long dummy = 0;

	/*
	 * Use a divide instruction to force the CPU to wait for
	 * multiple cycles.
	 * Note: RISC-V does not raise an exception on divide by zero.
	 */
	asm volatile ("div %0, %0, zero" : "+r" (dummy));

	/*
	 * Emit the raw encoding of the 'pause' instruction so that
	 * toolchain support for Zihintpause is not required.
	 * PAUSE is a HINT within the FENCE opcode space, so hardware
	 * that does not implement the extension executes it as a no-op.
	 */
	asm volatile (".4byte 0x100000f"); /* pause */
	barrier();
}

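/*
 * sfence.vma with zero/zero operands orders and flushes address
 * translation for all virtual addresses and all ASIDs.
 */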
static inline __noprof void flush_tlb(void)
{
	asm volatile("sfence.vma zero, zero");
}

static inline __noprof void flush_tlb_entry(unsigned long va)
{
	asm volatile ("sfence.vma %0" : : "r" (va) : "memory");
}

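/*
 * satp holds the address-translation MODE, the ASID and the physical
 * page number of the root page table; a write that changes the
 * translation scheme is normally followed by an sfence.vma.
 */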
/* supervisor address translation and protection */
static inline __noprof unsigned long read_satp(void)
{
	unsigned long satp;

	asm volatile("csrr %0, satp" : "=r" (satp));

	return satp;
}

static inline __noprof void write_satp(unsigned long satp)
{
	asm volatile("csrw satp, %0" : : "r" (satp));
}

/* machine trap-vector base-address register */
static inline __noprof unsigned long read_mtvec(void)
{
	unsigned long mtvec;

	asm volatile("csrr %0, mtvec" : "=r" (mtvec));

	return mtvec;
}

static inline __noprof void write_mtvec(unsigned long mtvec)
{
	asm volatile("csrw mtvec, %0" : : "r" (mtvec));
}

/* supervisor trap-vector base-address register */
static inline __noprof unsigned long read_stvec(void)
{
	unsigned long stvec;

	asm volatile("csrr %0, stvec" : "=r" (stvec));

	return stvec;
}

static inline __noprof void write_stvec(unsigned long stvec)
{
	asm volatile("csrw stvec, %0" : : "r" (stvec));
}

/* machine status register */
static inline __noprof unsigned long read_mstatus(void)
{
	unsigned long mstatus;

	asm volatile("csrr %0, mstatus" : "=r" (mstatus));

	return mstatus;
}

static inline __noprof void write_mstatus(unsigned long mstatus)
{
	asm volatile("csrw mstatus, %0" : : "r" (mstatus));
}

/* supervisor status register */
static inline __noprof unsigned long read_sstatus(void)
{
	unsigned long sstatus;

	asm volatile("csrr %0, sstatus" : "=r" (sstatus));

	return sstatus;
}

static inline __noprof void write_sstatus(unsigned long sstatus)
{
	asm volatile("csrw sstatus, %0" : : "r" (sstatus));
}

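/*
 * csrrs atomically sets the given sstatus bits; the previous value read
 * into x is intentionally discarded.
 */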
static inline __noprof void set_sstatus(unsigned long sstatus)
{
	unsigned long x;

	asm volatile ("csrrs %0, sstatus, %1" : "=r"(x) : "rK"(sstatus));
}

/* machine exception delegation */
static inline __noprof unsigned long read_medeleg(void)
{
	unsigned long medeleg;

	asm volatile("csrr %0, medeleg" : "=r" (medeleg));

	return medeleg;
}

static inline __noprof void write_medeleg(unsigned long medeleg)
{
	asm volatile("csrw medeleg, %0" : : "r" (medeleg));
}

/* machine interrupt delegation */
static inline __noprof unsigned long read_mideleg(void)
{
	unsigned long mideleg;

	asm volatile("csrr %0, mideleg" : "=r" (mideleg));

	return mideleg;
}

static inline __noprof void write_mideleg(unsigned long mideleg)
{
	asm volatile("csrw mideleg, %0" : : "r" (mideleg));
}

/* machine interrupt-enable register */
static inline __noprof unsigned long read_mie(void)
{
	unsigned long mie;

	asm volatile("csrr %0, mie" : "=r" (mie));

	return mie;
}

static inline __noprof void write_mie(unsigned long mie)
{
	asm volatile("csrw mie, %0" : : "r" (mie));
}

/* supervisor interrupt-enable register */
static inline __noprof unsigned long read_sie(void)
{
	unsigned long sie;

	asm volatile("csrr %0, sie" : "=r" (sie));

	return sie;
}

static inline __noprof void write_sie(unsigned long sie)
{
	asm volatile("csrw sie, %0" : : "r" (sie));
}

/* machine exception program counter */
static inline __noprof unsigned long read_mepc(void)
{
	unsigned long mepc;

	asm volatile("csrr %0, mepc" : "=r" (mepc));

	return mepc;
}

static inline __noprof void write_mepc(unsigned long mepc)
{
	asm volatile("csrw mepc, %0" : : "r" (mepc));
}

/* supervisor exception program counter */
static inline __noprof unsigned long read_sepc(void)
{
	unsigned long sepc;

	asm volatile("csrr %0, sepc" : "=r" (sepc));

	return sepc;
}

static inline __noprof void write_sepc(unsigned long sepc)
{
	asm volatile("csrw sepc, %0" : : "r" (sepc));
}

/* machine scratch register */
static inline __noprof unsigned long read_mscratch(void)
{
	unsigned long mscratch;

	asm volatile("csrr %0, mscratch" : "=r" (mscratch));

	return mscratch;
}

static inline __noprof void write_mscratch(unsigned long mscratch)
{
	asm volatile("csrw mscratch, %0" : : "r" (mscratch));
}

/* supervisor scratch register */
static inline __noprof unsigned long read_sscratch(void)
{
	unsigned long sscratch;

	asm volatile("csrr %0, sscratch" : "=r" (sscratch));

	return sscratch;
}

static inline __noprof void write_sscratch(unsigned long sscratch)
{
	asm volatile("csrw sscratch, %0" : : "r" (sscratch));
}

/* trap-return instructions */
static inline __noprof void mret(void)
{
	asm volatile("mret");
}

static inline __noprof void sret(void)
{
	asm volatile("sret");
}

static inline __noprof void uret(void)
{
	asm volatile("uret");
}

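/*
 * Note: uret comes from the user-level interrupt (N) extension, which
 * was never ratified; on hardware without it the instruction traps as
 * illegal.
 */

/*
 * Returns the 64-bit timer value; implemented per privilege mode
 * outside this header (e.g. via the mtime device or the time CSR).
 */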
__noprof uint64_t read_time(void);

static inline __noprof uint64_t barrier_read_counter_timer(void)
{
	mb();	/* Get timer value after pending operations have completed */
	return read_time();
}

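/*
 * Mirrors the Arm helper of the same name; CFG_RISCV_MTIME_RATE is the
 * platform-configured timer (mtime) frequency in Hz.
 */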
static inline __noprof uint32_t read_cntfrq(void)
{
	return CFG_RISCV_MTIME_RATE;
}

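/*
 * Presumed to probe for the Zkr entropy-source ("seed") CSR; implemented
 * outside this header.
 */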
__noprof bool riscv_detect_csr_seed(void);

#endif /*__ASSEMBLER__*/

#endif /*__RISCV_H*/