1 /*
2 * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #ifndef ERRATA_H
8 #define ERRATA_H
9
10 #include <lib/cpus/cpu_ops.h>
11
/*
 * Field sizes (in bytes) of struct erratum_entry, shared with assembly.
 * ERRATUM_CHECK_FUNC_SIZE is a function pointer, so it follows the word
 * size of the architecture being built (CPU_WORD_SIZE from cpu_ops.h).
 */
#define ERRATUM_CHECK_FUNC_SIZE CPU_WORD_SIZE
#define ERRATUM_ID_SIZE 4
#define ERRATUM_CVE_SIZE 2
#define ERRATUM_CHOSEN_SIZE 1
#define ERRATUM_ALIGNMENT_SIZE 1

/*
 * Byte offsets of each field within struct erratum_entry. Each expansion is
 * parenthesised so the macros stay well-formed when embedded in larger
 * expressions (e.g. scaling by an entry index).
 */
#define ERRATUM_CHECK_FUNC 0
#define ERRATUM_ID (ERRATUM_CHECK_FUNC + ERRATUM_CHECK_FUNC_SIZE)
#define ERRATUM_CVE (ERRATUM_ID + ERRATUM_ID_SIZE)
#define ERRATUM_CHOSEN (ERRATUM_CVE + ERRATUM_CVE_SIZE)
#define ERRATUM_ALIGNMENT (ERRATUM_CHOSEN + ERRATUM_CHOSEN_SIZE)
#define ERRATUM_ENTRY_SIZE (ERRATUM_ALIGNMENT + ERRATUM_ALIGNMENT_SIZE)
24
/* Errata status (see check_erratum_applies() return value). */
#define ERRATA_NOT_APPLIES 0
#define ERRATA_APPLIES 1
#define ERRATA_MISSING 2

/* Errata ID for smc workarounds */
#define ARCH_WORKAROUND_2 2
#define ARCH_WORKAROUND_3 3

/*
 * Build-option aggregate: non-zero when at least one feature that consumes
 * the errata list (status reporting, the errata ABI, or one of the CVE
 * workarounds) is enabled, i.e. when the list must be compiled in.
 */
#define INCLUDE_ERRATA_LIST ( \
	REPORT_ERRATA | \
	ERRATA_ABI_SUPPORT | \
	WORKAROUND_CVE_2017_5715 | \
	WORKAROUND_CVE_2018_3639 | \
	WORKAROUND_CVE_2022_23960 | \
	WORKAROUND_CVE_2024_7881)
41
42 #ifndef __ASSEMBLER__
43 #include <lib/cassert.h>
44
45 void print_errata_status(void);
46
/*
 * Describes one erratum/CVE workaround in the per-CPU errata list, consumed
 * by both C and assembly code.
 *
 * NOTE that this structure will be different on AArch32 and AArch64. The
 * uintptr_t will reflect the change and the alignment will be correct in both.
 */
struct erratum_entry {
	uintptr_t (*check_func)(uint64_t cpu_rev);
	/* Will fit CVEs with up to 10 character in the ID field */
	uint32_t id;
	/* Denote CVEs with their year or errata with 0 */
	uint16_t cve;
	/*
	 * a bitfield:
	 * bit 0 - denotes if the erratum is enabled in build.
	 * bit 1 - denotes if the erratum workaround is split and
	 * also needs to be implemented at a lower EL.
	 */
	uint8_t chosen;
	/* Explicit padding byte so the packed size matches ERRATUM_ENTRY_SIZE. */
	uint8_t _alignment;
} __packed;

/*
 * Guard against the C struct layout drifting out of sync with the
 * assembly-side ERRATUM_* size/offset macros defined above.
 */
CASSERT(sizeof(struct erratum_entry) == ERRATUM_ENTRY_SIZE,
	assert_erratum_entry_asm_c_different_sizes);
69
70 /*
71 * Runtime errata helpers.
72 */
#if ERRATA_A75_764081
bool errata_a75_764081_applies(void);
#else
/*
 * Erratum not compiled into this build: provide a constant-false stub so
 * callers need no #ifdefs and the compiler can fold the check away.
 */
static inline bool errata_a75_764081_applies(void)
{
	return false;
}
#endif
81
/*
 * Errata framework query helpers.
 * NOTE(review): per-function summaries below are inferred from the names —
 * confirm against the implementations.
 */
/* True if the current core needs the TRBE-disable workaround. */
bool check_if_trbe_disable_affected_core(void);
/* True if the ICH_VMCR_EL2 erratum workaround applies to this core. */
bool errata_ich_vmcr_el2_applies(void);
/* Look up the list entry for the given erratum/CVE id. */
struct erratum_entry *find_erratum_entry(uint32_t errata_id);
/* Returns one of the ERRATA_* status values defined above. */
int check_erratum_applies(uint32_t cve, int errata_id);
86
87 /*
88 * API to perform CPP RCTX instruction functionality in EL3
89 *
90 * Rather than trapping EL3 to EL3 if CPP RCTX is needed, it is simpler to just
91 * have an API that performs the workaround steps. TF-A does not support nested
92 * exceptions outside of specific circumstances, and enabling that generically
93 * is not trivial, so this is a simpler and faster solution.
94 *
95 * The workaround is not reliant on the config register passed to the CPP RCTX
96 * instruction, but the argument is included for compatibility in systems that
97 * might have some cores that need the workaround and some that do not. If the
98 * workaround is not needed, the argument will be used in a normal CPP RCTX call
99 * rather than the workaround procedure.
100 */
#if WORKAROUND_CVE_2025_0647
void wa_cve_2025_0647_execute_cpp_el3(uint64_t arg);
#else
/*
 * Workaround not compiled in: fall through to a plain CPP RCTX with the
 * caller-supplied config value (see comment above). Forced inline so the
 * instruction is emitted at the call site.
 */
__attribute__((always_inline))
static inline void wa_cve_2025_0647_execute_cpp_el3(uint64_t arg)
{
#ifdef __aarch64__
	/* CPP RCTX takes its configuration in x0; pin the argument there. */
	register uint64_t x0 __asm__("x0") = arg;
	__asm__ volatile ("cpp rctx, x0" : : "r"(x0) : "memory");
#endif /* __aarch64__ */
}
#endif /* WORKAROUND_CVE_2025_0647 */
113
114 #else
115
/*
 * errata framework macro helpers
 *
 * NOTE an erratum and CVE id could clash. However, both numbers are very large
 * and the probability is minuscule. Working around this makes code very
 * complicated and extremely difficult to read so it is not considered. In the
 * unlikely event that this does happen, the build will fail, and unless the
 * framework is updated to account for this one of the IDs will need to be
 * altered to prevent the conflict.
 */
/*
 * Argument values for the framework's assembly macros.
 * NOTE(review): semantics inferred from the names — confirm against the
 * assembly implementation of the errata framework.
 */
#define NO_ISB 1
#define NO_ASSERT 0
#define GET_CPU_REV 1
#define NO_GET_CPU_REV 0

/* useful for errata that end up always being worked around */
#define ERRATUM_ALWAYS_CHOSEN 1
133
134 #endif /* __ASSEMBLER__ */
135
/*
 * Tag an entry as an erratum (year 0) or a CVE (its year). These expand to
 * TWO comma-separated values on purpose, so they must NOT be parenthesised.
 */
#define ERRATUM(id) 0, id
#define CVE(year, id) year, id

/*
 * Macro to get CPU revision code for checking errata version compatibility.
 * Arguments and expansion are parenthesised so expression arguments
 * (e.g. a conditional) expand correctly.
 */
#define CPU_REV(r, p) (((r) << 4) | (p))

/* Used for errata that have split workaround */
#define SPLIT_WA 1

/* chosen bitfield entries */
#define WA_ENABLED_MASK BIT(0)
#define SPLIT_WA_MASK BIT(1)
148
149 #endif /* ERRATA_H */
150