/*
 * Copyright (c) 2014-2020, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <plat_startup.h>

/*
 * HandoffParams
 * Parameter bitfield encoding
 * -----------------------------------------------------------------------------
 * Exec State	0	0 -> AArch64, 1 -> AArch32
 * Endianness	1	0 -> LE, 1 -> BE
 * Secure (TZ)	2	0 -> Non-secure, 1 -> Secure
 * EL		3:4	00 -> EL0, 01 -> EL1, 10 -> EL2, 11 -> EL3
 * CPU#		5:6	00 -> A53_0, 01 -> A53_1, 10 -> A53_2, 11 -> A53_3
 * Reserved	7:10	Reserved
 * Cluster#	11:12	00 -> Cluster 0, 01 -> Cluster 1, 10 -> Cluster 2,
 *			11 -> Cluster 3 (applicable to Versal NET only)
 * Reserved	13:16	Reserved
 */

#define XBL_FLAGS_ESTATE_SHIFT		0U
#define XBL_FLAGS_ESTATE_MASK		(1U << XBL_FLAGS_ESTATE_SHIFT)
#define XBL_FLAGS_ESTATE_A64		0U
#define XBL_FLAGS_ESTATE_A32		1U

#define XBL_FLAGS_ENDIAN_SHIFT		1U
#define XBL_FLAGS_ENDIAN_MASK		(1U << XBL_FLAGS_ENDIAN_SHIFT)
#define XBL_FLAGS_ENDIAN_LE		0U
#define XBL_FLAGS_ENDIAN_BE		1U

#define XBL_FLAGS_TZ_SHIFT		2U
#define XBL_FLAGS_TZ_MASK		(1U << XBL_FLAGS_TZ_SHIFT)
#define XBL_FLAGS_NON_SECURE		0U
#define XBL_FLAGS_SECURE		1U

#define XBL_FLAGS_EL_SHIFT		3U
#define XBL_FLAGS_EL_MASK		(3U << XBL_FLAGS_EL_SHIFT)
#define XBL_FLAGS_EL0			0U
#define XBL_FLAGS_EL1			1U
#define XBL_FLAGS_EL2			2U
#define XBL_FLAGS_EL3			3U

#define XBL_FLAGS_CPU_SHIFT		5U
#define XBL_FLAGS_CPU_MASK		(3U << XBL_FLAGS_CPU_SHIFT)
#define XBL_FLAGS_A53_0			0U
#define XBL_FLAGS_A53_1			1U
#define XBL_FLAGS_A53_2			2U
#define XBL_FLAGS_A53_3			3U

#if defined(PLAT_versal_net)
#define XBL_FLAGS_CLUSTER_SHIFT		11U
#define XBL_FLAGS_CLUSTER_MASK		GENMASK(12, 11)

#define XBL_FLAGS_CLUSTER_0		0U
#endif /* PLAT_versal_net */
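
/*
 * Worked example (illustrative only, not a value taken from any real boot
 * flow): a partition with flags == 0x08 has bit 0 == 0 (AArch64), bit 1 == 0
 * (little-endian), bit 2 == 0 (non-secure), bits 4:3 == 0b01 (EL1) and
 * bits 6:5 == 0b00 (A53_0), i.e.
 *
 *	(0x08U & XBL_FLAGS_EL_MASK) >> XBL_FLAGS_EL_SHIFT == XBL_FLAGS_EL1
 *
 * The helpers below extract these fields one at a time.
 */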

/**
 * get_xbl_cpu() - Get the target CPU for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: XBL_FLAGS_A53_0, XBL_FLAGS_A53_1, XBL_FLAGS_A53_2 or XBL_FLAGS_A53_3.
 *
 */
static uint32_t get_xbl_cpu(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_CPU_MASK;

	flags >>= XBL_FLAGS_CPU_SHIFT;

	return (uint32_t)flags;
}

/**
 * get_xbl_el() - Get the target exception level for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: XBL_FLAGS_EL0, XBL_FLAGS_EL1, XBL_FLAGS_EL2 or XBL_FLAGS_EL3.
 *
 */
static uint32_t get_xbl_el(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_EL_MASK;

	flags >>= XBL_FLAGS_EL_SHIFT;

	return (uint32_t)flags;
}

/**
 * get_xbl_ss() - Get the target security state for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: XBL_FLAGS_NON_SECURE or XBL_FLAGS_SECURE.
 *
 */
static uint32_t get_xbl_ss(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_TZ_MASK;

	flags >>= XBL_FLAGS_TZ_SHIFT;

	return (uint32_t)flags;
}

/**
 * get_xbl_endian() - Get the target endianness for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: SPSR_E_LITTLE or SPSR_E_BIG.
 *
 */
static uint32_t get_xbl_endian(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_ENDIAN_MASK;
	uint32_t spsr_value = 0U;

	flags >>= XBL_FLAGS_ENDIAN_SHIFT;

	if (flags == XBL_FLAGS_ENDIAN_BE) {
		spsr_value = SPSR_E_BIG;
	} else {
		spsr_value = SPSR_E_LITTLE;
	}

	return spsr_value;
}

/**
 * get_xbl_estate() - Get the target execution state for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: XBL_FLAGS_ESTATE_A32 or XBL_FLAGS_ESTATE_A64.
 *
 */
static uint32_t get_xbl_estate(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_ESTATE_MASK;

	flags >>= XBL_FLAGS_ESTATE_SHIFT;

	return (uint32_t)flags;
}

#if defined(PLAT_versal_net)
/**
 * get_xbl_cluster() - Get the cluster number.
 * @partition: Pointer to the partition structure.
 *
 * Return: Cluster number for the partition.
 */
static uint32_t get_xbl_cluster(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_CLUSTER_MASK;

	return (uint32_t)(flags >> XBL_FLAGS_CLUSTER_SHIFT);
}
#endif /* PLAT_versal_net */

/**
 * xbl_handover() - Populate the bl32 and bl33 image info structures.
 * @bl32: BL32 image info structure.
 * @bl33: BL33 image info structure.
 * @handoff_addr: TF-A handoff address.
 *
 * Process the handoff parameters from the XBL and populate the BL32 and BL33
 * image info structures accordingly.
 *
 * Return: Status of the handoff; a value from the xbl_handoff enum.
 *
 */
enum xbl_handoff xbl_handover(entry_point_info_t *bl32,
			      entry_point_info_t *bl33,
			      uint64_t handoff_addr)
{
	const struct xbl_handoff_params *HandoffParams;
	enum xbl_handoff xbl_status = XBL_HANDOFF_SUCCESS;

	if (handoff_addr == 0U) {
		WARN("BL31: No handoff structure passed\n");
		xbl_status = XBL_HANDOFF_NO_STRUCT;
		goto exit_label;
	}

	HandoffParams = (struct xbl_handoff_params *)handoff_addr;
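
	/* A valid handoff structure is identified by the ASCII magic "XLNX". */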
	if ((HandoffParams->magic[0] != (uint8_t)'X') ||
	    (HandoffParams->magic[1] != (uint8_t)'L') ||
	    (HandoffParams->magic[2] != (uint8_t)'N') ||
	    (HandoffParams->magic[3] != (uint8_t)'X')) {
		ERROR("BL31: invalid handoff structure at %" PRIx64 "\n", handoff_addr);
		xbl_status = XBL_HANDOFF_INVAL_STRUCT;
		goto exit_label;
	}

	VERBOSE("BL31: TF-A handoff params at:0x%" PRIx64 ", entries:%u\n",
		handoff_addr, HandoffParams->num_entries);
	if (HandoffParams->num_entries > XBL_MAX_PARTITIONS) {
		ERROR("BL31: TF-A handoff params: too many partitions (%u/%u)\n",
		      HandoffParams->num_entries, XBL_MAX_PARTITIONS);
		xbl_status = XBL_HANDOFF_TOO_MANY_PARTS;
		goto exit_label;
	}

	/*
	 * Loop over all passed entries, but only populate the two image
	 * structs (bl32, bl33), i.e. the last applicable image of each kind
	 * in the handoff structure is the one used for the handoff.
	 */
	for (size_t i = 0; i < HandoffParams->num_entries; i++) {
		entry_point_info_t *image;
		uint32_t target_estate, target_secure, target_cpu;
		uint32_t target_endianness, target_el;

		VERBOSE("BL31: %zd: entry:0x%" PRIx64 ", flags:0x%" PRIx64 "\n", i,
			HandoffParams->partition[i].entry_point,
			HandoffParams->partition[i].flags);

#if defined(PLAT_versal_net)
		uint32_t target_cluster;

		target_cluster = get_xbl_cluster(&HandoffParams->partition[i]);
		if (target_cluster != XBL_FLAGS_CLUSTER_0) {
			WARN("BL31: invalid target Cluster (%i)\n",
			     target_cluster);
			continue;
		}
#endif /* PLAT_versal_net */

		target_cpu = get_xbl_cpu(&HandoffParams->partition[i]);
		if (target_cpu != XBL_FLAGS_A53_0) {
			WARN("BL31: invalid target CPU (%i)\n", target_cpu);
			continue;
		}

		target_el = get_xbl_el(&HandoffParams->partition[i]);
		if ((target_el == XBL_FLAGS_EL3) ||
		    (target_el == XBL_FLAGS_EL0)) {
			WARN("BL31: invalid target exception level (%i)\n",
			     target_el);
			continue;
		}

		target_secure = get_xbl_ss(&HandoffParams->partition[i]);
		if ((target_secure == XBL_FLAGS_SECURE) &&
		    (target_el == XBL_FLAGS_EL2)) {
			WARN("BL31: invalid security state (%i) for exception level (%i)\n",
			     target_secure, target_el);
			continue;
		}

		target_estate = get_xbl_estate(&HandoffParams->partition[i]);
		target_endianness = get_xbl_endian(&HandoffParams->partition[i]);

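		/*
		 * Secure partitions are handed to BL32 and always enter at
		 * Secure EL1 (SVC mode in AArch32); non-secure partitions go
		 * to BL33 at the exception level requested in the flags
		 * (EL2/hyp or EL1/sys).
		 */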
		if (target_secure == XBL_FLAGS_SECURE) {
			image = bl32;

			if (target_estate == XBL_FLAGS_ESTATE_A32) {
				bl32->spsr = (uint32_t)SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
								   (uint64_t)target_endianness,
								   DISABLE_ALL_EXCEPTIONS);
			} else {
				bl32->spsr = (uint32_t)SPSR_64(MODE_EL1, MODE_SP_ELX,
							       DISABLE_ALL_EXCEPTIONS);
			}
		} else {
			image = bl33;

			if (target_estate == XBL_FLAGS_ESTATE_A32) {
				if (target_el == XBL_FLAGS_EL2) {
					target_el = MODE32_hyp;
				} else {
					target_el = MODE32_sys;
				}

				bl33->spsr = (uint32_t)SPSR_MODE32((uint64_t)target_el, SPSR_T_ARM,
								   (uint64_t)target_endianness,
								   DISABLE_ALL_EXCEPTIONS);
			} else {
				if (target_el == XBL_FLAGS_EL2) {
					target_el = MODE_EL2;
				} else {
					target_el = MODE_EL1;
				}

				bl33->spsr = (uint32_t)SPSR_64((uint64_t)target_el, MODE_SP_ELX,
							       DISABLE_ALL_EXCEPTIONS);
			}
		}

		VERBOSE("Setting up %s entry point to:%" PRIx64 ", el:%x\n",
			(target_secure == XBL_FLAGS_SECURE) ? "BL32" : "BL33",
			HandoffParams->partition[i].entry_point,
			target_el);
		image->pc = HandoffParams->partition[i].entry_point;

		if (target_endianness == SPSR_E_BIG) {
			EP_SET_EE(image->h.attr, EP_EE_BIG);
		} else {
			EP_SET_EE(image->h.attr, EP_EE_LITTLE);
		}
	}

exit_label:
	return xbl_status;
}
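
/*
 * Usage sketch (illustrative only, not part of this file): a platform's BL31
 * early setup would typically call xbl_handover() with the handoff address
 * received from the XBL and fall back to built-in defaults when no valid
 * structure was passed. bl31_set_default_config() and the *_image_ep_info
 * variables below are hypothetical names used purely for illustration:
 *
 *	enum xbl_handoff ret;
 *
 *	ret = xbl_handover(&bl32_image_ep_info, &bl33_image_ep_info,
 *			   handoff_addr);
 *	if ((ret == XBL_HANDOFF_NO_STRUCT) || (ret == XBL_HANDOFF_INVAL_STRUCT)) {
 *		bl31_set_default_config();
 *	} else if (ret != XBL_HANDOFF_SUCCESS) {
 *		panic();
 *	}
 */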