/*
 * Copyright (c) 2014-2020, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <plat_startup.h>


/*
 * HandoffParams
 * Parameter	bitfield	encoding
 * -----------------------------------------------------------------------------
 * Exec State	0		0 -> AArch64, 1 -> AArch32
 * endianness	1		0 -> LE, 1 -> BE
 * secure (TZ)	2		0 -> Non secure, 1 -> secure
 * EL		3:4		00 -> EL0, 01 -> EL1, 10 -> EL2, 11 -> EL3
 * CPU#		5:6		00 -> A53_0, 01 -> A53_1, 10 -> A53_2, 11 -> A53_3
 * Reserved	7:10		Reserved
 * Cluster#	11:12		00 -> Cluster 0, 01 -> Cluster 1, 10 -> Cluster 2,
 *				11 -> Cluster 3 (applicable for Versal NET only).
 * Reserved	13:16		Reserved
 */
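
/*
 * Illustrative example (value assumed for clarity, not taken from any real
 * boot image): under the encoding above, flags = 0x8 decodes to AArch64,
 * little-endian, non-secure, EL1, CPU A53_0, Cluster 0.
 */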

#define XBL_FLAGS_ESTATE_SHIFT		0U
#define XBL_FLAGS_ESTATE_MASK		(1U << XBL_FLAGS_ESTATE_SHIFT)
#define XBL_FLAGS_ESTATE_A64		0U
#define XBL_FLAGS_ESTATE_A32		1U

#define XBL_FLAGS_ENDIAN_SHIFT		1U
#define XBL_FLAGS_ENDIAN_MASK		(1U << XBL_FLAGS_ENDIAN_SHIFT)
#define XBL_FLAGS_ENDIAN_LE		0U
#define XBL_FLAGS_ENDIAN_BE		1U

#define XBL_FLAGS_TZ_SHIFT		2U
#define XBL_FLAGS_TZ_MASK		(1U << XBL_FLAGS_TZ_SHIFT)
#define XBL_FLAGS_NON_SECURE		0U
#define XBL_FLAGS_SECURE		1U

#define XBL_FLAGS_EL_SHIFT		3U
#define XBL_FLAGS_EL_MASK		(3U << XBL_FLAGS_EL_SHIFT)
#define XBL_FLAGS_EL0			0U
#define XBL_FLAGS_EL1			1U
#define XBL_FLAGS_EL2			2U
#define XBL_FLAGS_EL3			3U

#define XBL_FLAGS_CPU_SHIFT		5U
#define XBL_FLAGS_CPU_MASK		(3U << XBL_FLAGS_CPU_SHIFT)
#define XBL_FLAGS_A53_0			0U
#define XBL_FLAGS_A53_1			1U
#define XBL_FLAGS_A53_2			2U
#define XBL_FLAGS_A53_3			3U

#if defined(PLAT_versal_net)
#define XBL_FLAGS_CLUSTER_SHIFT		11U
#define XBL_FLAGS_CLUSTER_MASK		GENMASK(12, 11)

#define XBL_FLAGS_CLUSTER_0		0U
#endif /* PLAT_versal_net */

/**
 * get_xbl_cpu() - Get the target CPU for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: XBL_FLAGS_A53_0, XBL_FLAGS_A53_1, XBL_FLAGS_A53_2 or XBL_FLAGS_A53_3.
 *
 */
static uint32_t get_xbl_cpu(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_CPU_MASK;

	flags >>= XBL_FLAGS_CPU_SHIFT;

	return (uint32_t)flags;
}

/**
 * get_xbl_el() - Get the target exception level for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: XBL_FLAGS_EL0, XBL_FLAGS_EL1, XBL_FLAGS_EL2 or XBL_FLAGS_EL3.
 *
 */
static uint32_t get_xbl_el(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_EL_MASK;

	flags >>= XBL_FLAGS_EL_SHIFT;

	return (uint32_t)flags;
}

/**
 * get_xbl_ss() - Get the target security state for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: XBL_FLAGS_NON_SECURE or XBL_FLAGS_SECURE.
 *
 */
static uint32_t get_xbl_ss(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_TZ_MASK;

	flags >>= XBL_FLAGS_TZ_SHIFT;

	return (uint32_t)flags;
}

/**
 * get_xbl_endian() - Get the target endianness for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: SPSR_E_LITTLE or SPSR_E_BIG.
 *
 */
static uint32_t get_xbl_endian(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_ENDIAN_MASK;

	flags >>= XBL_FLAGS_ENDIAN_SHIFT;

	if (flags == XBL_FLAGS_ENDIAN_BE) {
		return SPSR_E_BIG;
	} else {
		return SPSR_E_LITTLE;
	}
}

/**
 * get_xbl_estate() - Get the target execution state for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: XBL_FLAGS_ESTATE_A32 or XBL_FLAGS_ESTATE_A64.
 *
 */
static uint32_t get_xbl_estate(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_ESTATE_MASK;

	flags >>= XBL_FLAGS_ESTATE_SHIFT;

	return (uint32_t)flags;
}

#if defined(PLAT_versal_net)
/**
 * get_xbl_cluster() - Get the cluster number.
 * @partition: Pointer to partition struct.
 *
 * Return: cluster number for the partition.
 */
static uint32_t get_xbl_cluster(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_CLUSTER_MASK;

	return (uint32_t)(flags >> XBL_FLAGS_CLUSTER_SHIFT);
}
#endif /* PLAT_versal_net */

/**
 * xbl_handover() - Populates the bl32 and bl33 image info structures.
 * @bl32: BL32 image info structure.
 * @bl33: BL33 image info structure.
 * @handoff_addr: TF-A handoff address.
 *
 * Process the handoff parameters from the XBL and populate the BL32 and BL33
 * image info structures accordingly.
 *
 * Return: Return the status of the handoff. The value will be from the
 *         xbl_handoff enum.
 *
 */
enum xbl_handoff xbl_handover(entry_point_info_t *bl32,
			      entry_point_info_t *bl33,
			      uint64_t handoff_addr)
{
	const struct xbl_handoff_params *HandoffParams;

	if (handoff_addr == 0U) {
		WARN("BL31: No handoff structure passed\n");
		return XBL_HANDOFF_NO_STRUCT;
	}

	HandoffParams = (struct xbl_handoff_params *)handoff_addr;
	if ((HandoffParams->magic[0] != (uint8_t)'X') ||
	    (HandoffParams->magic[1] != (uint8_t)'L') ||
	    (HandoffParams->magic[2] != (uint8_t)'N') ||
	    (HandoffParams->magic[3] != (uint8_t)'X')) {
		ERROR("BL31: invalid handoff structure at %" PRIx64 "\n", handoff_addr);
		return XBL_HANDOFF_INVAL_STRUCT;
	}

	VERBOSE("BL31: TF-A handoff params at:0x%" PRIx64 ", entries:%u\n",
		handoff_addr, HandoffParams->num_entries);
	if (HandoffParams->num_entries > XBL_MAX_PARTITIONS) {
		ERROR("BL31: TF-A handoff params: too many partitions (%u/%u)\n",
		      HandoffParams->num_entries, XBL_MAX_PARTITIONS);
		return XBL_HANDOFF_TOO_MANY_PARTS;
	}

	/*
	 * We loop over all passed entries but only populate two image structs
	 * (bl32, bl33), i.e. the last applicable images in the handoff
	 * structure will be used for the handoff.
	 */
	for (size_t i = 0; i < HandoffParams->num_entries; i++) {
		entry_point_info_t *image;
		uint32_t target_estate, target_secure, target_cpu;
		uint32_t target_endianness, target_el;

		VERBOSE("BL31: %zu: entry:0x%" PRIx64 ", flags:0x%" PRIx64 "\n", i,
			HandoffParams->partition[i].entry_point,
			HandoffParams->partition[i].flags);

#if defined(PLAT_versal_net)
		uint32_t target_cluster;

		target_cluster = get_xbl_cluster(&HandoffParams->partition[i]);
		if (target_cluster != XBL_FLAGS_CLUSTER_0) {
			WARN("BL31: invalid target Cluster (%u)\n",
			     target_cluster);
			continue;
		}
#endif /* PLAT_versal_net */

		target_cpu = get_xbl_cpu(&HandoffParams->partition[i]);
		if (target_cpu != XBL_FLAGS_A53_0) {
			WARN("BL31: invalid target CPU (%u)\n", target_cpu);
			continue;
		}

		target_el = get_xbl_el(&HandoffParams->partition[i]);
		if ((target_el == XBL_FLAGS_EL3) ||
		    (target_el == XBL_FLAGS_EL0)) {
			WARN("BL31: invalid target exception level (%u)\n",
			     target_el);
			continue;
		}

		target_secure = get_xbl_ss(&HandoffParams->partition[i]);
		if ((target_secure == XBL_FLAGS_SECURE) &&
		    (target_el == XBL_FLAGS_EL2)) {
			WARN("BL31: invalid security state (%u) for exception level (%u)\n",
			     target_secure, target_el);
			continue;
		}

		target_estate = get_xbl_estate(&HandoffParams->partition[i]);
		target_endianness = get_xbl_endian(&HandoffParams->partition[i]);

		if (target_secure == XBL_FLAGS_SECURE) {
			image = bl32;

			if (target_estate == XBL_FLAGS_ESTATE_A32) {
				bl32->spsr = (uint32_t)SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
								   (uint64_t)target_endianness,
								   DISABLE_ALL_EXCEPTIONS);
			} else {
				bl32->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
						     DISABLE_ALL_EXCEPTIONS);
			}
		} else {
			image = bl33;

			if (target_estate == XBL_FLAGS_ESTATE_A32) {
				if (target_el == XBL_FLAGS_EL2) {
					target_el = MODE32_hyp;
				} else {
					target_el = MODE32_sys;
				}

				bl33->spsr = (uint32_t)SPSR_MODE32((uint64_t)target_el, SPSR_T_ARM,
								   (uint64_t)target_endianness,
								   DISABLE_ALL_EXCEPTIONS);
			} else {
				if (target_el == XBL_FLAGS_EL2) {
					target_el = MODE_EL2;
				} else {
					target_el = MODE_EL1;
				}

				bl33->spsr = (uint32_t)SPSR_64((uint64_t)target_el, MODE_SP_ELX,
							       DISABLE_ALL_EXCEPTIONS);
			}
		}

		VERBOSE("Setting up %s entry point to:%" PRIx64 ", el:%x\n",
			(target_secure == XBL_FLAGS_SECURE) ? "BL32" : "BL33",
			HandoffParams->partition[i].entry_point,
			target_el);
		image->pc = HandoffParams->partition[i].entry_point;

		if (target_endianness == SPSR_E_BIG) {
			EP_SET_EE(image->h.attr, EP_EE_BIG);
		} else {
			EP_SET_EE(image->h.attr, EP_EE_LITTLE);
		}
	}

	return XBL_HANDOFF_SUCCESS;
}
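
/*
 * Example usage (illustrative sketch, not part of this file's interface): a
 * platform's BL31 setup code could consume the handoff structure roughly as
 * below. The names bl32_image_ep_info, bl33_image_ep_info and handoff_addr
 * are assumed placeholders for the platform's entry-point structures and the
 * address passed by the XBL; only xbl_handover() and the xbl_handoff enum
 * values come from this file.
 *
 *	enum xbl_handoff ret;
 *
 *	ret = xbl_handover(&bl32_image_ep_info, &bl33_image_ep_info,
 *			   handoff_addr);
 *	if (ret == XBL_HANDOFF_TOO_MANY_PARTS) {
 *		ERROR("BL31: Too many partitions in handoff structure\n");
 *		panic();
 *	}
 */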