/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl31_entrypoint)


MEMORY {
    RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE
#if SEPARATE_NOBITS_REGION
    NOBITS (rw!a): ORIGIN = BL31_NOBITS_BASE, LENGTH = BL31_NOBITS_LIMIT - BL31_NOBITS_BASE
#else
/* When NOBITS is not split out, it lives in the same RAM region as progbits. */
#define NOBITS RAM
#endif
}

#ifdef PLAT_EXTRA_LD_SCRIPT
#include <plat.ld.S>
#endif

SECTIONS
{
    . = BL31_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL31_BASE address is not aligned on a page boundary.")

    __BL31_START__ = .;

#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
        /* Entry-point object first so bl31_entrypoint lands at BL31_BASE. */
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(.vectors)
        . = ALIGN(PAGE_SIZE);
        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(SORT_BY_ALIGNMENT(.rodata*))

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

#if ENABLE_PMF
        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PMF_SVC_DESCS_START__ = .;
        KEEP(*(pmf_svc_descs))
        __PMF_SVC_DESCS_END__ = .;
#endif /* ENABLE_PMF */

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /*
         * Keep the .got section in the RO section as it is patched
         * prior to enabling the MMU and having the .got in RO is better for
         * security. GOT is a table of addresses so ensure 8-byte alignment.
         */
        . = ALIGN(8);
        __GOT_START__ = .;
        *(.got)
        __GOT_END__ = .;

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        . = ALIGN(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM
#else
    ro . : {
        __RO_START__ = .;
        /* Entry-point object first so bl31_entrypoint lands at BL31_BASE. */
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(SORT_BY_ALIGNMENT(.rodata*))

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

#if ENABLE_PMF
        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PMF_SVC_DESCS_START__ = .;
        KEEP(*(pmf_svc_descs))
        __PMF_SVC_DESCS_END__ = .;
#endif /* ENABLE_PMF */

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /*
         * Keep the .got section in the RO section as it is patched
         * prior to enabling the MMU and having the .got in RO is better for
         * security. GOT is a table of addresses so ensure 8-byte alignment.
         */
        . = ALIGN(8);
        __GOT_START__ = .;
        *(.got)
        __GOT_END__ = .;

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        *(.vectors)
        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as read-only,
         * executable. No RW data from the next section must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __RO_END__ = .;
    } >RAM
#endif

    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")

#if SPM_MM
#ifndef SPM_SHIM_EXCEPTIONS_VMA
#define SPM_SHIM_EXCEPTIONS_VMA RAM
#endif

    /*
     * Exception vectors of the SPM shim layer. They must be aligned to a 2K
     * address, but we need to place them in a separate page so that we can set
     * individual permissions to them, so the actual alignment needed is 4K.
     *
     * There's no need to include this into the RO section of BL31 because it
     * doesn't need to be accessed by BL31.
     */
    spm_shim_exceptions : ALIGN(PAGE_SIZE) {
        __SPM_SHIM_EXCEPTIONS_START__ = .;
        *(.spm_shim_exceptions)
        . = ALIGN(PAGE_SIZE);
        __SPM_SHIM_EXCEPTIONS_END__ = .;
    } >SPM_SHIM_EXCEPTIONS_VMA AT>RAM

    PROVIDE(__SPM_SHIM_EXCEPTIONS_LMA__ = LOADADDR(spm_shim_exceptions));
    . = LOADADDR(spm_shim_exceptions) + SIZEOF(spm_shim_exceptions);
#endif

    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = . ;

    /*
     * .data must be placed at a lower address than the stacks if the stack
     * protector is enabled. Alternatively, the .data.stack_protector_canary
     * section can be placed independently of the main .data section.
     */
    .data . : {
        __DATA_START__ = .;
        *(SORT_BY_ALIGNMENT(.data*))
        __DATA_END__ = .;
    } >RAM

    /*
     * .rela.dyn needs to come after .data for the read-elf utility to parse
     * this section correctly. Ensure 8-byte alignment so that the fields of
     * RELA data structure are aligned.
     */
    . = ALIGN(8);
    __RELA_START__ = .;
    .rela.dyn . : {
    } >RAM
    __RELA_END__ = .;

#ifdef BL31_PROGBITS_LIMIT
    ASSERT(. <= BL31_PROGBITS_LIMIT, "BL31 progbits has exceeded its limit.")
#endif

#if SEPARATE_NOBITS_REGION
    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    . = ALIGN(PAGE_SIZE);
    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")

    . = BL31_NOBITS_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL31 NOBITS base address is not aligned on a page boundary.")

    __NOBITS_START__ = .;
#endif

    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >NOBITS

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 16-byte aligned for better performance of the
     * zero-initialization code.
     */
    .bss (NOLOAD) : ALIGN(16) {
        __BSS_START__ = .;
        *(SORT_BY_ALIGNMENT(.bss*))
        *(COMMON)
#if !USE_COHERENT_MEM
        /*
         * Bakery locks are stored in normal .bss memory
         *
         * Each lock's data is spread across multiple cache lines, one per CPU,
         * but multiple locks can share the same cache line.
         * The compiler will allocate enough memory for one CPU's bakery locks,
         * the remaining cache lines are allocated by the linker script
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __BAKERY_LOCK_START__ = .;
        __PERCPU_BAKERY_LOCK_START__ = .;
        *(bakery_lock)
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PERCPU_BAKERY_LOCK_END__ = .;
        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(__PERCPU_BAKERY_LOCK_END__ - __PERCPU_BAKERY_LOCK_START__);
        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __BAKERY_LOCK_END__ = .;

        /*
         * If BL31 doesn't use any bakery lock then __PERCPU_BAKERY_LOCK_SIZE__
         * will be zero. For this reason, the only two valid values for
         * __PERCPU_BAKERY_LOCK_SIZE__ are 0 or the platform defined value
         * PLAT_PERCPU_BAKERY_LOCK_SIZE.
         */
#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
        ASSERT((__PERCPU_BAKERY_LOCK_SIZE__ == 0) || (__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE),
               "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
#endif
#endif

#if ENABLE_PMF
        /*
         * Time-stamps are stored in normal .bss memory
         *
         * The compiler will allocate enough memory for one CPU's time-stamps,
         * the remaining memory for other CPUs is allocated by the
         * linker script
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_TIMESTAMP_START__ = .;
        KEEP(*(pmf_timestamp_array))
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_PERCPU_TIMESTAMP_END__ = .;
        __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
        . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __PMF_TIMESTAMP_END__ = .;
#endif /* ENABLE_PMF */
        __BSS_END__ = .;
    } >NOBITS

    XLAT_TABLE_SECTION >NOBITS

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data. This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        /*
         * Bakery locks are stored in coherent memory
         *
         * Each lock's data is contiguous and fully allocated by the compiler
         */
        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory. No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __COHERENT_RAM_END__ = .;
    } >NOBITS
#endif

#if SEPARATE_NOBITS_REGION
    /*
     * Define a linker symbol to mark end of the NOBITS memory area for this
     * image.
     */
    __NOBITS_END__ = .;

    ASSERT(. <= BL31_NOBITS_LIMIT, "BL31 NOBITS region has exceeded its limit.")
#else
    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL31_END__ = .;

    /DISCARD/ : {
        *(.dynsym .dynstr .hash .gnu.hash)
    }

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
#endif
}