/*
 * Copyright (c) 2020-2025, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef BL_COMMON_LD_H
#define BL_COMMON_LD_H

#include <platform_def.h>

#ifdef __aarch64__
#define STRUCT_ALIGN	8
#define BSS_ALIGN	16
#else
#define STRUCT_ALIGN	4
#define BSS_ALIGN	8
#endif

#ifndef DATA_ALIGN
#define DATA_ALIGN	1
#endif

#define CPU_OPS						\
	. = ALIGN(STRUCT_ALIGN);			\
	__CPU_OPS_START__ = .;				\
	KEEP(*(.cpu_ops))				\
	__CPU_OPS_END__ = .;

#define PARSER_LIB_DESCS				\
	. = ALIGN(STRUCT_ALIGN);			\
	__PARSER_LIB_DESCS_START__ = .;			\
	KEEP(*(.img_parser_lib_descs))			\
	__PARSER_LIB_DESCS_END__ = .;

#define RT_SVC_DESCS					\
	. = ALIGN(STRUCT_ALIGN);			\
	__RT_SVC_DESCS_START__ = .;			\
	KEEP(*(.rt_svc_descs))				\
	__RT_SVC_DESCS_END__ = .;

#if SPMC_AT_EL3
#define EL3_LP_DESCS					\
	. = ALIGN(STRUCT_ALIGN);			\
	__EL3_LP_DESCS_START__ = .;			\
	KEEP(*(.el3_lp_descs))				\
	__EL3_LP_DESCS_END__ = .;
#else
#define EL3_LP_DESCS
#endif

#if ENABLE_SPMD_LP
#define SPMD_LP_DESCS					\
	. = ALIGN(STRUCT_ALIGN);			\
	__SPMD_LP_DESCS_START__ = .;			\
	KEEP(*(.spmd_lp_descs))				\
	__SPMD_LP_DESCS_END__ = .;
#else
#define SPMD_LP_DESCS
#endif

#define PMF_SVC_DESCS					\
	. = ALIGN(STRUCT_ALIGN);			\
	__PMF_SVC_DESCS_START__ = .;			\
	KEEP(*(.pmf_svc_descs))				\
	__PMF_SVC_DESCS_END__ = .;

#define FCONF_POPULATOR					\
	. = ALIGN(STRUCT_ALIGN);			\
	__FCONF_POPULATOR_START__ = .;			\
	KEEP(*(.fconf_populator))			\
	__FCONF_POPULATOR_END__ = .;
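/*
 * Illustrative sketch (not part of the build): each macro above implements
 * the usual "linker set" pattern. C code places a descriptor into the named
 * input section, and the macro brackets all such descriptors between
 * start/end symbols that the runtime can iterate over. The descriptor type
 * and registration macro below are hypothetical placeholders, not the actual
 * TF-A definitions; only the section name and the start/end symbols come
 * from this header.
 *
 *	struct my_desc {
 *		const char *name;
 *		int (*handler)(void);
 *	};
 *
 *	#define DECLARE_MY_DESC(d)					\
 *		static const struct my_desc d				\
 *			__attribute__((used, section(".rt_svc_descs")))
 *
 *	extern struct my_desc __RT_SVC_DESCS_START__[];
 *	extern struct my_desc __RT_SVC_DESCS_END__[];
 *
 *	for (struct my_desc *d = __RT_SVC_DESCS_START__;
 *	     d < __RT_SVC_DESCS_END__; d++) {
 *		d->handler();
 *	}
 */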
/*
 * Keep the .got section in the RO section, as it is patched prior to enabling
 * the MMU, and keeping the .got read-only is better for security. The GOT is
 * a table of addresses, so ensure pointer-size alignment.
 */
#define GOT						\
	. = ALIGN(STRUCT_ALIGN);			\
	__GOT_START__ = .;				\
	*(.got)						\
	__GOT_END__ = .;

/*
 * The base xlat table
 *
 * It is put into the rodata section if PLAT_RO_XLAT_TABLES=1,
 * or into the bss section otherwise.
 */
#define BASE_XLAT_TABLE					\
	. = ALIGN(16);					\
	__BASE_XLAT_TABLE_START__ = .;			\
	*(.base_xlat_table)				\
	__BASE_XLAT_TABLE_END__ = .;

#if PLAT_RO_XLAT_TABLES
#define BASE_XLAT_TABLE_RO	BASE_XLAT_TABLE
#define BASE_XLAT_TABLE_BSS
#else
#define BASE_XLAT_TABLE_RO
#define BASE_XLAT_TABLE_BSS	BASE_XLAT_TABLE
#endif

#define RODATA_COMMON					\
	RT_SVC_DESCS					\
	FCONF_POPULATOR					\
	PMF_SVC_DESCS					\
	PARSER_LIB_DESCS				\
	CPU_OPS						\
	GOT						\
	BASE_XLAT_TABLE_RO				\
	EL3_LP_DESCS					\
	SPMD_LP_DESCS

/*
 * .data must be placed at a lower address than the stacks if the stack
 * protector is enabled. Alternatively, the .data.stack_protector_canary
 * section can be placed independently of the main .data section.
 */
#define DATA_SECTION					\
	.data . : ALIGN(DATA_ALIGN) {			\
		__DATA_START__ = .;			\
		*(SORT_BY_ALIGNMENT(.data*))		\
		__DATA_END__ = .;			\
	}
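/*
 * Illustrative usage sketch (the output region name and the exact contents
 * vary per BL image; this is not a literal copy of any linker script): a BL
 * image linker script that includes this header typically expands
 * RODATA_COMMON inside its read-only output section and instantiates
 * DATA_SECTION as a stand-alone output section, roughly along these lines:
 *
 *	SECTIONS {
 *		.rodata . : {
 *			*(SORT_BY_ALIGNMENT(.rodata*))
 *			RODATA_COMMON
 *		} >RAM
 *
 *		DATA_SECTION >RAM
 *	}
 */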
/*
 * .rela.dyn needs to come after .data for the readelf utility to parse
 * this section correctly.
 */
#if __aarch64__
#define RELA_DYN_NAME		.rela.dyn
#define RELOC_SECTIONS_PATTERN	*(.rela*)
#else
#define RELA_DYN_NAME		.rel.dyn
#define RELOC_SECTIONS_PATTERN	*(.rel*)
#endif

#define RELA_SECTION					\
	RELA_DYN_NAME : ALIGN(STRUCT_ALIGN) {		\
		__RELA_START__ = .;			\
		RELOC_SECTIONS_PATTERN			\
		__RELA_END__ = .;			\
	}

#if !(defined(IMAGE_BL31) && RECLAIM_INIT_CODE)
#define STACK_SECTION					\
	.stacks (NOLOAD) : {				\
		__STACKS_START__ = .;			\
		*(.tzfw_normal_stacks)			\
		__STACKS_END__ = .;			\
	}
#endif

/*
 * If the BL image doesn't use any bakery locks then
 * __PERCPU_BAKERY_LOCK_SIZE__ will be zero. For this reason, the only two
 * valid values for __PERCPU_BAKERY_LOCK_SIZE__ are 0 or the platform-defined
 * value PLAT_PERCPU_BAKERY_LOCK_SIZE.
 */
#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
#define BAKERY_LOCK_SIZE_CHECK				\
	ASSERT((__PERCPU_BAKERY_LOCK_SIZE__ == 0) ||	\
	       (__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE), \
	       "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
#else
#define BAKERY_LOCK_SIZE_CHECK
#endif

/*
 * Bakery locks are stored in normal .bss memory.
 *
 * Each lock's data is spread across multiple cache lines, one per CPU,
 * but multiple locks can share the same cache line.
 * The compiler will allocate enough memory for one CPU's bakery locks;
 * the remaining cache lines are allocated by the linker script.
 */
#if !USE_COHERENT_MEM
#define BAKERY_LOCK_NORMAL				\
	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
	__BAKERY_LOCK_START__ = .;			\
	__PERCPU_BAKERY_LOCK_START__ = .;		\
	*(.bakery_lock)					\
	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
	__PERCPU_BAKERY_LOCK_END__ = .;			\
	__PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(__PERCPU_BAKERY_LOCK_END__ - __PERCPU_BAKERY_LOCK_START__); \
	. = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1)); \
	__BAKERY_LOCK_END__ = .;			\
	BAKERY_LOCK_SIZE_CHECK
#else
#define BAKERY_LOCK_NORMAL
#endif
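/*
 * Worked example of the sizing above (hypothetical numbers, for illustration
 * only): with CACHE_WRITEBACK_GRANULE = 64 and PLATFORM_CORE_COUNT = 4, the
 * locks emitted into .bakery_lock are padded up to a whole number of cache
 * lines, say a single 64-byte line. That gives
 * __PERCPU_BAKERY_LOCK_SIZE__ = 64; the location counter is then advanced by
 * 64 * (4 - 1) = 192 bytes for the remaining CPUs, so the region from
 * __BAKERY_LOCK_START__ to __BAKERY_LOCK_END__ spans 256 bytes in total.
 */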
/*
 * Time-stamps are stored in normal .bss memory.
 *
 * The compiler will allocate enough memory for one CPU's time-stamps;
 * the remaining memory for the other CPUs is allocated by the linker script.
 */
#define PMF_TIMESTAMP					\
	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
	__PMF_TIMESTAMP_START__ = .;			\
	KEEP(*(.pmf_timestamp_array))			\
	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
	__PMF_PERCPU_TIMESTAMP_END__ = .;		\
	__PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__); \
	. = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1)); \
	__PMF_TIMESTAMP_END__ = .;

/*
 * The .bss section gets initialised to 0 at runtime.
 * Its base address has a bigger alignment for better performance of the
 * zero-initialization code.
 */
#define BSS_SECTION					\
	.bss (NOLOAD) : ALIGN(BSS_ALIGN) {		\
		__BSS_START__ = .;			\
		*(SORT_BY_ALIGNMENT(.bss*))		\
		*(COMMON)				\
		BAKERY_LOCK_NORMAL			\
		PMF_TIMESTAMP				\
		BASE_XLAT_TABLE_BSS			\
		__BSS_END__ = .;			\
	}

#define PER_CPU						\
	/* The .per_cpu section gets initialised to 0 at runtime. */ \
	.per_cpu (NOLOAD) : ALIGN(CACHE_WRITEBACK_GRANULE) { \
		__PER_CPU_START__ = .;			\
		__PER_CPU_UNIT_START__ = .;		\
		*(SORT_BY_ALIGNMENT(.per_cpu*))		\
		__PER_CPU_UNIT_UNALIGNED_END_UNIT__ = .; \
		. = ALIGN(CACHE_WRITEBACK_GRANULE);	\
		__PER_CPU_UNIT_END__ = .;		\
		__PER_CPU_UNIT_SECTION_SIZE__ =		\
			ABSOLUTE(__PER_CPU_UNIT_END__ - __PER_CPU_UNIT_START__); \
		. = . + (PER_CPU_NODE_CORE_COUNT - 1) *	\
			__PER_CPU_UNIT_SECTION_SIZE__;	\
		__PER_CPU_END__ = .;			\
	}
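/*
 * Sizing sketch for the .per_cpu section above (hypothetical numbers, and
 * assuming PER_CPU_NODE_CORE_COUNT is the number of cores the section must
 * cover, as its use here suggests): if the objects placed in .per_cpu* add
 * up to 40 bytes and CACHE_WRITEBACK_GRANULE = 64, one CPU's unit is padded
 * to 64 bytes (__PER_CPU_UNIT_SECTION_SIZE__ = 64). With
 * PER_CPU_NODE_CORE_COUNT = 8, the location counter is advanced by a further
 * (8 - 1) * 64 = 448 bytes, so __PER_CPU_END__ - __PER_CPU_START__ = 512
 * bytes: one cache-line-aligned unit per core.
 */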
/*
 * The .xlat_table section is for full, aligned page tables (4K).
 * Removing them from .bss avoids forcing 4K alignment on the .bss section.
 * The tables are initialized to zero by the translation tables library.
 */
#define XLAT_TABLE_SECTION				\
	.xlat_table (NOLOAD) : {			\
		__XLAT_TABLE_START__ = .;		\
		*(.xlat_table)				\
		__XLAT_TABLE_END__ = .;			\
	}

#endif /* BL_COMMON_LD_H */