19003fa0bSYatharth Kochar/* 2da04341eSChris Kay * Copyright (c) 2015-2023, Arm Limited and Contributors. All rights reserved. 39003fa0bSYatharth Kochar * 482cb2c1aSdp-arm * SPDX-License-Identifier: BSD-3-Clause 59003fa0bSYatharth Kochar */ 69003fa0bSYatharth Kochar 79003fa0bSYatharth Kochar#include <platform_def.h> 809d40e0eSAntonio Nino Diaz 9665e71b8SMasahiro Yamada#include <common/bl_common.ld.h> 1009d40e0eSAntonio Nino Diaz#include <lib/xlat_tables/xlat_tables_defs.h> 119003fa0bSYatharth Kochar 129003fa0bSYatharth KocharOUTPUT_FORMAT(PLATFORM_LINKER_FORMAT) 139003fa0bSYatharth KocharOUTPUT_ARCH(PLATFORM_LINKER_ARCH) 149003fa0bSYatharth KocharENTRY(bl2u_entrypoint) 159003fa0bSYatharth Kochar 169003fa0bSYatharth KocharMEMORY { 179003fa0bSYatharth Kochar RAM (rwx): ORIGIN = BL2U_BASE, LENGTH = BL2U_LIMIT - BL2U_BASE 189003fa0bSYatharth Kochar} 199003fa0bSYatharth Kochar 20f90fe02fSChris KaySECTIONS { 21f6088168SHarrison Mutai RAM_REGION_START = ORIGIN(RAM); 22f6088168SHarrison Mutai RAM_REGION_LENGTH = LENGTH(RAM); 239003fa0bSYatharth Kochar . = BL2U_BASE; 24f90fe02fSChris Kay 25a2aedac2SAntonio Nino Diaz ASSERT(. == ALIGN(PAGE_SIZE), 269003fa0bSYatharth Kochar "BL2U_BASE address is not aligned on a page boundary.") 279003fa0bSYatharth Kochar 285d1c104fSSandrine Bailleux#if SEPARATE_CODE_AND_RODATA 295d1c104fSSandrine Bailleux .text . : { 30*3d6edc32SAndrey Skvortsov ASSERT(. == ALIGN(PAGE_SIZE), 31*3d6edc32SAndrey Skvortsov ".text address is not aligned on a page boundary."); 32*3d6edc32SAndrey Skvortsov 335d1c104fSSandrine Bailleux __TEXT_START__ = .; 34f90fe02fSChris Kay 355d1c104fSSandrine Bailleux *bl2u_entrypoint.o(.text*) 36ebd6efaeSSamuel Holland *(SORT_BY_ALIGNMENT(.text*)) 375d1c104fSSandrine Bailleux *(.vectors) 38f7d445fcSMichal Simek __TEXT_END_UNALIGNED__ = .; 39f90fe02fSChris Kay 405629b2b1SRoberto Vargas . = ALIGN(PAGE_SIZE); 41f90fe02fSChris Kay 425d1c104fSSandrine Bailleux __TEXT_END__ = .; 435d1c104fSSandrine Bailleux } >RAM 445d1c104fSSandrine Bailleux 45f90fe02fSChris Kay /* .ARM.extab and .ARM.exidx are only added because Clang needs them */ 46ad925094SRoberto Vargas .ARM.extab . : { 47ad925094SRoberto Vargas *(.ARM.extab* .gnu.linkonce.armextab.*) 48ad925094SRoberto Vargas } >RAM 49ad925094SRoberto Vargas 50ad925094SRoberto Vargas .ARM.exidx . : { 51ad925094SRoberto Vargas *(.ARM.exidx* .gnu.linkonce.armexidx.*) 52ad925094SRoberto Vargas } >RAM 53ad925094SRoberto Vargas 545d1c104fSSandrine Bailleux .rodata . : { 555d1c104fSSandrine Bailleux __RODATA_START__ = .; 56ebd6efaeSSamuel Holland *(SORT_BY_ALIGNMENT(.rodata*)) 570a0a7a9aSMasahiro Yamada 580a0a7a9aSMasahiro Yamada RODATA_COMMON 590a0a7a9aSMasahiro Yamada 60f7d445fcSMichal Simek __RODATA_END_UNALIGNED__ = .; 615629b2b1SRoberto Vargas . = ALIGN(PAGE_SIZE); 625d1c104fSSandrine Bailleux __RODATA_END__ = .; 635d1c104fSSandrine Bailleux } >RAM 64f90fe02fSChris Kay#else /* SEPARATE_CODE_AND_RODATA */ 65da04341eSChris Kay .ro . : { 66*3d6edc32SAndrey Skvortsov ASSERT(. == ALIGN(PAGE_SIZE), 67*3d6edc32SAndrey Skvortsov ".ro address is not aligned on a page boundary."); 68*3d6edc32SAndrey Skvortsov 699003fa0bSYatharth Kochar __RO_START__ = .; 70f90fe02fSChris Kay 719003fa0bSYatharth Kochar *bl2u_entrypoint.o(.text*) 72ebd6efaeSSamuel Holland *(SORT_BY_ALIGNMENT(.text*)) 73ebd6efaeSSamuel Holland *(SORT_BY_ALIGNMENT(.rodata*)) 749003fa0bSYatharth Kochar 750a0a7a9aSMasahiro Yamada RODATA_COMMON 760a0a7a9aSMasahiro Yamada 779003fa0bSYatharth Kochar *(.vectors) 78f90fe02fSChris Kay 799003fa0bSYatharth Kochar __RO_END_UNALIGNED__ = .; 809003fa0bSYatharth Kochar 819003fa0bSYatharth Kochar /* 82f90fe02fSChris Kay * Memory page(s) mapped to this section will be marked as read-only, 83f90fe02fSChris Kay * executable. No RW data from the next section must creep in. Ensure 84f90fe02fSChris Kay * that the rest of the current memory page is unused. 859003fa0bSYatharth Kochar */ 86f90fe02fSChris Kay . = ALIGN(PAGE_SIZE); 87f90fe02fSChris Kay 88f90fe02fSChris Kay __RO_END__ = .; 89f90fe02fSChris Kay } >RAM 90f90fe02fSChris Kay#endif /* SEPARATE_CODE_AND_RODATA */ 91f90fe02fSChris Kay 929003fa0bSYatharth Kochar __RW_START__ = .; 939003fa0bSYatharth Kochar 94caa3e7e0SMasahiro Yamada DATA_SECTION >RAM 95a926a9f6SMasahiro Yamada STACK_SECTION >RAM 96a7739bc7SMasahiro Yamada BSS_SECTION >RAM 97665e71b8SMasahiro Yamada XLAT_TABLE_SECTION >RAM 989003fa0bSYatharth Kochar 999003fa0bSYatharth Kochar#if USE_COHERENT_MEM 1009003fa0bSYatharth Kochar /* 101f90fe02fSChris Kay * The base address of the coherent memory section must be page-aligned to 102f90fe02fSChris Kay * guarantee that the coherent data are stored on their own pages and are 103f90fe02fSChris Kay * not mixed with normal data. This is required to set up the correct 1049003fa0bSYatharth Kochar * memory attributes for the coherent data page tables. 1059003fa0bSYatharth Kochar */ 106da04341eSChris Kay .coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) { 1079003fa0bSYatharth Kochar __COHERENT_RAM_START__ = .; 108da04341eSChris Kay *(.tzfw_coherent_mem) 1099003fa0bSYatharth Kochar __COHERENT_RAM_END_UNALIGNED__ = .; 1109003fa0bSYatharth Kochar 1119003fa0bSYatharth Kochar /* 112f90fe02fSChris Kay * Memory page(s) mapped to this section will be marked as device 113f90fe02fSChris Kay * memory. No other unexpected data must creep in. Ensure the rest of 114f90fe02fSChris Kay * the current memory page is unused. 1159003fa0bSYatharth Kochar */ 116f90fe02fSChris Kay . = ALIGN(PAGE_SIZE); 117f90fe02fSChris Kay 118f90fe02fSChris Kay __COHERENT_RAM_END__ = .; 119f90fe02fSChris Kay } >RAM 120f90fe02fSChris Kay#endif /* USE_COHERENT_MEM */ 121f90fe02fSChris Kay 1229003fa0bSYatharth Kochar __RW_END__ = .; 1239003fa0bSYatharth Kochar __BL2U_END__ = .; 1249003fa0bSYatharth Kochar 1259003fa0bSYatharth Kochar __BSS_SIZE__ = SIZEOF(.bss); 1269003fa0bSYatharth Kochar 1279003fa0bSYatharth Kochar ASSERT(. <= BL2U_LIMIT, "BL2U image has exceeded its limit.") 128f6088168SHarrison Mutai RAM_REGION_END = .; 1299003fa0bSYatharth Kochar} 130