/*
 * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl2u_entrypoint)

MEMORY {
    RAM (rwx): ORIGIN = BL2U_BASE, LENGTH = BL2U_LIMIT - BL2U_BASE
}

SECTIONS
{
    . = BL2U_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL2U_BASE address is not aligned on a page boundary.")

#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
        *bl2u_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(.vectors)
        . = ALIGN(PAGE_SIZE);
        __TEXT_END__ = .;
    } >RAM

    /* .ARM.extab and .ARM.exidx are only added because Clang needs them */
    .ARM.extab . : {
        *(.ARM.extab* .gnu.linkonce.armextab.*)
    } >RAM

    .ARM.exidx . : {
        *(.ARM.exidx* .gnu.linkonce.armexidx.*)
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(SORT_BY_ALIGNMENT(.rodata*))

        RODATA_COMMON

        . = ALIGN(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM
#else
    ro . : {
        __RO_START__ = .;
        *bl2u_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(SORT_BY_ALIGNMENT(.rodata*))

        RODATA_COMMON

        *(.vectors)
        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as
         * read-only, executable. No RW data from the next section must
         * creep in. Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __RO_END__ = .;
    } >RAM
#endif
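
    /*
     * Illustrative sketch (hypothetical, not part of this image): the
     * boundary symbols defined above (__TEXT_*__/__RODATA_*__ or __RO_*__,
     * depending on SEPARATE_CODE_AND_RODATA) are typically imported into C
     * as byte arrays so that platform code can size and map the read-only
     * region; the exact helpers and region names depend on the platform
     * port.
     *
     *	// Hypothetical usage, assuming the non-separated (__RO_*__) layout.
     *	extern char __RO_START__[], __RO_END__[];
     *	size_t ro_size = (size_t)(__RO_END__ - __RO_START__);
     */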

    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = .;

    /*
     * .data must be placed at a lower address than the stacks if the stack
     * protector is enabled. Alternatively, the .data.stack_protector_canary
     * section can be placed independently of the main .data section.
     */
    .data . : {
        __DATA_START__ = .;
        *(SORT_BY_ALIGNMENT(.data*))
        __DATA_END__ = .;
    } >RAM

    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 16-byte aligned for better performance of
     * the zero-initialization code.
     */
    .bss : ALIGN(16) {
        __BSS_START__ = .;
        *(SORT_BY_ALIGNMENT(.bss*))
        *(COMMON)
        __BSS_END__ = .;
    } >RAM

    XLAT_TABLE_SECTION >RAM

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned
     * (4K) to guarantee that the coherent data are stored on their own pages
     * and are not mixed with normal data. This is required to set up the
     * correct memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory. No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __COHERENT_RAM_END__ = .;
    } >RAM
#endif
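
    /*
     * Illustrative sketch (hypothetical, not part of this image): the .bss
     * boundary symbols above delimit the region that must be zero-filled
     * before any C code relies on it; that is normally handled by the
     * entrypoint code, but a C equivalent would look like this:
     *
     *	#include <string.h>
     *
     *	extern char __BSS_START__[], __BSS_END__[];
     *	(void)memset(__BSS_START__, 0,
     *		     (size_t)(__BSS_END__ - __BSS_START__));
     */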

    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL2U_END__ = .;

    __BSS_SIZE__ = SIZEOF(.bss);

    ASSERT(. <= BL2U_LIMIT, "BL2U image has exceeded its limit.")
}
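
/*
 * Illustrative sketch (hypothetical usage, not part of the image): the
 * __RW_START__/__RW_END__ pair exported above is commonly used to clean the
 * image's writable data from the data cache around MMU/cache state changes,
 * for example:
 *
 *	extern char __RW_START__[], __RW_END__[];
 *	flush_dcache_range((uintptr_t)__RW_START__,
 *			   (size_t)(__RW_END__ - __RW_START__));
 *
 * flush_dcache_range() is assumed here to be the usual TF-A cache helper;
 * the exact call site is entrypoint/platform specific.
 */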