/*
 * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl2u_entrypoint)

MEMORY {
    RAM (rwx): ORIGIN = BL2U_BASE, LENGTH = BL2U_LIMIT - BL2U_BASE
}


SECTIONS
{
    . = BL2U_BASE;
    ASSERT(. == ALIGN(4096),
           "BL2U_BASE address is not aligned on a page boundary.")

#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
        *bl2u_entrypoint.o(.text*)
        *(.text*)
        *(.vectors)
        . = NEXT(4096);
        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(.rodata*)
        . = NEXT(4096);
        __RODATA_END__ = .;
    } >RAM
#else
    ro . : {
        __RO_START__ = .;
        *bl2u_entrypoint.o(.text*)
        *(.text*)
        *(.rodata*)

        *(.vectors)
        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as
         * read-only, executable. No RW data from the next section must
         * creep in. Ensure the rest of the current memory page is unused.
         */
        . = NEXT(4096);
        __RO_END__ = .;
    } >RAM
#endif

    /*
     * Define a linker symbol to mark the start of the RW memory area for
     * this image.
     */
    __RW_START__ = . ;

    /*
     * .data must be placed at a lower address than the stacks if the stack
     * protector is enabled. Alternatively, the .data.stack_protector_canary
     * section can be placed independently of the main .data section.
     */
    .data . : {
        __DATA_START__ = .;
        *(.data*)
        __DATA_END__ = .;
    } >RAM

    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 16-byte aligned for better performance of
     * the zero-initialization code.
     */
    .bss : ALIGN(16) {
        __BSS_START__ = .;
        *(SORT_BY_ALIGNMENT(.bss*))
        *(COMMON)
        __BSS_END__ = .;
    } >RAM

    /*
     * The xlat_table section is for full, aligned page tables (4K).
     * Removing them from .bss avoids forcing 4K alignment on
     * the .bss section and eliminates the unnecessary zero init.
     */
    xlat_table (NOLOAD) : {
        *(xlat_table)
    } >RAM

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned
     * (4K) to guarantee that the coherent data are stored on their own pages
     * and are not mixed with normal data. This is required to set up the
     * correct memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(4096) {
        __COHERENT_RAM_START__ = .;
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory. No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = NEXT(4096);
        __COHERENT_RAM_END__ = .;
    } >RAM
#endif

    /*
     * Define a linker symbol to mark the end of the RW memory area for
     * this image.
     */
    __RW_END__ = .;
    __BL2U_END__ = .;

    __BSS_SIZE__ = SIZEOF(.bss);

    ASSERT(. <= BL2U_LIMIT, "BL2U image has exceeded its limit.")
}
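
/*
 * A minimal sketch of how linker-defined symbols such as __BSS_START__ and
 * __BSS_END__ are typically consumed from C: they are declared as extern
 * arrays so that only their addresses are taken. The zero_bl2u_bss() helper
 * name below is hypothetical and only illustrates the idiom; it is not part
 * of this image's actual startup code.
 *
 *     extern char __BSS_START__[], __BSS_END__[];
 *
 *     static void zero_bl2u_bss(void)
 *     {
 *         char *p = __BSS_START__;
 *
 *         while (p < __BSS_END__)
 *             *p++ = 0;
 *     }
 */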