/*
 * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/* Linker script for the BL2U image. Preprocessed with the C preprocessor so
 * that platform_def.h can supply BL2U_BASE/BL2U_LIMIT and the build options
 * SEPARATE_CODE_AND_RODATA / USE_COHERENT_MEM. */
#include <platform_def.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl2u_entrypoint)

MEMORY {
    RAM (rwx): ORIGIN = BL2U_BASE, LENGTH = BL2U_LIMIT - BL2U_BASE
}


SECTIONS
{
    . = BL2U_BASE;
    ASSERT(. == ALIGN(4096),
           "BL2U_BASE address is not aligned on a page boundary.")

#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
        *bl2u_entrypoint.o(.text*)
        *(.text*)
        *(.vectors)
        . = NEXT(4096);
        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(.rodata*)
        . = NEXT(4096);
        __RODATA_END__ = .;
    } >RAM
#else
    ro . : {
        __RO_START__ = .;
        *bl2u_entrypoint.o(.text*)
        *(.text*)
        *(.rodata*)

        *(.vectors)
        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as
         * read-only, executable. No RW data from the next section must
         * creep in. Ensure the rest of the current memory page is unused.
         */
        . = NEXT(4096);
        __RO_END__ = .;
    } >RAM
#endif

    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = . ;

    /*
     * .data must be placed at a lower address than the stacks if the stack
     * protector is enabled. Alternatively, the .data.stack_protector_canary
     * section can be placed independently of the main .data section.
     */
    .data . : {
        __DATA_START__ = .;
        *(.data*)
        __DATA_END__ = .;
    } >RAM

    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 16-byte aligned for better performance of the
     * zero-initialization code.
     */
    .bss : ALIGN(16) {
        __BSS_START__ = .;
        *(SORT_BY_ALIGNMENT(.bss*))
        *(COMMON)
        __BSS_END__ = .;
    } >RAM

    /*
     * The xlat_table section is for full, aligned page tables (4K).
     * Removing them from .bss avoids forcing 4K alignment on
     * the .bss section and eliminates the unnecessary zero init
     */
    xlat_table (NOLOAD) : {
        *(xlat_table)
    } >RAM

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data. This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(4096) {
        __COHERENT_RAM_START__ = .;
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory. No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = NEXT(4096);
        __COHERENT_RAM_END__ = .;
    } >RAM
#endif

    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL2U_END__ = .;

    __BSS_SIZE__ = SIZEOF(.bss);

    ASSERT(. <= BL2U_LIMIT, "BL2U image has exceeded its limit.")
}