/*
 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(tsp_entrypoint)

MEMORY {
    RAM (rwx): ORIGIN = TSP_SEC_MEM_BASE, LENGTH = TSP_SEC_MEM_SIZE
}

SECTIONS {
    RAM_REGION_START = ORIGIN(RAM);
    RAM_REGION_LENGTH = LENGTH(RAM);
    . = BL32_BASE;

    ASSERT(. == ALIGN(PAGE_SIZE),
        "BL32_BASE address is not aligned on a page boundary.")

#if SEPARATE_CODE_AND_RODATA
    .text . : {
        ASSERT(. == ALIGN(PAGE_SIZE),
            ".text address is not aligned on a page boundary.");

        __TEXT_START__ = .;

        *tsp_entrypoint.o(.text*)
        *(.text*)
        *(.vectors)
        __TEXT_END_UNALIGNED__ = .;

        . = ALIGN(PAGE_SIZE);

        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;

        *(.rodata*)

        RODATA_COMMON

        __RODATA_END_UNALIGNED__ = .;
        . = ALIGN(PAGE_SIZE);

        __RODATA_END__ = .;
    } >RAM
#else /* SEPARATE_CODE_AND_RODATA */
    .ro . : {
        ASSERT(. == ALIGN(PAGE_SIZE),
            ".ro address is not aligned on a page boundary.");

        __RO_START__ = .;

        *tsp_entrypoint.o(.text*)
        *(.text*)
        *(.rodata*)

        RODATA_COMMON

        *(.vectors)

        __RO_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as read-only
         * and executable. No RW data from the next section must be allowed to
         * creep in. Ensure that the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);

        __RO_END__ = .;
    } >RAM
#endif /* SEPARATE_CODE_AND_RODATA */
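
    /*
     * For illustration: the page-aligned boundary symbols exported above are
     * typically imported by C code to program the translation-table
     * attributes of the read-only region. A minimal sketch, assuming TF-A's
     * IMPORT_SYM() and xlat_tables_v2 mmap_add_region() helpers and the
     * !SEPARATE_CODE_AND_RODATA layout; the TSP_RO_BASE and TSP_RO_LIMIT
     * names are hypothetical:
     *
     *   #include <lib/utils_def.h>
     *   #include <lib/xlat_tables/xlat_tables_v2.h>
     *
     *   IMPORT_SYM(uintptr_t, __RO_START__, TSP_RO_BASE);
     *   IMPORT_SYM(uintptr_t, __RO_END__, TSP_RO_LIMIT);
     *
     *   static void tsp_map_ro_region(void)
     *   {
     *       mmap_add_region(TSP_RO_BASE, TSP_RO_BASE,
     *                       TSP_RO_LIMIT - TSP_RO_BASE,
     *                       MT_CODE | MT_SECURE);
     *   }
     *
     * Because __RO_END__ is aligned to PAGE_SIZE above, such a mapping
     * cannot accidentally cover RW data placed after the section.
     */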

    __RW_START__ = .;

    DATA_SECTION >RAM
    RELA_SECTION >RAM

#ifdef TSP_PROGBITS_LIMIT
    ASSERT(. <= TSP_PROGBITS_LIMIT, "TSP progbits has exceeded its limit.")
#endif /* TSP_PROGBITS_LIMIT */

    STACK_SECTION >RAM
    BSS_SECTION >RAM
    XLAT_TABLE_SECTION >RAM

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned to
     * guarantee that the coherent data are stored on their own pages and are
     * not mixed with normal data. This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    .coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        *(.tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as device
         * memory. No other unexpected data must be allowed to creep in.
         * Ensure that the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);

        __COHERENT_RAM_END__ = .;
    } >RAM
#endif /* USE_COHERENT_MEM */

    __RW_END__ = .;
    __BL32_END__ = .;

    /DISCARD/ : {
        *(.dynsym .dynstr .hash .gnu.hash)
    }

    __BSS_SIZE__ = SIZEOF(.bss);

#if USE_COHERENT_MEM
    __COHERENT_RAM_UNALIGNED_SIZE__ =
        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif /* USE_COHERENT_MEM */

    ASSERT(. <= BL32_LIMIT, "BL32 image has exceeded its limit.")
    RAM_REGION_END = .;
}
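
/*
 * For illustration: when USE_COHERENT_MEM is enabled, the page-aligned
 * coherent memory boundaries exported above allow the whole section to be
 * mapped with Device memory attributes without touching neighbouring pages.
 * A minimal sketch, assuming TF-A's IMPORT_SYM() and xlat_tables_v2
 * helpers; the TSP_COHERENT_BASE and TSP_COHERENT_LIMIT names are
 * hypothetical:
 *
 *   #include <lib/utils_def.h>
 *   #include <lib/xlat_tables/xlat_tables_v2.h>
 *
 *   IMPORT_SYM(uintptr_t, __COHERENT_RAM_START__, TSP_COHERENT_BASE);
 *   IMPORT_SYM(uintptr_t, __COHERENT_RAM_END__, TSP_COHERENT_LIMIT);
 *
 *   static void tsp_map_coherent_mem(void)
 *   {
 *       mmap_add_region(TSP_COHERENT_BASE, TSP_COHERENT_BASE,
 *                       TSP_COHERENT_LIMIT - TSP_COHERENT_BASE,
 *                       MT_DEVICE | MT_RW | MT_SECURE);
 *   }
 */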