/*
 * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

OUTPUT_FORMAT(elf32-littlearm)
OUTPUT_ARCH(arm)
ENTRY(sp_min_vector_table)

MEMORY {
    RAM (rwx): ORIGIN = BL32_BASE, LENGTH = BL32_LIMIT - BL32_BASE
}

#ifdef PLAT_SP_MIN_EXTRA_LD_SCRIPT
#include <plat_sp_min.ld.S>
#endif

SECTIONS
{
    . = BL32_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL32_BASE address is not aligned on a page boundary.")

#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
        *entrypoint.o(.text*)
        *(.text*)
        *(.vectors)
        . = ALIGN(PAGE_SIZE);
        __TEXT_END__ = .;
    } >RAM

    /* .ARM.extab and .ARM.exidx are only added because Clang needs them */
    .ARM.extab . : {
        *(.ARM.extab* .gnu.linkonce.armextab.*)
    } >RAM

    .ARM.exidx . : {
        *(.ARM.exidx* .gnu.linkonce.armexidx.*)
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(.rodata*)

        RT_SVC_DESCS
        FCONF_POPULATOR
        PMF_SVC_DESCS
        CPU_OPS

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        . = ALIGN(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM
#else
    ro . : {
        __RO_START__ = .;
        *entrypoint.o(.text*)
        *(.text*)
        *(.rodata*)

        RT_SVC_DESCS
        FCONF_POPULATOR
        CPU_OPS

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        *(.vectors)
        __RO_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as
         * read-only, executable. No RW data from the next section must
         * creep in. Ensure the rest of the current memory block is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __RO_END__ = .;
    } >RAM
#endif

    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")

    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = . ;

    .data . : {
        __DATA_START__ = .;
        *(.data*)
        __DATA_END__ = .;
    } >RAM

#ifdef BL32_PROGBITS_LIMIT
    ASSERT(. <= BL32_PROGBITS_LIMIT, "BL32 progbits has exceeded its limit.")
#endif
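
    /*
     * Per-CPU stacks. The storage itself is expected to be provided by the
     * platform layer, which places it in the tzfw_normal_stacks input
     * section (typically via the declare_stack assembler macro).
     */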
    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 8-byte aligned for better performance of the
     * zero-initialization code.
     */
    .bss (NOLOAD) : ALIGN(8) {
        __BSS_START__ = .;
        *(.bss*)
        *(COMMON)
        BAKERY_LOCK_NORMAL
        PMF_TIMESTAMP
        __BSS_END__ = .;
    } >RAM

    XLAT_TABLE_SECTION >RAM

    __BSS_SIZE__ = SIZEOF(.bss);

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data. This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        /*
         * Bakery locks are stored in coherent memory
         *
         * Each lock's data is contiguous and fully allocated by the compiler
         */
        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory. No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __COHERENT_RAM_END__ = .;
    } >RAM

    __COHERENT_RAM_UNALIGNED_SIZE__ =
        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif

    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;

    __BL32_END__ = .;
}