/*
 * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * Linker script for the SP_MIN (BL32) image.  This file is run through the
 * C preprocessor before being handed to the linker, hence the #include and
 * #if directives below.
 */

#include <platform_def.h>

#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

OUTPUT_FORMAT(elf32-littlearm)
OUTPUT_ARCH(arm)
ENTRY(sp_min_vector_table)

/* The whole image lives in one platform-defined RAM region. */
MEMORY {
    RAM (rwx): ORIGIN = BL32_BASE, LENGTH = BL32_LIMIT - BL32_BASE
}

/* Hook for platforms to contribute extra linker-script fragments. */
#ifdef PLAT_SP_MIN_EXTRA_LD_SCRIPT
#include <plat_sp_min.ld.S>
#endif

SECTIONS
{
    . = BL32_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL32_BASE address is not aligned on a page boundary.")

#if SEPARATE_CODE_AND_RODATA
    /*
     * Code and exception vectors.  The page(s) covering this section are
     * expected to be mapped read-only/executable, so it is padded out to a
     * page boundary.  The entry-point object is placed first.
     */
    .text . : {
        __TEXT_START__ = .;
        *entrypoint.o(.text*)
        *(.text*)
        *(.vectors)
        . = ALIGN(PAGE_SIZE);
        __TEXT_END__ = .;
    } >RAM

    /* .ARM.extab and .ARM.exidx are only added because Clang needs them */
    .ARM.extab . : {
        *(.ARM.extab* .gnu.linkonce.armextab.*)
    } >RAM

    .ARM.exidx . : {
        *(.ARM.exidx* .gnu.linkonce.armexidx.*)
    } >RAM

    /* Read-only data, padded to a page boundary like .text above. */
    .rodata . : {
        __RODATA_START__ = .;
        *(.rodata*)

        /* Ensure 4-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(4);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

#if ENABLE_PMF
        /* Ensure 4-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(4);
        __PMF_SVC_DESCS_START__ = .;
        KEEP(*(pmf_svc_descs))
        __PMF_SVC_DESCS_END__ = .;
#endif /* ENABLE_PMF */

        /*
         * Ensure 4-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(4);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        . = ALIGN(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM
#else
    /*
     * Combined read-only section used when code and read-only data share
     * the same memory attributes (read-only, executable).
     */
    ro . : {
        __RO_START__ = .;
        *entrypoint.o(.text*)
        *(.text*)
        *(.rodata*)

        /* Ensure 4-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(4);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

        /*
         * Ensure 4-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(4);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        *(.vectors)
        __RO_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as
         * read-only, executable. No RW data from the next section must
         * creep in. Ensure the rest of the current memory block is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __RO_END__ = .;
    } >RAM
#endif

    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")

    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = . ;

    .data . : {
        __DATA_START__ = .;
        *(.data*)
        __DATA_END__ = .;
    } >RAM

#ifdef BL32_PROGBITS_LIMIT
    ASSERT(. <= BL32_PROGBITS_LIMIT, "BL32 progbits has exceeded its limit.")
#endif

    /* NOLOAD: the stacks occupy space in the image layout but carry no data. */
    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 8-byte aligned for better performance of the
     * zero-initialization code.
     */
    .bss (NOLOAD) : ALIGN(8) {
        __BSS_START__ = .;
        *(.bss*)
        *(COMMON)
#if !USE_COHERENT_MEM
        /*
         * Bakery locks are stored in normal .bss memory
         *
         * Each lock's data is spread across multiple cache lines, one per CPU,
         * but multiple locks can share the same cache line.
         * The compiler will allocate enough memory for one CPU's bakery locks,
         * the remaining cache lines are allocated by the linker script
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __BAKERY_LOCK_START__ = .;
        __PERCPU_BAKERY_LOCK_START__ = .;
        *(bakery_lock)
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PERCPU_BAKERY_LOCK_END__ = .;
        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(__PERCPU_BAKERY_LOCK_END__ - __PERCPU_BAKERY_LOCK_START__);
        /* Reserve one more per-CPU slot for each remaining core. */
        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __BAKERY_LOCK_END__ = .;
#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
        ASSERT(__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE,
               "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
#endif
#endif

#if ENABLE_PMF
        /*
         * Time-stamps are stored in normal .bss memory
         *
         * The compiler will allocate enough memory for one CPU's time-stamps,
         * the remaining memory for other CPUs is allocated by the
         * linker script
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_TIMESTAMP_START__ = .;
        KEEP(*(pmf_timestamp_array))
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_PERCPU_TIMESTAMP_END__ = .;
        __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
        /* Reserve timestamp storage for each remaining core. */
        . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __PMF_TIMESTAMP_END__ = .;
#endif /* ENABLE_PMF */

        __BSS_END__ = .;
    } >RAM

    /* Translation tables; section contents defined in common/bl_common.ld.h */
    XLAT_TABLE_SECTION >RAM

    __BSS_SIZE__ = SIZEOF(.bss);

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data. This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        /*
         * Bakery locks are stored in coherent memory
         *
         * Each lock's data is contiguous and fully allocated by the compiler
         */
        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory. No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __COHERENT_RAM_END__ = .;
    } >RAM

    __COHERENT_RAM_UNALIGNED_SIZE__ =
        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif

    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;

    __BL32_END__ = .;
}