/*
 * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <lib/xlat_tables/xlat_tables_defs.h>

OUTPUT_FORMAT(elf32-littlearm)
OUTPUT_ARCH(arm)
ENTRY(sp_min_vector_table)

MEMORY {
    RAM (rwx): ORIGIN = BL32_BASE, LENGTH = BL32_LIMIT - BL32_BASE
}

#ifdef PLAT_SP_MIN_EXTRA_LD_SCRIPT
#include <plat_sp_min.ld.S>
#endif

SECTIONS
{
    . = BL32_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL32_BASE address is not aligned on a page boundary.")

#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
        *entrypoint.o(.text*)
        *(.text*)
        *(.vectors)
        . = ALIGN(PAGE_SIZE);
        __TEXT_END__ = .;
    } >RAM

    /* .ARM.extab and .ARM.exidx are only added because Clang needs them */
    .ARM.extab . : {
        *(.ARM.extab* .gnu.linkonce.armextab.*)
    } >RAM

    .ARM.exidx . : {
        *(.ARM.exidx* .gnu.linkonce.armexidx.*)
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(.rodata*)

        /* Ensure 4-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(4);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

#if ENABLE_PMF
        /* Ensure 4-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(4);
        __PMF_SVC_DESCS_START__ = .;
        KEEP(*(pmf_svc_descs))
        __PMF_SVC_DESCS_END__ = .;
#endif /* ENABLE_PMF */

        /*
         * Ensure 4-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(4);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        . = ALIGN(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM
#else
    ro . : {
        __RO_START__ = .;
        *entrypoint.o(.text*)
        *(.text*)
        *(.rodata*)

        /* Ensure 4-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(4);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

        /*
         * Ensure 4-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(4);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        *(.vectors)
        __RO_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as
         * read-only, executable. No RW data from the next section must
         * creep in. Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __RO_END__ = .;
    } >RAM
#endif

    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")
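    /*
     * Note: the rt_svc_descs and cpu_ops input sections gathered above are
     * populated by C and assembly sources, not by this script. As a rough
     * sketch, a runtime service descriptor typically reaches rt_svc_descs
     * through TF-A's DECLARE_RT_SVC() macro, which boils down to a section
     * attribute (my_svc is a hypothetical service name):
     *
     *     static const rt_svc_desc_t __svc_desc_my_svc
     *             __attribute__((section("rt_svc_descs"), used)) = { ... };
     *
     * The KEEP() directives above prevent such otherwise unreferenced
     * descriptors from being discarded by the linker.
     */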
    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = . ;

    .data . : {
        __DATA_START__ = .;
        *(.data*)
        __DATA_END__ = .;
    } >RAM

#ifdef BL32_PROGBITS_LIMIT
    ASSERT(. <= BL32_PROGBITS_LIMIT, "BL32 progbits has exceeded its limit.")
#endif

    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM
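    /*
     * The stacks section above collects the per-CPU stacks that platform
     * code declares in assembly. A minimal sketch, assuming TF-A's
     * declare_stack helper macro (which emits the storage into the
     * tzfw_normal_stacks input section):
     *
     *     declare_stack platform_normal_stacks, tzfw_normal_stacks, \
     *             PLATFORM_STACK_SIZE, PLATFORM_CORE_COUNT
     *
     * Marking the output section (NOLOAD) keeps this runtime-only storage
     * out of the loaded image.
     */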
    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 8-byte aligned for better performance of the
     * zero-initialization code.
     */
    .bss (NOLOAD) : ALIGN(8) {
        __BSS_START__ = .;
        *(.bss*)
        *(COMMON)
#if !USE_COHERENT_MEM
        /*
         * Bakery locks are stored in normal .bss memory.
         *
         * Each lock's data is spread across multiple cache lines, one per CPU,
         * but multiple locks can share the same cache line.
         * The compiler will allocate enough memory for one CPU's bakery locks;
         * the remaining cache lines are allocated by this linker script.
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __BAKERY_LOCK_START__ = .;
        __PERCPU_BAKERY_LOCK_START__ = .;
        *(bakery_lock)
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PERCPU_BAKERY_LOCK_END__ = .;
        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(__PERCPU_BAKERY_LOCK_END__ - __PERCPU_BAKERY_LOCK_START__);
        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __BAKERY_LOCK_END__ = .;
#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
        ASSERT(__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE,
               "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
#endif
#endif

#if ENABLE_PMF
        /*
         * Time-stamps are stored in normal .bss memory.
         *
         * The compiler will allocate enough memory for one CPU's time-stamps;
         * the remaining memory for other CPUs is allocated by this
         * linker script.
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_TIMESTAMP_START__ = .;
        KEEP(*(pmf_timestamp_array))
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_PERCPU_TIMESTAMP_END__ = .;
        __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
        . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __PMF_TIMESTAMP_END__ = .;
#endif /* ENABLE_PMF */

        __BSS_END__ = .;
    } >RAM
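    /*
     * Worked example of the per-CPU sizing above, assuming a hypothetical
     * platform with PLATFORM_CORE_COUNT = 4 and CACHE_WRITEBACK_GRANULE = 64:
     * if the compiler emits 40 bytes of lock data into bakery_lock, the
     * second ALIGN() pads that to a full 64-byte granule, so
     * __PERCPU_BAKERY_LOCK_SIZE__ = 64 and the location counter advances by
     * a further 64 * (4 - 1) = 192 bytes to reserve copies for the three
     * remaining CPUs. The PMF time-stamp region is sized the same way.
     */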
    /*
     * The xlat_table section is for full, aligned page tables (4K).
     * Removing them from .bss avoids forcing 4K alignment on
     * the .bss section. The tables are initialized to zero by the translation
     * tables library.
     */
    xlat_table (NOLOAD) : {
        *(xlat_table)
    } >RAM

    __BSS_SIZE__ = SIZEOF(.bss);

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data. This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        /*
         * Bakery locks are stored in coherent memory.
         *
         * Each lock's data is contiguous and fully allocated by the compiler.
         */
        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory. No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __COHERENT_RAM_END__ = .;
    } >RAM

    __COHERENT_RAM_UNALIGNED_SIZE__ =
        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif

    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;

    __BL32_END__ = .;
}
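/*
 * The __XXX_START__/__XXX_END__ symbols defined throughout this script are
 * consumed from C. A minimal sketch, assuming TF-A's IMPORT_SYM() helper
 * (SP_MIN_RW_BASE is a hypothetical name chosen for illustration):
 *
 *     IMPORT_SYM(uintptr_t, __RW_START__, SP_MIN_RW_BASE);
 *
 * which expands to an extern declaration of the linker symbol plus a typed
 * constant holding its address, e.g. for configuring translation tables.
 */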