/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <platform_def.h>

OUTPUT_FORMAT(elf32-littlearm)
OUTPUT_ARCH(arm)
ENTRY(sp_min_vector_table)

MEMORY {
    RAM (rwx): ORIGIN = BL32_BASE, LENGTH = BL32_LIMIT - BL32_BASE
}


SECTIONS
{
    . = BL32_BASE;
    ASSERT(. == ALIGN(4096),
        "BL32_BASE address is not aligned on a page boundary.")

#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
        *entrypoint.o(.text*)
        *(.text*)
        *(.vectors)
        . = NEXT(4096);
        __TEXT_END__ = .;
    } >RAM
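
    /*
     * NEXT(4096) above rounds the location counter up to the next 4 KB
     * boundary, so the code pages never share a page with the read-only
     * data that follows and can be mapped read-only/executable on their
     * own. A worked example (the addresses are assumptions chosen for
     * illustration only): if BL32_BASE is page-aligned and the counter
     * reaches BL32_BASE + 0x2340 after *(.vectors), NEXT(4096) advances it
     * to BL32_BASE + 0x3000, and __TEXT_END__ records that page-aligned
     * limit.
     */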

    .rodata . : {
        __RODATA_START__ = .;
        *(.rodata*)

        /* Ensure 4-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(4);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

        /*
         * Ensure 4-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(4);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        . = NEXT(4096);
        __RODATA_END__ = .;
    } >RAM
#else
    ro . : {
        __RO_START__ = .;
        *entrypoint.o(.text*)
        *(.text*)
        *(.rodata*)

        /* Ensure 4-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(4);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

        /*
         * Ensure 4-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(4);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        *(.vectors)
        __RO_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as
         * read-only, executable. No RW data from the next section must
         * creep in. Ensure the rest of the current memory page is unused.
         */
        . = NEXT(4096);
        __RO_END__ = .;
    } >RAM
#endif

    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
        "cpu_ops not defined for this platform.")
    /*
     * Define a linker symbol to mark the start of the RW memory area for
     * this image.
     */
    __RW_START__ = . ;

    .data . : {
        __DATA_START__ = .;
        *(.data*)
        __DATA_END__ = .;
    } >RAM

    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM
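
    /*
     * The stacks section above gathers the per-CPU secure stacks that
     * platform code emits into the tzfw_normal_stacks input section; being
     * NOLOAD, it occupies address space but contributes no bytes to the
     * loaded image, since stacks need no initial values. A minimal sketch
     * of how an object can land there from C (the array name and the use
     * of a C-level section attribute are illustrative assumptions; a
     * platform may instead declare its stacks in assembly):
     *
     *   static uint64_t sp_min_stacks[PLATFORM_CORE_COUNT]
     *                       [PLATFORM_STACK_SIZE / sizeof(uint64_t)]
     *       __attribute__((used, section("tzfw_normal_stacks")));
     */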

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address must be 16-byte aligned.
     */
    .bss (NOLOAD) : ALIGN(16) {
        __BSS_START__ = .;
        *(.bss*)
        *(COMMON)
#if !USE_COHERENT_MEM
        /*
         * Bakery locks are stored in normal .bss memory
         *
         * Each lock's data is spread across multiple cache lines, one per
         * CPU, but multiple locks can share the same cache line.
         * The compiler will allocate enough memory for one CPU's bakery
         * locks; the remaining cache lines are allocated by the linker
         * script.
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __BAKERY_LOCK_START__ = .;
        *(bakery_lock)
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(. - __BAKERY_LOCK_START__);
        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __BAKERY_LOCK_END__ = .;
#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
        ASSERT(__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE,
            "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
#endif
#endif

#if ENABLE_PMF
        /*
         * Time-stamps are stored in normal .bss memory
         *
         * The compiler will allocate enough memory for one CPU's
         * time-stamps; the remaining memory for the other CPUs is
         * allocated by the linker script.
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_TIMESTAMP_START__ = .;
        KEEP(*(pmf_timestamp_array))
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_PERCPU_TIMESTAMP_END__ = .;
        __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
        . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __PMF_TIMESTAMP_END__ = .;
#endif /* ENABLE_PMF */

        __BSS_END__ = .;
    } >RAM

    /*
     * The xlat_table section is for full, aligned page tables (4K).
     * Removing them from .bss avoids forcing 4K alignment on
     * the .bss section and eliminates the unnecessary zero init.
     */
    xlat_table (NOLOAD) : {
        *(xlat_table)
    } >RAM

    __BSS_SIZE__ = SIZEOF(.bss);
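
    /*
     * Sizing of the bakery_lock reservation above, as a worked example
     * (the concrete values are assumptions for illustration only): with
     * CACHE_WRITEBACK_GRANULE = 64 and, say, 24 bytes of lock data emitted
     * by the compiler for one CPU, the two ALIGN() statements round
     * __PERCPU_BAKERY_LOCK_SIZE__ up to 64; with PLATFORM_CORE_COUNT = 4,
     * the ". = . + 64 * (4 - 1)" advance then reserves 256 bytes in total,
     * so CPU n's copy of the locks starts at
     * __BAKERY_LOCK_START__ + (n * __PERCPU_BAKERY_LOCK_SIZE__).
     */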

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned
     * (4K) to guarantee that the coherent data are stored on their own
     * pages and are not mixed with normal data. This is required to set up
     * the correct memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(4096) {
        __COHERENT_RAM_START__ = .;
        /*
         * Bakery locks are stored in coherent memory
         *
         * Each lock's data is contiguous and fully allocated by the
         * compiler
         */
        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory. No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = NEXT(4096);
        __COHERENT_RAM_END__ = .;
    } >RAM

    __COHERENT_RAM_UNALIGNED_SIZE__ =
        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif

    /*
     * Define a linker symbol to mark the end of the RW memory area for
     * this image.
     */
    __RW_END__ = .;

    __BL32_END__ = .;
}
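
/*
 * The boundary symbols defined in SECTIONS are typically consumed from C by
 * declaring them extern and taking their addresses; a minimal sketch, where
 * the macro names are illustrative assumptions rather than definitions from
 * this file:
 *
 *   #include <stdint.h>
 *
 *   extern uintptr_t __RW_START__, __RW_END__, __BL32_END__;
 *
 *   // The address of a linker-defined symbol is the value the linker
 *   // assigned to it.
 *   #define BL32_RW_BASE   ((uintptr_t)&__RW_START__)
 *   #define BL32_RW_LIMIT  ((uintptr_t)&__RW_END__)
 *   #define BL32_IMAGE_END ((uintptr_t)&__BL32_END__)
 *
 * Platform code can then map [BL32_RW_BASE, BL32_RW_LIMIT) as read-write,
 * execute-never data, while the RO region ahead of __RW_START__ stays
 * read-only.
 */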