/*
 * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <platform_def.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl31_entrypoint)


MEMORY {
    RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE
}


SECTIONS
{
    . = BL31_BASE;
    ASSERT(. == ALIGN(4096),
           "BL31_BASE address is not aligned on a page boundary.")
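
    /*
     * The object providing bl31_entrypoint is placed first in both layouts
     * below, so that the entry point code is linked at BL31_BASE. When
     * SEPARATE_CODE_AND_RODATA is enabled, code and read-only data are
     * emitted as two individually page-aligned sections, allowing their
     * memory pages to be mapped with distinct attributes; otherwise both
     * share the single 'ro' section.
     */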
#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
        *bl31_entrypoint.o(.text*)
        *(.text*)
        *(.vectors)
        . = NEXT(4096);
        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(.rodata*)

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

#if ENABLE_PMF
        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PMF_SVC_DESCS_START__ = .;
        KEEP(*(pmf_svc_descs))
        __PMF_SVC_DESCS_END__ = .;
#endif /* ENABLE_PMF */

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        . = NEXT(4096);
        __RODATA_END__ = .;
    } >RAM
#else
    ro . : {
        __RO_START__ = .;
        *bl31_entrypoint.o(.text*)
        *(.text*)
        *(.rodata*)

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

#if ENABLE_PMF
        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PMF_SVC_DESCS_START__ = .;
        KEEP(*(pmf_svc_descs))
        __PMF_SVC_DESCS_END__ = .;
#endif /* ENABLE_PMF */

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        *(.vectors)
        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as read-only,
         * executable. No RW data from the next section must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = NEXT(4096);
        __RO_END__ = .;
    } >RAM
#endif
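
    /*
     * The rt_svc_descs, pmf_svc_descs and cpu_ops arrays above are only
     * reached at runtime by iterating between their __*_START__ and
     * __*_END__ linker symbols, so KEEP() is needed to prevent the linker's
     * --gc-sections pass from discarding them as unreferenced.
     */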
    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")

    /*
     * Define a linker symbol to mark the start of the RW memory area for
     * this image.
     */
    __RW_START__ = .;

    .data . : {
        __DATA_START__ = .;
        *(.data*)
        __DATA_END__ = .;
    } >RAM

#ifdef BL31_PROGBITS_LIMIT
    ASSERT(. <= BL31_PROGBITS_LIMIT, "BL31 progbits has exceeded its limit.")
#endif
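
    /*
     * The (NOLOAD) sections below occupy RAM at runtime but contribute no
     * bytes to the loaded image: the linker allocates addresses for the
     * stacks, zero-initialised data and page tables without emitting their
     * contents, keeping the binary small.
     */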
    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address must be 16-byte aligned.
     */
    .bss (NOLOAD) : ALIGN(16) {
        __BSS_START__ = .;
        *(.bss*)
        *(COMMON)
#if !USE_COHERENT_MEM
        /*
         * Bakery locks are stored in normal .bss memory.
         *
         * Each lock's data is spread across multiple cache lines, one per
         * CPU, but multiple locks can share the same cache line.
         * The compiler allocates enough memory for one CPU's bakery locks;
         * the remaining cache lines are allocated by the linker script.
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __BAKERY_LOCK_START__ = .;
        *(bakery_lock)
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(. - __BAKERY_LOCK_START__);
        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __BAKERY_LOCK_END__ = .;
#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
        ASSERT(__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE,
               "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
#endif
#endif

#if ENABLE_PMF
        /*
         * Time-stamps are stored in normal .bss memory.
         *
         * The compiler allocates enough memory for one CPU's time-stamps;
         * the remaining memory for the other CPUs is allocated by the
         * linker script.
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_TIMESTAMP_START__ = .;
        KEEP(*(pmf_timestamp_array))
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_PERCPU_TIMESTAMP_END__ = .;
        __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
        . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __PMF_TIMESTAMP_END__ = .;
#endif /* ENABLE_PMF */
        __BSS_END__ = .;
    } >RAM

    /*
     * The xlat_table section is for full, aligned page tables (4K).
     * Removing them from .bss avoids forcing 4K alignment on
     * the .bss section and eliminates the unnecessary zero init.
     */
    xlat_table (NOLOAD) : {
        *(xlat_table)
    } >RAM

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned
     * (4K) to guarantee that the coherent data are stored on their own pages
     * and are not mixed with normal data. This is required to set up the
     * correct memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(4096) {
        __COHERENT_RAM_START__ = .;
        /*
         * Bakery locks are stored in coherent memory.
         *
         * Each lock's data is contiguous and fully allocated by the compiler.
         */
        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory. No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = NEXT(4096);
        __COHERENT_RAM_END__ = .;
    } >RAM
#endif

    /*
     * Define a linker symbol to mark the end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL31_END__ = .;

    __BSS_SIZE__ = SIZEOF(.bss);
#if USE_COHERENT_MEM
    __COHERENT_RAM_UNALIGNED_SIZE__ =
        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
}