/*
 * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <platform_def.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl31_entrypoint)


MEMORY {
    RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE
}


SECTIONS
{
    . = BL31_BASE;
    ASSERT(. == ALIGN(4096),
           "BL31_BASE address is not aligned on a page boundary.")

    ro . : {
        __RO_START__ = .;
        *bl31_entrypoint.o(.text*)
        *(.text*)
        *(.rodata*)

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;
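        /*
         * Illustrative sketch (not consumed by the build): how C code can
         * populate this region and how BL31 can walk it between the boundary
         * symbols. The descriptor type, instance and init function below are
         * hypothetical; only the "rt_svc_descs" input section name and the
         * __RT_SVC_DESCS_*__ symbols come from this script.
         *
         *   typedef struct {
         *       const char *name;
         *       void (*init)(void);
         *   } svc_desc_t;
         *
         *   static const svc_desc_t my_svc_desc
         *       __attribute__((used, section("rt_svc_descs"), aligned(8))) =
         *       { "my_svc", my_svc_init };
         *
         *   extern const svc_desc_t __RT_SVC_DESCS_START__[];
         *   extern const svc_desc_t __RT_SVC_DESCS_END__[];
         *
         *   for (const svc_desc_t *d = __RT_SVC_DESCS_START__;
         *        d < __RT_SVC_DESCS_END__; d++)
         *       d->init();
         */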
#if ENABLE_PMF
        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PMF_SVC_DESCS_START__ = .;
        KEEP(*(pmf_svc_descs))
        __PMF_SVC_DESCS_END__ = .;
#endif /* ENABLE_PMF */

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        *(.vectors)
        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as read-only,
         * executable. No RW data from the next section must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = NEXT(4096);
        __RO_END__ = .;
    } >RAM

    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")

    /*
     * Define a linker symbol to mark the start of the RW memory area for
     * this image.
     */
    __RW_START__ = . ;

    .data . : {
        __DATA_START__ = .;
        *(.data*)
        __DATA_END__ = .;
    } >RAM

#ifdef BL31_PROGBITS_LIMIT
    ASSERT(. <= BL31_PROGBITS_LIMIT, "BL31 progbits has exceeded its limit.")
#endif

    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address must be 16-byte aligned.
     */
    .bss (NOLOAD) : ALIGN(16) {
        __BSS_START__ = .;
        *(.bss*)
        *(COMMON)
#if !USE_COHERENT_MEM
        /*
         * Bakery locks are stored in normal .bss memory.
         *
         * Each lock's data is spread across multiple cache lines, one per
         * CPU, but multiple locks can share the same cache line. The
         * compiler allocates enough memory for one CPU's bakery locks;
         * the remaining cache lines are allocated by this linker script.
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __BAKERY_LOCK_START__ = .;
        *(bakery_lock)
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(. - __BAKERY_LOCK_START__);
        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __BAKERY_LOCK_END__ = .;
#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
        ASSERT(__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE,
               "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
#endif
#endif
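        /*
         * Illustrative sketch (not consumed by the build): when
         * USE_COHERENT_MEM is 0, a CPU's copy of a lock laid out above can
         * be located from the boundary symbols. The helper below is
         * hypothetical; "me" is the calling CPU's linear index and "lock"
         * points into the first CPU's cache-line chunk. Note that
         * __PERCPU_BAKERY_LOCK_SIZE__ is an absolute symbol, so its
         * "address" is the per-CPU size itself.
         *
         *   extern char __BAKERY_LOCK_START__[];
         *   extern char __PERCPU_BAKERY_LOCK_SIZE__[];
         *
         *   static inline void *get_cpu_lock(void *lock, unsigned int me)
         *   {
         *       return (char *)lock +
         *              me * (size_t)__PERCPU_BAKERY_LOCK_SIZE__;
         *   }
         */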
#if ENABLE_PMF
        /*
         * Time-stamps are stored in normal .bss memory.
         *
         * The compiler allocates enough memory for one CPU's time-stamps;
         * the remaining memory for the other CPUs is allocated by this
         * linker script.
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_TIMESTAMP_START__ = .;
        KEEP(*(pmf_timestamp_array))
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_PERCPU_TIMESTAMP_END__ = .;
        __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
        . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __PMF_TIMESTAMP_END__ = .;
#endif /* ENABLE_PMF */
        __BSS_END__ = .;
    } >RAM

    /*
     * The xlat_table section is for full, aligned page tables (4K).
     * Removing them from .bss avoids forcing 4K alignment on
     * the .bss section and eliminates the unnecessary zero init.
     */
    xlat_table (NOLOAD) : {
        *(xlat_table)
    } >RAM
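    /*
     * Illustrative sketch (not consumed by the build): translation tables
     * can be routed into the section above with a section attribute. The
     * array name and table count are hypothetical; 512 64-bit entries per
     * 4K-aligned table matches the 4K translation granule.
     *
     *   static uint64_t xlat_tables[MAX_XLAT_TABLES][512]
     *       __attribute__((section("xlat_table"), aligned(4096)));
     */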
#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned
     * (4K) to guarantee that the coherent data are stored on their own
     * pages and are not mixed with normal data. This is required to set up
     * the correct memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(4096) {
        __COHERENT_RAM_START__ = .;
        /*
         * Bakery locks are stored in coherent memory.
         *
         * Each lock's data is contiguous and fully allocated by the
         * compiler.
         */
        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory. No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = NEXT(4096);
        __COHERENT_RAM_END__ = .;
    } >RAM
#endif

    /*
     * Define a linker symbol to mark the end of the RW memory area for
     * this image.
     */
    __RW_END__ = .;
    __BL31_END__ = .;

    __BSS_SIZE__ = SIZEOF(.bss);
#if USE_COHERENT_MEM
    __COHERENT_RAM_UNALIGNED_SIZE__ =
        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
}
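/*
 * Illustrative sketch (not consumed by the build): platform code can size
 * the RW area from the symbols defined above, e.g. to clean it from the
 * data cache before power-down. flush_dcache_range() stands in for whatever
 * cache maintenance helper the codebase provides; treat its name and
 * signature as an assumption.
 *
 *   extern char __RW_START__[], __RW_END__[];
 *
 *   flush_dcache_range((uintptr_t)__RW_START__,
 *                      (size_t)(__RW_END__ - __RW_START__));
 */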