/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <platform_def.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl1_entrypoint)

MEMORY {
    ROM (rx): ORIGIN = BL1_RO_BASE, LENGTH = BL1_RO_LIMIT - BL1_RO_BASE
    RAM (rwx): ORIGIN = BL1_RW_BASE, LENGTH = BL1_RW_LIMIT - BL1_RW_BASE
}
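
/*
 * The ROM and RAM region boundaries come from the platform port via
 * platform_def.h. As a purely hypothetical illustration (these values are
 * made up, not taken from any real platform), a port might provide:
 *
 *   #define BL1_RO_BASE    0x00000000
 *   #define BL1_RO_LIMIT   (BL1_RO_BASE + 0x00010000)
 *   #define BL1_RW_BASE    0x04030000
 *   #define BL1_RW_LIMIT   (BL1_RW_BASE + 0x00008000)
 *
 * The LENGTH expressions above, together with the ASSERTs below, make the
 * linker reject an image that overflows either region.
 */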

SECTIONS
{
    . = BL1_RO_BASE;
    ASSERT(. == ALIGN(4096),
           "BL1_RO_BASE address is not aligned on a page boundary.")

#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
        *bl1_entrypoint.o(.text*)
        *(.text*)
        *(.vectors)
        . = NEXT(4096);
        __TEXT_END__ = .;
    } >ROM

    .rodata . : {
        __RODATA_START__ = .;
        *(.rodata*)

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PARSER_LIB_DESCS_START__ = .;
        KEEP(*(.img_parser_lib_descs))
        __PARSER_LIB_DESCS_END__ = .;

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /*
         * No need to pad out the .rodata section to a page boundary. Next is
         * the .data section, which can be mapped in ROM with the same memory
         * attributes as the .rodata section.
         */
        __RODATA_END__ = .;
    } >ROM
#else
    ro . : {
        __RO_START__ = .;
        *bl1_entrypoint.o(.text*)
        *(.text*)
        *(.rodata*)

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PARSER_LIB_DESCS_START__ = .;
        KEEP(*(.img_parser_lib_descs))
        __PARSER_LIB_DESCS_END__ = .;

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        *(.vectors)
        __RO_END__ = .;
    } >ROM
#endif

    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")
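
    /*
     * The __PARSER_LIB_DESCS_*__ and __CPU_OPS_*__ symbols above bound arrays
     * whose entries are contributed by individual objects through dedicated
     * input sections; KEEP() stops --gc-sections from discarding them even
     * though nothing references them by name. A minimal C sketch of this
     * registration/iteration pattern (the descriptor type, field names and
     * functions here are hypothetical, not the real TF-A declarations):
     *
     *   typedef struct example_desc {
     *       const char *name;
     *   } example_desc_t;
     *
     *   // Contribute one entry to the linker-collected array.
     *   static const example_desc_t example_entry
     *       __attribute__((used, section(".img_parser_lib_descs"),
     *                      aligned(8))) = {
     *           .name = "example",
     *   };
     *
     *   // The linker-defined bounds behave like the ends of an array.
     *   extern const example_desc_t __PARSER_LIB_DESCS_START__[];
     *   extern const example_desc_t __PARSER_LIB_DESCS_END__[];
     *
     *   static unsigned int count_descs(void)
     *   {
     *       unsigned int n = 0U;
     *
     *       for (const example_desc_t *d = __PARSER_LIB_DESCS_START__;
     *            d < __PARSER_LIB_DESCS_END__; d++)
     *           n++;
     *
     *       return n;
     *   }
     *
     * The ASSERT above applies the same idea to cpu_ops: if no object
     * contributed an entry, the two bounds coincide and the link fails.
     */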

    . = BL1_RW_BASE;
    ASSERT(BL1_RW_BASE == ALIGN(4096),
           "BL1_RW_BASE address is not aligned on a page boundary.")

    /*
     * The .data section gets copied from ROM to RAM at runtime.
     * Its LMA should be 16-byte aligned to allow efficient copying of 16-byte
     * aligned regions in it.
     * Its VMA must be page-aligned as it marks the first read/write page.
     *
     * It must be placed at a lower address than the stacks if the stack
     * protector is enabled. Alternatively, the .data.stack_protector_canary
     * section can be placed independently of the main .data section.
     */
    .data . : ALIGN(16) {
        __DATA_RAM_START__ = .;
        *(.data*)
        __DATA_RAM_END__ = .;
    } >RAM AT>ROM
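
    /*
     * __DATA_RAM_START__, __DATA_ROM_START__ and __DATA_SIZE__ (the latter
     * two are defined near the end of this script) let the early boot code
     * copy the initialised data from its load address in ROM to its run
     * address in RAM. A minimal C sketch of that copy, assuming a plain
     * memcpy is acceptable (in practice the copy is typically done very
     * early, from assembly, with an optimised 16-byte copy routine):
     *
     *   #include <stddef.h>
     *   #include <string.h>
     *
     *   // Linker-defined symbols: only their addresses carry meaning.
     *   extern char __DATA_RAM_START__[], __DATA_ROM_START__[],
     *               __DATA_SIZE__[];
     *
     *   static void copy_data_to_ram(void)
     *   {
     *       memcpy(__DATA_RAM_START__, __DATA_ROM_START__,
     *              (size_t)__DATA_SIZE__);
     *   }
     */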

    stacks . (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM
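
    /*
     * The stacks are gathered here from the tzfw_normal_stacks input section;
     * (NOLOAD) makes the section occupy RAM without contributing any bytes to
     * the loaded image. As a hedged illustration only (TF-A normally declares
     * the stacks from assembly via a platform macro, and the sizes below are
     * made up), a C object could be placed in this input section like so:
     *
     *   #define EXAMPLE_STACK_SIZE  0x1000
     *   #define EXAMPLE_CORE_COUNT  1
     *
     *   static unsigned long long example_stacks[EXAMPLE_CORE_COUNT]
     *                                           [EXAMPLE_STACK_SIZE / 8]
     *       __attribute__((used, section("tzfw_normal_stacks"), aligned(16)));
     */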

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 16-byte aligned for better performance of the
     * zero-initialization code.
     */
    .bss : ALIGN(16) {
        __BSS_START__ = .;
        *(.bss*)
        *(COMMON)
        __BSS_END__ = .;
    } >RAM
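
    /*
     * __BSS_START__ together with __BSS_SIZE__ (defined below) gives the
     * zero-initialisation code its work area. A minimal C sketch, assuming
     * memset is usable this early (BL1 typically clears .bss from assembly
     * before the C runtime is set up):
     *
     *   #include <stddef.h>
     *   #include <string.h>
     *
     *   extern char __BSS_START__[], __BSS_SIZE__[];
     *
     *   static void zero_bss(void)
     *   {
     *       memset(__BSS_START__, 0, (size_t)__BSS_SIZE__);
     *   }
     */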

    /*
     * The xlat_table section is for full, aligned page tables (4K).
     * Removing them from .bss avoids forcing 4K alignment on
     * the .bss section and eliminates the unnecessary zero initialisation.
     */
    xlat_table (NOLOAD) : {
        *(xlat_table)
    } >RAM
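
    /*
     * Objects land in this output section by being tagged with the xlat_table
     * input section name. A hedged sketch of how a translation table could be
     * placed here (the variable is hypothetical; the real tables are
     * allocated by the translation table library):
     *
     *   #include <stdint.h>
     *
     *   // One level of a 4KB-granule table: 512 64-bit descriptors.
     *   static uint64_t example_xlat_table[512]
     *       __attribute__((used, section("xlat_table"), aligned(4096)));
     */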

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data.  This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(4096) {
        __COHERENT_RAM_START__ = .;
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory.  No unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = NEXT(4096);
        __COHERENT_RAM_END__ = .;
    } >RAM
#endif
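
    /*
     * Data that must be visible to observers outside the CPU's coherency
     * domain is collected from the tzfw_coherent_mem input section, and the
     * pages holding it are mapped as device memory. A hedged illustration of
     * placing a variable there (the name is made up; TF-A normally does this
     * through a section helper macro):
     *
     *   static volatile unsigned int example_coherent_flag
     *       __attribute__((used, section("tzfw_coherent_mem")));
     */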

    __BL1_RAM_START__ = ADDR(.data);
    __BL1_RAM_END__ = .;

    __DATA_ROM_START__ = LOADADDR(.data);
    __DATA_SIZE__ = SIZEOF(.data);

    /*
     * The .data section is the last PROGBITS section so its end marks the end
     * of BL1's actual content in Trusted ROM.
     */
    __BL1_ROM_END__ = __DATA_ROM_START__ + __DATA_SIZE__;
    ASSERT(__BL1_ROM_END__ <= BL1_RO_LIMIT,
           "BL1's ROM content has exceeded its limit.")

    __BSS_SIZE__ = SIZEOF(.bss);

#if USE_COHERENT_MEM
    __COHERENT_RAM_UNALIGNED_SIZE__ =
        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif

    ASSERT(. <= BL1_RW_LIMIT, "BL1's RW section has exceeded its limit.")
}