xref: /rk3399_ARM-atf/bl32/tsp/tsp.ld.S (revision 665e71b8ea28162ec7737c1411bca3ea89e5957e)
/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <platform_def.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(tsp_entrypoint)


/* The whole TSP image lives in a single secure RAM region. */
MEMORY {
    RAM (rwx): ORIGIN = TSP_SEC_MEM_BASE, LENGTH = TSP_SEC_MEM_SIZE
}


SECTIONS
{
    . = BL32_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL32_BASE address is not aligned on a page boundary.")

#if SEPARATE_CODE_AND_RODATA
    /*
     * Code and read-only data are emitted as separate, page-aligned
     * sections so they can be mapped with distinct memory attributes
     * (executable vs. read-only non-executable).
     */
    .text . : {
        __TEXT_START__ = .;
        *tsp_entrypoint.o(.text*)
        *(.text*)
        *(.vectors)
        . = ALIGN(PAGE_SIZE);
        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(.rodata*)

        /*
         * Keep the .got section in the RO section as it is patched
         * prior to enabling the MMU and having the .got in RO is better for
         * security. GOT is a table of addresses so ensure 8-byte alignment.
         */
        . = ALIGN(8);
        __GOT_START__ = .;
        *(.got)
        __GOT_END__ = .;

        . = ALIGN(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM
#else
    ro . : {
        __RO_START__ = .;
        *tsp_entrypoint.o(.text*)
        *(.text*)
        *(.rodata*)

        /*
         * Keep the .got section in the RO section as it is patched
         * prior to enabling the MMU and having the .got in RO is better for
         * security. GOT is a table of addresses so ensure 8-byte alignment.
         */
        . = ALIGN(8);
        __GOT_START__ = .;
        *(.got)
        __GOT_END__ = .;

        *(.vectors)

        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as
         * read-only, executable.  No RW data from the next section must
         * creep in.  Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __RO_END__ = .;
    } >RAM
#endif

    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = . ;

    .data . : {
        __DATA_START__ = .;
        *(.data*)
        __DATA_END__ = .;
    } >RAM

    /*
     * .rela.dyn needs to come after .data for the read-elf utility to parse
     * this section correctly. Ensure 8-byte alignment so that the fields of
     * RELA data structure are aligned.
     */
    . = ALIGN(8);
    __RELA_START__ = .;
    .rela.dyn . : {
    } >RAM
    __RELA_END__ = .;

#ifdef TSP_PROGBITS_LIMIT
    ASSERT(. <= TSP_PROGBITS_LIMIT, "TSP progbits has exceeded its limit.")
#endif

    /* Per-CPU stacks; NOLOAD: no contents in the image, zero size on disk. */
    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 16-byte aligned for better performance of the
     * zero-initialization code.
     */
    .bss : ALIGN(16) {
        __BSS_START__ = .;
        *(SORT_BY_ALIGNMENT(.bss*))
        *(COMMON)
        __BSS_END__ = .;
    } >RAM

    /* Translation tables; macro expanded from common/bl_common.ld.h. */
    XLAT_TABLE_SECTION >RAM

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data.  This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory.  No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __COHERENT_RAM_END__ = .;
    } >RAM
#endif

    /*
     * Define a linker symbol to mark the end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL32_END__ = .;

    /* Dynamic-linking metadata is not needed at runtime; drop it. */
    /DISCARD/ : {
        *(.dynsym .dynstr .hash .gnu.hash)
    }

    __BSS_SIZE__ = SIZEOF(.bss);
#if USE_COHERENT_MEM
    __COHERENT_RAM_UNALIGNED_SIZE__ =
        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif

    ASSERT(. <= BL32_LIMIT, "BL32 image has exceeded its limit.")
}