/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <lib/xlat_tables/xlat_tables_defs.h>
#include <platform_def.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(tsp_entrypoint)

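/*
 * A single RAM region covers all of the TSP's secure memory. Its base and
 * size are provided by the platform through TSP_SEC_MEM_BASE and
 * TSP_SEC_MEM_SIZE (see platform_def.h).
 */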
MEMORY {
    RAM (rwx): ORIGIN = TSP_SEC_MEM_BASE, LENGTH = TSP_SEC_MEM_SIZE
}

SECTIONS
{
    . = BL32_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL32_BASE address is not aligned on a page boundary.")

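    /*
     * With SEPARATE_CODE_AND_RODATA enabled, code and read-only data get
     * their own page-aligned output sections so that they can be mapped
     * with different attributes: code as read-only executable and rodata
     * as read-only, non-executable.
     */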
#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
        *tsp_entrypoint.o(.text*)
        *(.text*)
        *(.vectors)
        . = ALIGN(PAGE_SIZE);
        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(.rodata*)

        /*
         * Keep the .got section in the RO section: it is patched prior to
         * enabling the MMU, and keeping the GOT read-only afterwards is
         * better for security. The GOT is a table of addresses, so ensure
         * 8-byte alignment.
         */
        . = ALIGN(8);
        __GOT_START__ = .;
        *(.got)
        __GOT_END__ = .;

        . = ALIGN(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM
#else
    ro . : {
        __RO_START__ = .;
        *tsp_entrypoint.o(.text*)
        *(.text*)
        *(.rodata*)

        /*
         * Keep the .got section in the RO section: it is patched prior to
         * enabling the MMU, and keeping the GOT read-only afterwards is
         * better for security. The GOT is a table of addresses, so ensure
         * 8-byte alignment.
         */
        . = ALIGN(8);
        __GOT_START__ = .;
        *(.got)
        __GOT_END__ = .;

        *(.vectors)

        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as
         * read-only, executable.  No RW data from the next section must
         * creep in.  Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __RO_END__ = .;
    } >RAM
#endif

    /*
     * Define a linker symbol to mark the start of the RW memory area for
     * this image.
     */
    __RW_START__ = .;

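    /*
     * Initialised, writable data. Unlike the NOLOAD sections further down,
     * this is loaded as part of the image.
     */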
    .data . : {
        __DATA_START__ = .;
        *(.data*)
        __DATA_END__ = .;
    } >RAM

    /*
     * .rela.dyn needs to come after .data for the readelf utility to parse
     * this section correctly. Ensure 8-byte alignment so that the fields of
     * the RELA data structure are aligned.
     */
    . = ALIGN(8);
    __RELA_START__ = .;
    .rela.dyn . : {
    } >RAM
    __RELA_END__ = .;

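    /*
     * Everything up to this point occupies space in the image file
     * (progbits); the NOLOAD sections that follow do not. A platform may
     * define TSP_PROGBITS_LIMIT to cap the size of the loaded content.
     */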
#ifdef TSP_PROGBITS_LIMIT
    ASSERT(. <= TSP_PROGBITS_LIMIT, "TSP progbits has exceeded its limit.")
#endif

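    /*
     * Per-CPU stacks, collected from the tzfw_normal_stacks input section.
     * NOLOAD: the stacks occupy memory at runtime but no space in the
     * image file.
     */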
    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 16-byte aligned for better performance of the
     * zero-initialization code.
     */
    .bss : ALIGN(16) {
        __BSS_START__ = .;
        *(SORT_BY_ALIGNMENT(.bss*))
        *(COMMON)
        __BSS_END__ = .;
    } >RAM

    /*
     * The xlat_table section is for full, aligned page tables (4K).
     * Removing them from .bss avoids forcing 4K alignment on
     * the .bss section. The tables are initialized to zero by the translation
     * tables library.
     */
    xlat_table (NOLOAD) : {
        *(xlat_table)
    } >RAM

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data.  This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory.  No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __COHERENT_RAM_END__ = .;
    } >RAM
#endif

    /*
     * Define a linker symbol to mark the end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL32_END__ = .;

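    /*
     * Dynamic linking metadata emitted when building as PIE. The image
     * relocates itself using .rela.dyn alone, so these sections are not
     * needed at runtime and can be discarded.
     */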
    /DISCARD/ : {
        *(.dynsym .dynstr .hash .gnu.hash)
    }

    __BSS_SIZE__ = SIZEOF(.bss);
#if USE_COHERENT_MEM
    __COHERENT_RAM_UNALIGNED_SIZE__ =
        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif

    ASSERT(. <= BL32_LIMIT, "BL32 image has exceeded its limit.")
}