xref: /rk3399_ARM-atf/bl32/tsp/tsp.ld.S (revision 82cb2c1ad9897473743f08437d0a3995bed561b9)
/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * GNU ld linker script for the Test Secure Payload (TSP), i.e. the BL32
 * image.  The .S extension lets the build system run this file through the
 * C preprocessor, so platform_def.h macros (TSP_SEC_MEM_BASE, BL32_BASE,
 * BL32_LIMIT, ...) and the SEPARATE_CODE_AND_RODATA / USE_COHERENT_MEM
 * build options can be used below.
 */

#include <platform_def.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(tsp_entrypoint)


/* The whole image lives in a single secure RAM region defined by the platform. */
MEMORY {
    RAM (rwx): ORIGIN = TSP_SEC_MEM_BASE, LENGTH = TSP_SEC_MEM_SIZE
}


SECTIONS
{
    . = BL32_BASE;
    ASSERT(. == ALIGN(4096),
           "BL32_BASE address is not aligned on a page boundary.")

#if SEPARATE_CODE_AND_RODATA
    /*
     * Code and read-only data are emitted as separate, page-padded output
     * sections so the MMU can map them with different attributes
     * (code: read-only executable; rodata: read-only execute-never).
     */
    .text . : {
        __TEXT_START__ = .;
        *tsp_entrypoint.o(.text*)       /* Entry-point code must come first. */
        *(.text*)
        *(.vectors)
        . = NEXT(4096);                 /* Pad so no rodata shares the last code page. */
        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(.rodata*)
        . = NEXT(4096);                 /* Pad so no RW data shares the last rodata page. */
        __RODATA_END__ = .;
    } >RAM
#else
    /* Combined code + read-only data section (single set of RO attributes). */
    ro . : {
        __RO_START__ = .;
        *tsp_entrypoint.o(.text*)       /* Entry-point code must come first. */
        *(.text*)
        *(.rodata*)
        *(.vectors)
        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as
         * read-only, executable.  No RW data from the next section must
         * creep in.  Ensure the rest of the current memory page is unused.
         */
        . = NEXT(4096);
        __RO_END__ = .;
    } >RAM
#endif

    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = . ;

    .data . : {
        __DATA_START__ = .;
        *(.data*)
        __DATA_END__ = .;
    } >RAM

#ifdef TSP_PROGBITS_LIMIT
    /* Optional platform cap on the loadable (progbits) part of the image. */
    ASSERT(. <= TSP_PROGBITS_LIMIT, "TSP progbits has exceeded its limit.")
#endif

    /* Per-CPU stacks; NOLOAD: occupies memory but is not part of the image file. */
    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 16-byte aligned for better performance of the
     * zero-initialization code.
     */
    .bss : ALIGN(16) {
        __BSS_START__ = .;
        *(SORT_BY_ALIGNMENT(.bss*))
        *(COMMON)
        __BSS_END__ = .;
    } >RAM

    /*
     * The xlat_table section is for full, aligned page tables (4K).
     * Removing them from .bss avoids forcing 4K alignment on
     * the .bss section and eliminates the unnecessary zero init
     */
    xlat_table (NOLOAD) : {
        *(xlat_table)
    } >RAM

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data.  This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(4096) {
        __COHERENT_RAM_START__ = .;
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory.  No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = NEXT(4096);
        __COHERENT_RAM_END__ = .;
    } >RAM
#endif

    /*
     * Define a linker symbol to mark the end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL32_END__ = .;

    /* Sizes consumed by the runtime zero-init / mapping code. */
    __BSS_SIZE__ = SIZEOF(.bss);
#if USE_COHERENT_MEM
    __COHERENT_RAM_UNALIGNED_SIZE__ =
        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif

    ASSERT(. <= BL32_LIMIT, "BL32 image has exceeded its limit.")
}