/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
307c88f3f6SAchin Gupta
/* Platform-provided layout macros: TSP_SEC_MEM_*, BL32_BASE, BL32_LIMIT,
 * PLATFORM_LINKER_* and feature flags such as USE_COHERENT_MEM. */
#include <platform_def.h>

/* Output format/arch come from the platform so the same script serves
 * any supported toolchain target. */
OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
/* Execution starts at tsp_entrypoint (defined in tsp_entrypoint.o). */
ENTRY(tsp_entrypoint)
377c88f3f6SAchin Gupta
/*
 * Single memory region covering the secure memory carved out for the TSP.
 * All output sections below are placed into it; the linker errors out if
 * the image outgrows TSP_SEC_MEM_SIZE.
 */
MEMORY {
    RAM (rwx): ORIGIN = TSP_SEC_MEM_BASE, LENGTH = TSP_SEC_MEM_SIZE
}
417c88f3f6SAchin Gupta
427c88f3f6SAchin Gupta
SECTIONS
{
    /* Place the image at the platform-defined BL32 load address. */
    . = BL32_BASE;
    ASSERT(. == ALIGN(4096),
           "BL32_BASE address is not aligned on a page boundary.")

    /*
     * Read-only part of the image: code, read-only data and the exception
     * vectors.  tsp_entrypoint.o is matched first so the entry point code
     * lands at BL32_BASE itself.
     */
    ro . : {
        __RO_START__ = .;
        *tsp_entrypoint.o(.text*)
        *(.text*)
        *(.rodata*)
        *(.vectors)
        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as
         * read-only, executable.  No RW data from the next section must
         * creep in.  Ensure the rest of the current memory page is unused.
         */
        . = NEXT(4096);
        __RO_END__ = .;
    } >RAM

    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = . ;

    /* Initialised writable data, loaded as part of the image. */
    .data . : {
        __DATA_START__ = .;
        *(.data*)
        __DATA_END__ = .;
    } >RAM

    /*
     * Optional platform cap on the loadable (PROGBITS) footprint, i.e.
     * everything up to and including .data.
     */
#ifdef TSP_PROGBITS_LIMIT
    ASSERT(. <= TSP_PROGBITS_LIMIT, "TSP progbits has exceeded its limit.")
#endif

    /* Per-CPU stacks; NOLOAD as they need no initial contents in the image. */
    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address must be 16-byte aligned.
     */
    .bss : ALIGN(16) {
        __BSS_START__ = .;
        *(SORT_BY_ALIGNMENT(.bss*))
        *(COMMON)
        __BSS_END__ = .;
    } >RAM

    /*
     * The xlat_table section is for full, aligned page tables (4K).
     * Removing them from .bss avoids forcing 4K alignment on
     * the .bss section and eliminates the unnecessary zero init.
     */
    xlat_table (NOLOAD) : {
        *(xlat_table)
    } >RAM

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data.  This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(4096) {
        __COHERENT_RAM_START__ = .;
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory.  No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = NEXT(4096);
        __COHERENT_RAM_END__ = .;
    } >RAM
#endif

    /*
     * Define a linker symbol to mark the end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL32_END__ = .;

    /* Sizes consumed by the boot code when zero-initialising memory. */
    __BSS_SIZE__ = SIZEOF(.bss);
#if USE_COHERENT_MEM
    __COHERENT_RAM_UNALIGNED_SIZE__ =
        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif

    ASSERT(. <= BL32_LIMIT, "BL3-2 image has exceeded its limit.")
}
143