/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(tsp_entrypoint)

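/*
 * The whole image is linked into a single region of secure RAM. The base and
 * size, TSP_SEC_MEM_BASE and TSP_SEC_MEM_SIZE, are expected to be provided by
 * the platform (typically through its platform_def.h).
 */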
MEMORY {
    RAM (rwx): ORIGIN = TSP_SEC_MEM_BASE, LENGTH = TSP_SEC_MEM_SIZE
}

SECTIONS
{
    . = BL32_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL32_BASE address is not aligned on a page boundary.")

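    /*
     * The SEPARATE_CODE_AND_RODATA build option splits code and read-only
     * data into separate, page-aligned output sections so that they can be
     * mapped with different memory attributes. In both layouts the object
     * containing tsp_entrypoint is placed first, so the entry point ends up
     * at BL32_BASE. RODATA_COMMON is a helper macro from
     * common/bl_common.ld.h that pulls in read-only data structures shared
     * by all BL images.
     */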
#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
        *tsp_entrypoint.o(.text*)
        *(.text*)
        *(.vectors)
        . = ALIGN(PAGE_SIZE);
        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(.rodata*)

        RODATA_COMMON

        . = ALIGN(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM
#else
    ro . : {
        __RO_START__ = .;
        *tsp_entrypoint.o(.text*)
        *(.text*)
        *(.rodata*)

        RODATA_COMMON

        *(.vectors)

        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as
         * read-only, executable.  No RW data from the next section must
         * creep in.  Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __RO_END__ = .;
    } >RAM
#endif

    /*
     * Define a linker symbol to mark the start of the RW memory area for
     * this image.
     */
    __RW_START__ = . ;

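    /*
     * DATA_SECTION and RELA_SECTION are helper macros from
     * common/bl_common.ld.h: the former lays out the initialised writable
     * data (.data), the latter collects the .rela.dyn relocations that are
     * applied at run time when the image is built as position independent.
     */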
    DATA_SECTION >RAM
    RELA_SECTION >RAM

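    /*
     * Some platforms define TSP_PROGBITS_LIMIT to cap the loadable
     * (progbits) part of the image; the NOLOAD sections that follow are not
     * counted against it.
     */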
#ifdef TSP_PROGBITS_LIMIT
    ASSERT(. <= TSP_PROGBITS_LIMIT, "TSP progbits has exceeded its limit.")
#endif

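    /*
     * STACK_SECTION, BSS_SECTION and XLAT_TABLE_SECTION are helper macros
     * from common/bl_common.ld.h. They emit NOLOAD sections holding,
     * respectively, the per-CPU stacks, the zero-initialised data and the
     * translation tables, so none of them occupies space in the binary on
     * disk.
     */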
    STACK_SECTION >RAM
    BSS_SECTION >RAM
    XLAT_TABLE_SECTION >RAM

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data.  This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory.  No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __COHERENT_RAM_END__ = .;
    } >RAM
#endif

    /*
     * Define a linker symbol to mark the end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL32_END__ = .;

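    /*
     * The dynamic symbol and hash tables are only needed at static link
     * time; discarding them keeps them out of the final image. The .rela.dyn
     * relocations gathered by RELA_SECTION above are retained.
     */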
    /DISCARD/ : {
        *(.dynsym .dynstr .hash .gnu.hash)
    }

    __BSS_SIZE__ = SIZEOF(.bss);
#if USE_COHERENT_MEM
    __COHERENT_RAM_UNALIGNED_SIZE__ =
        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif

    ASSERT(. <= BL32_LIMIT, "BL32 image has exceeded its limit.")
}