/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl31_entrypoint)


MEMORY {
    RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE
#if SEPARATE_NOBITS_REGION
    NOBITS (rw!a): ORIGIN = BL31_NOBITS_BASE, LENGTH = BL31_NOBITS_LIMIT - BL31_NOBITS_BASE
#else
#define NOBITS RAM
#endif
}
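
/*
 * Note: when SEPARATE_NOBITS_REGION is enabled, the zero-initialised output
 * sections (stacks, .bss, translation tables and, if used, coherent memory)
 * are placed in the dedicated NOBITS region above, so they can live outside
 * the memory that holds the loaded BL31 image. Otherwise NOBITS is simply an
 * alias for RAM and everything shares one region.
 */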

#ifdef PLAT_EXTRA_LD_SCRIPT
#include <plat.ld.S>
#endif

SECTIONS
{
    . = BL31_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL31_BASE address is not aligned on a page boundary.")

    __BL31_START__ = .;

#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(.vectors)
        . = ALIGN(PAGE_SIZE);
        __TEXT_END__ = .;
    } >RAM
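
    /*
     * Note: bl31_entrypoint.o is pulled in first above so that the entry
     * code sits at the very start of the image, i.e. at BL31_BASE, which is
     * normally the address used as BL31's entry point (see
     * ENTRY(bl31_entrypoint) above).
     */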

    .rodata . : {
        __RODATA_START__ = .;
        *(SORT_BY_ALIGNMENT(.rodata*))

        RODATA_COMMON

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        . = ALIGN(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM
#else
    ro . : {
        __RO_START__ = .;
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(SORT_BY_ALIGNMENT(.rodata*))

        RODATA_COMMON

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        *(.vectors)
        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as read-only,
         * executable.  No RW data from the next section must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __RO_END__ = .;
    } >RAM
#endif

    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")
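
    /*
     * Note: __CPU_OPS_START__/__CPU_OPS_END__ bracket the cpu_ops descriptors
     * collected into the read-only data (via RODATA_COMMON above); the
     * assertion fails at link time if the platform registered no CPU with
     * declare_cpu_ops.
     */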

#if SPM_MM
#ifndef SPM_SHIM_EXCEPTIONS_VMA
#define SPM_SHIM_EXCEPTIONS_VMA         RAM
#endif

    /*
     * Exception vectors of the SPM shim layer. They must be aligned to a 2K
     * boundary, but they are given a page of their own so that individual
     * memory permissions can be set for them; the alignment actually
     * required is therefore 4K.
     *
     * There is no need to include this in the RO section of BL31 because it
     * does not need to be accessed by BL31.
     */
    spm_shim_exceptions : ALIGN(PAGE_SIZE) {
        __SPM_SHIM_EXCEPTIONS_START__ = .;
        *(.spm_shim_exceptions)
        . = ALIGN(PAGE_SIZE);
        __SPM_SHIM_EXCEPTIONS_END__ = .;
    } >SPM_SHIM_EXCEPTIONS_VMA AT>RAM

    PROVIDE(__SPM_SHIM_EXCEPTIONS_LMA__ = LOADADDR(spm_shim_exceptions));
    . = LOADADDR(spm_shim_exceptions) + SIZEOF(spm_shim_exceptions);
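
    /*
     * Note: if a platform redirects SPM_SHIM_EXCEPTIONS_VMA, the vectors are
     * linked at that address but still loaded within RAM (AT>RAM above).
     * __SPM_SHIM_EXCEPTIONS_LMA__ exposes that load address, and the location
     * counter is then moved past the vectors' load image so the following
     * sections do not overlap it.
     */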
#endif

    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = . ;

    DATA_SECTION >RAM

    /*
     * .rela.dyn needs to come after .data for the readelf utility to parse
     * this section correctly. Ensure 8-byte alignment so that the fields of
     * the RELA data structure are aligned.
     */
    . = ALIGN(8);
    __RELA_START__ = .;
    .rela.dyn . : {
    } >RAM
    __RELA_END__ = .;
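
    /*
     * For reference (ELF64 ABI): each entry the linker emits here when BL31
     * is linked as a position-independent image (ENABLE_PIE) is a 24-byte
     * Elf64_Rela record of three naturally aligned 64-bit fields:
     *     r_offset - location the relocation applies to
     *     r_info   - relocation type and symbol index
     *     r_addend - constant addend
     * so the ALIGN(8) above is sufficient.
     */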

#ifdef BL31_PROGBITS_LIMIT
    ASSERT(. <= BL31_PROGBITS_LIMIT, "BL31 progbits has exceeded its limit.")
#endif

#if SEPARATE_NOBITS_REGION
    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    . = ALIGN(PAGE_SIZE);
    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")

    . = BL31_NOBITS_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL31 NOBITS base address is not aligned on a page boundary.")

    __NOBITS_START__ = .;
#endif

    STACK_SECTION >NOBITS
    BSS_SECTION >NOBITS
    XLAT_TABLE_SECTION >NOBITS
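
    /*
     * Note: DATA_SECTION, STACK_SECTION, BSS_SECTION and XLAT_TABLE_SECTION
     * are helper macros provided by common/bl_common.ld.h (included at the
     * top of this file); each expands to the corresponding output section
     * definition. The three placed in NOBITS above are NOLOAD sections and
     * occupy no space in the loaded image.
     */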

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data.  This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        /*
         * Bakery locks are stored in coherent memory
         *
         * Each lock's data is contiguous and fully allocated by the compiler
         */
        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory.  No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __COHERENT_RAM_END__ = .;
    } >NOBITS
#endif

#if SEPARATE_NOBITS_REGION
    /*
     * Define a linker symbol to mark end of the NOBITS memory area for this
     * image.
     */
    __NOBITS_END__ = .;

    ASSERT(. <= BL31_NOBITS_LIMIT, "BL31 NOBITS region has exceeded its limit.")
#else
    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL31_END__ = .;

    /DISCARD/ : {
        *(.dynsym .dynstr .hash .gnu.hash)
    }

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
#endif
}