xref: /rk3399_ARM-atf/bl31/bl31.ld.S (revision a926a9f60aa94a034b0a06eed296996363245d30)
/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl31_entrypoint)

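/*
 * BL31 is linked against the RAM region below. When SEPARATE_NOBITS_REGION
 * is enabled, the NOLOAD sections (stacks, .bss, translation tables) go into
 * a dedicated NOBITS region instead; otherwise NOBITS simply aliases RAM.
 */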
MEMORY {
    RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE
#if SEPARATE_NOBITS_REGION
    NOBITS (rw!a): ORIGIN = BL31_NOBITS_BASE, LENGTH = BL31_NOBITS_LIMIT - BL31_NOBITS_BASE
#else
#define NOBITS RAM
#endif
}

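/*
 * Platforms that define PLAT_EXTRA_LD_SCRIPT supply their own plat.ld.S to
 * add extra memory regions or output sections.
 */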
#ifdef PLAT_EXTRA_LD_SCRIPT
#include <plat.ld.S>
#endif

SECTIONS
{
    . = BL31_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL31_BASE address is not aligned on a page boundary.")

    __BL31_START__ = .;

#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
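        /*
         * bl31_entrypoint.o is placed first so that the image starts with
         * the BL31 entry point; the remaining input sections are sorted by
         * alignment to minimise padding.
         */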
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(.vectors)
        . = ALIGN(PAGE_SIZE);
        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(SORT_BY_ALIGNMENT(.rodata*))

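        /*
         * RODATA_COMMON, from common/bl_common.ld.h, gathers the read-only
         * descriptor arrays shared by the BL images (runtime service
         * descriptors, cpu_ops and so on).
         */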
        RODATA_COMMON

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>
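        /*
         * Included from a linker script, pubsub_events.h expands each
         * registered event into its own input section bounded by per-event
         * start/end symbols.
         */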

        . = ALIGN(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM
#else
    ro . : {
        __RO_START__ = .;
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(SORT_BY_ALIGNMENT(.rodata*))

        RODATA_COMMON

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        *(.vectors)
        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as read-only,
         * executable. RW data from the next section must not creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __RO_END__ = .;
    } >RAM
#endif

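    /*
     * __CPU_OPS_START__ and __CPU_OPS_END__ are defined by the CPU_OPS
     * entry pulled in through RODATA_COMMON; an empty cpu_ops array means
     * the platform has not registered any CPU operations.
     */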
    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")

#if SPM_MM
#ifndef SPM_SHIM_EXCEPTIONS_VMA
#define SPM_SHIM_EXCEPTIONS_VMA         RAM
#endif

    /*
     * Exception vectors of the SPM shim layer. The architecture requires them
     * to be aligned to a 2K boundary, but they are placed in a separate page
     * so that individual memory permissions can be applied to them; the
     * alignment actually required is therefore the 4K page size.
     *
     * There is no need to include this in the RO section of BL31 because it
     * does not need to be accessed by BL31.
     */
    spm_shim_exceptions : ALIGN(PAGE_SIZE) {
        __SPM_SHIM_EXCEPTIONS_START__ = .;
        *(.spm_shim_exceptions)
        . = ALIGN(PAGE_SIZE);
        __SPM_SHIM_EXCEPTIONS_END__ = .;
    } >SPM_SHIM_EXCEPTIONS_VMA AT>RAM

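    /*
     * Record the load address of the shim vectors and advance the location
     * counter past their load image so that, when a separate VMA is used,
     * the sections that follow do not overlap it in RAM.
     */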
    PROVIDE(__SPM_SHIM_EXCEPTIONS_LMA__ = LOADADDR(spm_shim_exceptions));
    . = LOADADDR(spm_shim_exceptions) + SIZEOF(spm_shim_exceptions);
#endif

    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = . ;

    /*
     * .data must be placed at a lower address than the stacks if the stack
     * protector is enabled. Alternatively, the .data.stack_protector_canary
     * section can be placed independently of the main .data section.
     */
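    /*
     * A minimal sketch of that alternative (not used here; the section name
     * follows the comment above): the canary could get its own output
     * section placed ahead of the stacks, e.g.
     *
     *     .stack_protector_canary . : {
     *         *(.data.stack_protector_canary)
     *     } >RAM
     */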
    .data . : {
        __DATA_START__ = .;
        *(SORT_BY_ALIGNMENT(.data*))
        __DATA_END__ = .;
    } >RAM

    /*
     * .rela.dyn needs to come after .data for the readelf utility to parse
     * this section correctly. Ensure 8-byte alignment so that the fields of
     * the RELA data structure are aligned.
     */
    . = ALIGN(8);
    __RELA_START__ = .;
    .rela.dyn . : {
    } >RAM
    __RELA_END__ = .;
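    /*
     * When BL31 is built as a position independent executable, the early
     * boot code walks [__RELA_START__, __RELA_END__) to apply these
     * relocations at runtime.
     */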

#ifdef BL31_PROGBITS_LIMIT
    ASSERT(. <= BL31_PROGBITS_LIMIT, "BL31 progbits has exceeded its limit.")
#endif

#if SEPARATE_NOBITS_REGION
    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    . = ALIGN(PAGE_SIZE);
    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")

    . = BL31_NOBITS_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL31 NOBITS base address is not aligned on a page boundary.")

    __NOBITS_START__ = .;
#endif

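    /*
     * STACK_SECTION, BSS_SECTION and XLAT_TABLE_SECTION come from
     * common/bl_common.ld.h and lay out the CPU stacks, .bss and the
     * translation tables as NOLOAD sections.
     */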
    STACK_SECTION >NOBITS
    BSS_SECTION >NOBITS
    XLAT_TABLE_SECTION >NOBITS

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data.  This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        /*
         * Bakery locks are stored in coherent memory.
         *
         * Each lock's data is contiguous and fully allocated by the compiler.
         */
        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory. Data from other sections must not creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __COHERENT_RAM_END__ = .;
    } >NOBITS
#endif

#if SEPARATE_NOBITS_REGION
    /*
     * Define a linker symbol to mark end of the NOBITS memory area for this
     * image.
     */
    __NOBITS_END__ = .;

    ASSERT(. <= BL31_NOBITS_LIMIT, "BL31 NOBITS region has exceeded its limit.")
#else
    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL31_END__ = .;

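    /*
     * The dynamic linking metadata below is only needed at static link time;
     * the runtime relocation code consumes only .rela.dyn, so these sections
     * are discarded to keep the image small.
     */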
    /DISCARD/ : {
        *(.dynsym .dynstr .hash .gnu.hash)
    }

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
#endif
}