/*
 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl31_entrypoint)

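/*
 * Memory regions available to the sections below. RAM covers the area
 * reserved for the BL31 image itself (BL31_BASE to BL31_LIMIT). When
 * SEPARATE_NOBITS_REGION is enabled, the NOLOAD sections (stacks, BSS,
 * translation tables) are placed in a separate NOBITS region instead;
 * otherwise NOBITS is simply an alias for RAM.
 */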
MEMORY {
    RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE

#if SEPARATE_NOBITS_REGION
    NOBITS (rw!a): ORIGIN = BL31_NOBITS_BASE, LENGTH = BL31_NOBITS_LIMIT - BL31_NOBITS_BASE
#else /* SEPARATE_NOBITS_REGION */
#   define NOBITS RAM
#endif /* SEPARATE_NOBITS_REGION */
}

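/*
 * Platforms that set PLAT_EXTRA_LD_SCRIPT can contribute additional linker
 * script content (e.g. extra memory regions) through their own plat.ld.S.
 */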
#ifdef PLAT_EXTRA_LD_SCRIPT
#   include <plat.ld.S>
#endif /* PLAT_EXTRA_LD_SCRIPT */

SECTIONS {
    . = BL31_BASE;

    ASSERT(. == ALIGN(PAGE_SIZE),
        "BL31_BASE address is not aligned on a page boundary.")

    __BL31_START__ = .;

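    /*
     * With SEPARATE_CODE_AND_RODATA, code and read-only data are emitted as
     * two separately page-aligned output sections so they can be mapped with
     * different attributes (executable vs. execute-never). Otherwise both are
     * combined into a single read-only .ro section.
     */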
#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;

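        /*
         * Place the entry point object first so that bl31_entrypoint ends up
         * at BL31_BASE. The remaining input sections are sorted by alignment
         * to minimise padding between them.
         */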
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(SORT(.text*)))
        *(.vectors)

        . = ALIGN(PAGE_SIZE);

        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;

        *(SORT_BY_ALIGNMENT(.rodata*))

#   if PLAT_EXTRA_RODATA_INCLUDES
#       include <plat.ld.rodata.inc>
#   endif /* PLAT_EXTRA_RODATA_INCLUDES */

        RODATA_COMMON

        . = ALIGN(8);

#   include <lib/el3_runtime/pubsub_events.h>

        . = ALIGN(PAGE_SIZE);

        __RODATA_END__ = .;
    } >RAM
#else /* SEPARATE_CODE_AND_RODATA */
    .ro . : {
        __RO_START__ = .;

        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(SORT_BY_ALIGNMENT(.rodata*))

        RODATA_COMMON

        . = ALIGN(8);

#   include <lib/el3_runtime/pubsub_events.h>

        *(.vectors)

        __RO_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as read-only,
         * executable. RW data from the next section must not creep in, so
         * ensure that the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);

        __RO_END__ = .;
    } >RAM
#endif /* SEPARATE_CODE_AND_RODATA */

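    /*
     * The cpu_ops descriptors are collected into the read-only data above
     * (via RODATA_COMMON from common/bl_common.ld.h); the platform must
     * register at least one of them.
     */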
    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
        "cpu_ops not defined for this platform.")

#if SPM_MM
#   ifndef SPM_SHIM_EXCEPTIONS_VMA
#       define SPM_SHIM_EXCEPTIONS_VMA RAM
#   endif /* SPM_SHIM_EXCEPTIONS_VMA */

    /*
     * Exception vectors of the SPM shim layer. They must be aligned to a 2KB
     * boundary, but they are placed in a separate page so that individual
     * permissions can be set on them; the alignment actually needed is
     * therefore the page size.
     *
     * There is no need to include this in the RO section of BL31 because it
     * does not need to be accessed by BL31.
     */
    .spm_shim_exceptions : ALIGN(PAGE_SIZE) {
        __SPM_SHIM_EXCEPTIONS_START__ = .;

        *(.spm_shim_exceptions)

        . = ALIGN(PAGE_SIZE);

        __SPM_SHIM_EXCEPTIONS_END__ = .;
    } >SPM_SHIM_EXCEPTIONS_VMA AT>RAM

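    /*
     * Export the load address of the shim vectors and advance the location
     * counter past their load area, since the section's VMA may live in a
     * memory region other than RAM.
     */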
    PROVIDE(__SPM_SHIM_EXCEPTIONS_LMA__ = LOADADDR(.spm_shim_exceptions));

    . = LOADADDR(.spm_shim_exceptions) + SIZEOF(.spm_shim_exceptions);
#endif /* SPM_MM */

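    /*
     * Start of the read-write part of the image. __RW_START__ and __RW_END__
     * delimit the data that BL31 may modify at runtime and that is therefore
     * mapped with read-write, execute-never attributes.
     */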
    __RW_START__ = .;

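    /*
     * DATA_SECTION and RELA_SECTION are common linker script macros (from
     * common/bl_common.ld.h) that emit the initialised data and the runtime
     * relocation entries respectively.
     */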
    DATA_SECTION >RAM
    RELA_SECTION >RAM

#ifdef BL31_PROGBITS_LIMIT
    ASSERT(
        . <= BL31_PROGBITS_LIMIT,
        "BL31 progbits has exceeded its limit. Consider disabling some features."
    )
#endif /* BL31_PROGBITS_LIMIT */

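    /*
     * With SEPARATE_NOBITS_REGION, the loadable (progbits) part of BL31 ends
     * here and the NOLOAD sections below are placed in the dedicated NOBITS
     * memory region rather than directly after the image.
     */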
#if SEPARATE_NOBITS_REGION
    . = ALIGN(PAGE_SIZE);

    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")

    . = BL31_NOBITS_BASE;

    ASSERT(. == ALIGN(PAGE_SIZE),
        "BL31 NOBITS base address is not aligned on a page boundary.")

    __NOBITS_START__ = .;
#endif /* SEPARATE_NOBITS_REGION */

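    /*
     * NOLOAD sections: CPU stacks, BSS and the translation tables. These
     * consume memory at runtime but occupy no space in the loaded image.
     */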
    STACK_SECTION >NOBITS
    BSS_SECTION >NOBITS
    XLAT_TABLE_SECTION >NOBITS

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned to
     * guarantee that the coherent data are stored on their own pages and are
     * not mixed with normal data. This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    .coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;

        /*
         * Bakery locks are stored in coherent memory. Each lock's data is
         * contiguous and fully allocated by the compiler.
         */
        *(.bakery_lock)
        *(.tzfw_coherent_mem)

        __COHERENT_RAM_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as device
         * memory. No unrelated data must be allowed to creep in; ensure the
         * rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);

        __COHERENT_RAM_END__ = .;
    } >NOBITS
#endif /* USE_COHERENT_MEM */

#if SEPARATE_NOBITS_REGION
    __NOBITS_END__ = .;

    ASSERT(. <= BL31_NOBITS_LIMIT, "BL31 NOBITS region has exceeded its limit.")
#else /* SEPARATE_NOBITS_REGION */
    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
#endif /* SEPARATE_NOBITS_REGION */

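    /*
     * The dynamic symbol and hash tables are not needed at runtime, so
     * discard them rather than letting them grow the image.
     */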
    /DISCARD/ : {
        *(.dynsym .dynstr .hash .gnu.hash)
    }
}