/* xref: /rk3399_ARM-atf/bl31/bl31.ld.S (revision 2fe75a2de087ec23162c5fd25ba439bd330ea50c) */
/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/* Platform-provided base/limit macros (BL31_BASE, BL31_LIMIT, ...). */
#include <platform_def.h>

/* Provides PAGE_SIZE used for the alignment asserts below. */
#include <lib/xlat_tables/xlat_tables_defs.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)

/* BL31 execution starts at bl31_entrypoint (placed first in .text/ro). */
ENTRY(bl31_entrypoint)

/*
 * Output memory regions.
 *
 * RAM holds everything by default. When SEPARATE_NOBITS_REGION is set,
 * the zero-initialised (NOLOAD) output sections are routed to a distinct
 * NOBITS region instead; otherwise NOBITS is simply an alias for RAM.
 */
MEMORY {
    RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE
#if SEPARATE_NOBITS_REGION
    NOBITS (rw!a): ORIGIN = BL31_NOBITS_BASE, LENGTH = BL31_NOBITS_LIMIT - BL31_NOBITS_BASE
#else
#define NOBITS RAM
#endif
}

/* Optional hook for platform-specific additions to this linker script. */
#ifdef PLAT_EXTRA_LD_SCRIPT
#include <plat.ld.S>
#endif

SECTIONS
{
    . = BL31_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL31_BASE address is not aligned on a page boundary.")

    __BL31_START__ = .;

#if SEPARATE_CODE_AND_RODATA
    /*
     * Code and read-only data live in separate, page-aligned output
     * sections so they can be mapped with different memory attributes.
     */
    .text . : {
        __TEXT_START__ = .;
        /* Entrypoint object first, so bl31_entrypoint sits at BL31_BASE. */
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(.vectors)
        . = ALIGN(PAGE_SIZE);
        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(SORT_BY_ALIGNMENT(.rodata*))

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

#if ENABLE_PMF
        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PMF_SVC_DESCS_START__ = .;
        KEEP(*(pmf_svc_descs))
        __PMF_SVC_DESCS_END__ = .;
#endif /* ENABLE_PMF */

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /*
         * Keep the .got section in the RO section as it is patched
         * prior to enabling the MMU and having the .got in RO is better for
         * security. GOT is a table of addresses so ensure 8-byte alignment.
         */
        . = ALIGN(8);
        __GOT_START__ = .;
        *(.got)
        __GOT_END__ = .;

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        . = ALIGN(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM
#else
    /* Combined read-only section: code, rodata and vectors together. */
    ro . : {
        __RO_START__ = .;
        /* Entrypoint object first, so bl31_entrypoint sits at BL31_BASE. */
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(SORT_BY_ALIGNMENT(.rodata*))

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

#if ENABLE_PMF
        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PMF_SVC_DESCS_START__ = .;
        KEEP(*(pmf_svc_descs))
        __PMF_SVC_DESCS_END__ = .;
#endif /* ENABLE_PMF */

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /*
         * Keep the .got section in the RO section as it is patched
         * prior to enabling the MMU and having the .got in RO is better for
         * security. GOT is a table of addresses so ensure 8-byte alignment.
         */
        . = ALIGN(8);
        __GOT_START__ = .;
        *(.got)
        __GOT_END__ = .;

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        *(.vectors)
        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as read-only,
         * executable.  No RW data from the next section must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __RO_END__ = .;
    } >RAM
#endif

    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")

#if SPM_MM
#ifndef SPM_SHIM_EXCEPTIONS_VMA
#define SPM_SHIM_EXCEPTIONS_VMA         RAM
#endif

    /*
     * Exception vectors of the SPM shim layer. They must be aligned to a 2K
     * address, but we need to place them in a separate page so that we can set
     * individual permissions to them, so the actual alignment needed is 4K.
     *
     * There's no need to include this into the RO section of BL31 because it
     * doesn't need to be accessed by BL31.
     */
    spm_shim_exceptions : ALIGN(PAGE_SIZE) {
        __SPM_SHIM_EXCEPTIONS_START__ = .;
        *(.spm_shim_exceptions)
        . = ALIGN(PAGE_SIZE);
        __SPM_SHIM_EXCEPTIONS_END__ = .;
    } >SPM_SHIM_EXCEPTIONS_VMA AT>RAM

    PROVIDE(__SPM_SHIM_EXCEPTIONS_LMA__ = LOADADDR(spm_shim_exceptions));
    /* Resume layout after the shim's load addresses in RAM. */
    . = LOADADDR(spm_shim_exceptions) + SIZEOF(spm_shim_exceptions);
#endif

    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = . ;

    /*
     * .data must be placed at a lower address than the stacks if the stack
     * protector is enabled. Alternatively, the .data.stack_protector_canary
     * section can be placed independently of the main .data section.
     */
    .data . : {
        __DATA_START__ = .;
        *(SORT_BY_ALIGNMENT(.data*))
        __DATA_END__ = .;
    } >RAM

    /*
     * .rela.dyn needs to come after .data for the read-elf utility to parse
     * this section correctly. Ensure 8-byte alignment so that the fields of
     * RELA data structure are aligned.
     */
    . = ALIGN(8);
    __RELA_START__ = .;
    .rela.dyn . : {
    } >RAM
    __RELA_END__ = .;

#ifdef BL31_PROGBITS_LIMIT
    ASSERT(. <= BL31_PROGBITS_LIMIT, "BL31 progbits has exceeded its limit.")
#endif

#if SEPARATE_NOBITS_REGION
    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    . = ALIGN(PAGE_SIZE);
    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")

    /* Jump the location counter to the separate NOBITS region. */
    . = BL31_NOBITS_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL31 NOBITS base address is not aligned on a page boundary.")

    __NOBITS_START__ = .;
#endif

    /* Per-CPU stacks; NOLOAD, so they occupy no space in the image file. */
    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >NOBITS

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 16-byte aligned for better performance of the
     * zero-initialization code.
     */
    .bss (NOLOAD) : ALIGN(16) {
        __BSS_START__ = .;
        *(SORT_BY_ALIGNMENT(.bss*))
        *(COMMON)
#if !USE_COHERENT_MEM
        /*
         * Bakery locks are stored in normal .bss memory
         *
         * Each lock's data is spread across multiple cache lines, one per CPU,
         * but multiple locks can share the same cache line.
         * The compiler will allocate enough memory for one CPU's bakery locks,
         * the remaining cache lines are allocated by the linker script
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __BAKERY_LOCK_START__ = .;
        __PERCPU_BAKERY_LOCK_START__ = .;
        *(bakery_lock)
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PERCPU_BAKERY_LOCK_END__ = .;
        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(__PERCPU_BAKERY_LOCK_END__ - __PERCPU_BAKERY_LOCK_START__);
        /* Reserve one copy of the per-CPU lock area for each remaining CPU. */
        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __BAKERY_LOCK_END__ = .;

        /*
         * If BL31 doesn't use any bakery lock then __PERCPU_BAKERY_LOCK_SIZE__
         * will be zero. For this reason, the only two valid values for
         * __PERCPU_BAKERY_LOCK_SIZE__ are 0 or the platform defined value
         * PLAT_PERCPU_BAKERY_LOCK_SIZE.
         */
#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
    ASSERT((__PERCPU_BAKERY_LOCK_SIZE__ == 0) || (__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE),
        "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
#endif
#endif

#if ENABLE_PMF
        /*
         * Time-stamps are stored in normal .bss memory
         *
         * The compiler will allocate enough memory for one CPU's time-stamps,
         * the remaining memory for other CPUs is allocated by the
         * linker script
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_TIMESTAMP_START__ = .;
        KEEP(*(pmf_timestamp_array))
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_PERCPU_TIMESTAMP_END__ = .;
        __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
        /* Reserve one timestamp area for each remaining CPU. */
        . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __PMF_TIMESTAMP_END__ = .;
#endif /* ENABLE_PMF */
        __BSS_END__ = .;
    } >NOBITS

    /*
     * The xlat_table section is for full, aligned page tables (4K).
     * Removing them from .bss avoids forcing 4K alignment on
     * the .bss section. The tables are initialized to zero by the translation
     * tables library.
     */
    xlat_table (NOLOAD) : {
        *(xlat_table)
    } >NOBITS

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data.  This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        /*
         * Bakery locks are stored in coherent memory
         *
         * Each lock's data is contiguous and fully allocated by the compiler
         */
        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory.  No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __COHERENT_RAM_END__ = .;
    } >NOBITS
#endif

#if SEPARATE_NOBITS_REGION
    /*
     * Define a linker symbol to mark end of the NOBITS memory area for this
     * image.
     */
    __NOBITS_END__ = .;

    ASSERT(. <= BL31_NOBITS_LIMIT, "BL31 NOBITS region has exceeded its limit.")
#else
    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL31_END__ = .;

    /DISCARD/ : {
        *(.dynsym .dynstr .hash .gnu.hash)
    }

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
#endif
}