/* xref: /rk3399_ARM-atf/bl31/bl31.ld.S (revision f29d1e0c72e6665ba4c8ab11bad83f59669ea0d9) */
/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * Linker script for the BL31 (EL3 runtime firmware) image.
 *
 * Lays out the image inside the RAM region [BL31_BASE, BL31_LIMIT), with an
 * optional separate NOBITS region when SEPARATE_NOBITS_REGION is set. Page
 * alignment of the RO/RW boundaries is required so the MMU can map each
 * region with the correct memory attributes.
 */

#include <platform_def.h>

#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl31_entrypoint)


MEMORY {
    RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE
#if SEPARATE_NOBITS_REGION
    NOBITS (rw!a): ORIGIN = BL31_NOBITS_BASE, LENGTH = BL31_NOBITS_LIMIT - BL31_NOBITS_BASE
#else
/* When there is no dedicated NOBITS region, NOLOAD sections live in RAM too. */
#define NOBITS RAM
#endif
}

#ifdef PLAT_EXTRA_LD_SCRIPT
#include <plat.ld.S>
#endif

SECTIONS
{
    . = BL31_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL31_BASE address is not aligned on a page boundary.")

    __BL31_START__ = .;

#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
        /* The entrypoint must come first so it sits at BL31_BASE. */
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(.vectors)
        . = ALIGN(PAGE_SIZE);
        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(SORT_BY_ALIGNMENT(.rodata*))

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

        . = ALIGN(8);
        __FCONF_POPULATOR_START__ = .;
        KEEP(*(.fconf_populator))
        __FCONF_POPULATOR_END__ = .;

#if ENABLE_PMF
        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PMF_SVC_DESCS_START__ = .;
        KEEP(*(pmf_svc_descs))
        __PMF_SVC_DESCS_END__ = .;
#endif /* ENABLE_PMF */

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /*
         * Keep the .got section in the RO section as it is patched
         * prior to enabling the MMU and having the .got in RO is better for
         * security. GOT is a table of addresses so ensure 8-byte alignment.
         */
        . = ALIGN(8);
        __GOT_START__ = .;
        *(.got)
        __GOT_END__ = .;

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        . = ALIGN(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM
#else
    ro . : {
        __RO_START__ = .;
        /* The entrypoint must come first so it sits at BL31_BASE. */
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(SORT_BY_ALIGNMENT(.rodata*))

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

        . = ALIGN(8);
        __FCONF_POPULATOR_START__ = .;
        KEEP(*(.fconf_populator))
        __FCONF_POPULATOR_END__ = .;

#if ENABLE_PMF
        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PMF_SVC_DESCS_START__ = .;
        KEEP(*(pmf_svc_descs))
        __PMF_SVC_DESCS_END__ = .;
#endif /* ENABLE_PMF */

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /*
         * Keep the .got section in the RO section as it is patched
         * prior to enabling the MMU and having the .got in RO is better for
         * security. GOT is a table of addresses so ensure 8-byte alignment.
         */
        . = ALIGN(8);
        __GOT_START__ = .;
        *(.got)
        __GOT_END__ = .;

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        *(.vectors)
        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as read-only,
         * executable.  No RW data from the next section must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __RO_END__ = .;
    } >RAM
#endif

    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")

#if SPM_MM
#ifndef SPM_SHIM_EXCEPTIONS_VMA
#define SPM_SHIM_EXCEPTIONS_VMA         RAM
#endif

    /*
     * Exception vectors of the SPM shim layer. They must be aligned to a 2K
     * address, but we need to place them in a separate page so that we can set
     * individual permissions to them, so the actual alignment needed is 4K.
     *
     * There's no need to include this into the RO section of BL31 because it
     * doesn't need to be accessed by BL31.
     */
    spm_shim_exceptions : ALIGN(PAGE_SIZE) {
        __SPM_SHIM_EXCEPTIONS_START__ = .;
        *(.spm_shim_exceptions)
        . = ALIGN(PAGE_SIZE);
        __SPM_SHIM_EXCEPTIONS_END__ = .;
    } >SPM_SHIM_EXCEPTIONS_VMA AT>RAM

    PROVIDE(__SPM_SHIM_EXCEPTIONS_LMA__ = LOADADDR(spm_shim_exceptions));
    . = LOADADDR(spm_shim_exceptions) + SIZEOF(spm_shim_exceptions);
#endif

    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = . ;

    /*
     * .data must be placed at a lower address than the stacks if the stack
     * protector is enabled. Alternatively, the .data.stack_protector_canary
     * section can be placed independently of the main .data section.
     */
   .data . : {
        __DATA_START__ = .;
        *(SORT_BY_ALIGNMENT(.data*))
        __DATA_END__ = .;
    } >RAM

    /*
     * .rela.dyn needs to come after .data for the read-elf utility to parse
     * this section correctly. Ensure 8-byte alignment so that the fields of
     * RELA data structure are aligned.
     */
    . = ALIGN(8);
    __RELA_START__ = .;
    .rela.dyn . : {
    } >RAM
    __RELA_END__ = .;

#ifdef BL31_PROGBITS_LIMIT
    ASSERT(. <= BL31_PROGBITS_LIMIT, "BL31 progbits has exceeded its limit.")
#endif

#if SEPARATE_NOBITS_REGION
    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    . = ALIGN(PAGE_SIZE);
    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")

    . = BL31_NOBITS_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL31 NOBITS base address is not aligned on a page boundary.")

    __NOBITS_START__ = .;
#endif

    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >NOBITS

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 16-byte aligned for better performance of the
     * zero-initialization code.
     */
    .bss (NOLOAD) : ALIGN(16) {
        __BSS_START__ = .;
        *(SORT_BY_ALIGNMENT(.bss*))
        *(COMMON)
#if !USE_COHERENT_MEM
        /*
         * Bakery locks are stored in normal .bss memory
         *
         * Each lock's data is spread across multiple cache lines, one per CPU,
         * but multiple locks can share the same cache line.
         * The compiler will allocate enough memory for one CPU's bakery locks,
         * the remaining cache lines are allocated by the linker script
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __BAKERY_LOCK_START__ = .;
        __PERCPU_BAKERY_LOCK_START__ = .;
        *(bakery_lock)
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PERCPU_BAKERY_LOCK_END__ = .;
        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(__PERCPU_BAKERY_LOCK_END__ - __PERCPU_BAKERY_LOCK_START__);
        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __BAKERY_LOCK_END__ = .;

	/*
	 * If BL31 doesn't use any bakery lock then __PERCPU_BAKERY_LOCK_SIZE__
	 * will be zero. For this reason, the only two valid values for
	 * __PERCPU_BAKERY_LOCK_SIZE__ are 0 or the platform defined value
	 * PLAT_PERCPU_BAKERY_LOCK_SIZE.
	 */
#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
    ASSERT((__PERCPU_BAKERY_LOCK_SIZE__ == 0) || (__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE),
        "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
#endif
#endif

#if ENABLE_PMF
        /*
         * Time-stamps are stored in normal .bss memory
         *
         * The compiler will allocate enough memory for one CPU's time-stamps,
         * the remaining memory for other CPUs is allocated by the
         * linker script
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_TIMESTAMP_START__ = .;
        KEEP(*(pmf_timestamp_array))
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_PERCPU_TIMESTAMP_END__ = .;
        __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
        . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __PMF_TIMESTAMP_END__ = .;
#endif /* ENABLE_PMF */
        __BSS_END__ = .;
    } >NOBITS

    XLAT_TABLE_SECTION >NOBITS

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data.  This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        /*
         * Bakery locks are stored in coherent memory
         *
         * Each lock's data is contiguous and fully allocated by the compiler
         */
        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory.  No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __COHERENT_RAM_END__ = .;
    } >NOBITS
#endif

#if SEPARATE_NOBITS_REGION
    /*
     * Define a linker symbol to mark end of the NOBITS memory area for this
     * image.
     */
    __NOBITS_END__ = .;

    ASSERT(. <= BL31_NOBITS_LIMIT, "BL31 NOBITS region has exceeded its limit.")
#else
    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL31_END__ = .;

    /DISCARD/ : {
        *(.dynsym .dynstr .hash .gnu.hash)
    }

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
#endif
}