/* xref: /rk3399_ARM-atf/bl31/bl31.ld.S (revision 26d1e0c330981505315408c2537b87854d15d720) */
/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <lib/xlat_tables/xlat_tables_defs.h>

/* Output format/arch and the image entry point come from the platform layer. */
OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl31_entrypoint)

MEMORY {
    RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE
#if SEPARATE_NOBITS_REGION
    /* Separate region for zero-initialised (NOLOAD) output sections. */
    NOBITS (rw!a): ORIGIN = BL31_NOBITS_BASE, LENGTH = BL31_NOBITS_LIMIT - BL31_NOBITS_BASE
#else
/* Without a separate NOBITS region, NOLOAD sections are placed in RAM. */
#define NOBITS RAM
#endif
}

/* Optional platform-supplied additions to this linker script. */
#ifdef PLAT_EXTRA_LD_SCRIPT
#include <plat.ld.S>
#endif

SECTIONS
{
    . = BL31_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL31_BASE address is not aligned on a page boundary.")

    __BL31_START__ = .;

#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
        /* The entry point object must come first in the image. */
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(.vectors)
        . = ALIGN(PAGE_SIZE);
        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(SORT_BY_ALIGNMENT(.rodata*))

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

        . = ALIGN(8);
        __FCONF_POPULATOR_START__ = .;
        KEEP(*(.fconf_populator))
        __FCONF_POPULATOR_END__ = .;

#if ENABLE_PMF
        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PMF_SVC_DESCS_START__ = .;
        KEEP(*(pmf_svc_descs))
        __PMF_SVC_DESCS_END__ = .;
#endif /* ENABLE_PMF */

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /*
         * Keep the .got section in the RO section as it is patched
         * prior to enabling the MMU and having the .got in RO is better for
         * security. GOT is a table of addresses so ensure 8-byte alignment.
         */
        . = ALIGN(8);
        __GOT_START__ = .;
        *(.got)
        __GOT_END__ = .;

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        . = ALIGN(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM
#else
    ro . : {
        __RO_START__ = .;
        /* The entry point object must come first in the image. */
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(SORT_BY_ALIGNMENT(.rodata*))

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

        . = ALIGN(8);
        __FCONF_POPULATOR_START__ = .;
        KEEP(*(.fconf_populator))
        __FCONF_POPULATOR_END__ = .;

#if ENABLE_PMF
        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PMF_SVC_DESCS_START__ = .;
        KEEP(*(pmf_svc_descs))
        __PMF_SVC_DESCS_END__ = .;
#endif /* ENABLE_PMF */

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /*
         * Keep the .got section in the RO section as it is patched
         * prior to enabling the MMU and having the .got in RO is better for
         * security. GOT is a table of addresses so ensure 8-byte alignment.
         */
        . = ALIGN(8);
        __GOT_START__ = .;
        *(.got)
        __GOT_END__ = .;

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        *(.vectors)
        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as read-only,
         * executable.  No RW data from the next section must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __RO_END__ = .;
    } >RAM
#endif

    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")

#if SPM_MM
#ifndef SPM_SHIM_EXCEPTIONS_VMA
#define SPM_SHIM_EXCEPTIONS_VMA         RAM
#endif

    /*
     * Exception vectors of the SPM shim layer. They must be aligned to a 2K
     * address, but we need to place them in a separate page so that we can set
     * individual permissions to them, so the actual alignment needed is 4K.
     *
     * There's no need to include this into the RO section of BL31 because it
     * doesn't need to be accessed by BL31.
     */
    spm_shim_exceptions : ALIGN(PAGE_SIZE) {
        __SPM_SHIM_EXCEPTIONS_START__ = .;
        *(.spm_shim_exceptions)
        . = ALIGN(PAGE_SIZE);
        __SPM_SHIM_EXCEPTIONS_END__ = .;
    } >SPM_SHIM_EXCEPTIONS_VMA AT>RAM

    PROVIDE(__SPM_SHIM_EXCEPTIONS_LMA__ = LOADADDR(spm_shim_exceptions));
    . = LOADADDR(spm_shim_exceptions) + SIZEOF(spm_shim_exceptions);
#endif

    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = . ;

    /*
     * .data must be placed at a lower address than the stacks if the stack
     * protector is enabled. Alternatively, the .data.stack_protector_canary
     * section can be placed independently of the main .data section.
     */
    .data . : {
        __DATA_START__ = .;
        *(SORT_BY_ALIGNMENT(.data*))
        __DATA_END__ = .;
    } >RAM

    /*
     * .rela.dyn needs to come after .data for the read-elf utility to parse
     * this section correctly. Ensure 8-byte alignment so that the fields of
     * RELA data structure are aligned.
     */
    . = ALIGN(8);
    __RELA_START__ = .;
    .rela.dyn . : {
    } >RAM
    __RELA_END__ = .;

#ifdef BL31_PROGBITS_LIMIT
    ASSERT(. <= BL31_PROGBITS_LIMIT, "BL31 progbits has exceeded its limit.")
#endif

#if SEPARATE_NOBITS_REGION
    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    . = ALIGN(PAGE_SIZE);
    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")

    . = BL31_NOBITS_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL31 NOBITS base address is not aligned on a page boundary.")

    __NOBITS_START__ = .;
#endif

    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >NOBITS

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 16-byte aligned for better performance of the
     * zero-initialization code.
     */
    .bss (NOLOAD) : ALIGN(16) {
        __BSS_START__ = .;
        *(SORT_BY_ALIGNMENT(.bss*))
        *(COMMON)
#if !USE_COHERENT_MEM
        /*
         * Bakery locks are stored in normal .bss memory
         *
         * Each lock's data is spread across multiple cache lines, one per CPU,
         * but multiple locks can share the same cache line.
         * The compiler will allocate enough memory for one CPU's bakery locks,
         * the remaining cache lines are allocated by the linker script
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __BAKERY_LOCK_START__ = .;
        __PERCPU_BAKERY_LOCK_START__ = .;
        *(bakery_lock)
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PERCPU_BAKERY_LOCK_END__ = .;
        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(__PERCPU_BAKERY_LOCK_END__ - __PERCPU_BAKERY_LOCK_START__);
        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __BAKERY_LOCK_END__ = .;

        /*
         * If BL31 doesn't use any bakery lock then __PERCPU_BAKERY_LOCK_SIZE__
         * will be zero. For this reason, the only two valid values for
         * __PERCPU_BAKERY_LOCK_SIZE__ are 0 or the platform defined value
         * PLAT_PERCPU_BAKERY_LOCK_SIZE.
         */
#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
    ASSERT((__PERCPU_BAKERY_LOCK_SIZE__ == 0) || (__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE),
        "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
#endif
#endif

#if ENABLE_PMF
        /*
         * Time-stamps are stored in normal .bss memory
         *
         * The compiler will allocate enough memory for one CPU's time-stamps,
         * the remaining memory for other CPUs is allocated by the
         * linker script
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_TIMESTAMP_START__ = .;
        KEEP(*(pmf_timestamp_array))
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_PERCPU_TIMESTAMP_END__ = .;
        __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
        . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __PMF_TIMESTAMP_END__ = .;
#endif /* ENABLE_PMF */
        __BSS_END__ = .;
    } >NOBITS

    /*
     * The xlat_table section is for full, aligned page tables (4K).
     * Removing them from .bss avoids forcing 4K alignment on
     * the .bss section. The tables are initialized to zero by the translation
     * tables library.
     */
    xlat_table (NOLOAD) : {
        *(xlat_table)
    } >NOBITS

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data.  This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        /*
         * Bakery locks are stored in coherent memory
         *
         * Each lock's data is contiguous and fully allocated by the compiler
         */
        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory.  No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __COHERENT_RAM_END__ = .;
    } >NOBITS
#endif

#if SEPARATE_NOBITS_REGION
    /*
     * Define a linker symbol to mark end of the NOBITS memory area for this
     * image.
     */
    __NOBITS_END__ = .;

    ASSERT(. <= BL31_NOBITS_LIMIT, "BL31 NOBITS region has exceeded its limit.")
#else
    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL31_END__ = .;

    /DISCARD/ : {
        *(.dynsym .dynstr .hash .gnu.hash)
    }

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
#endif
}
