/* Source: /rk3399_ARM-atf/bl31/bl31.ld.S (revision 5b33ad174a03a5ccdcd6321c64d69167361dc21a) */
/*
 * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <lib/xlat_tables/xlat_tables_defs.h>

/* Output format/arch are platform-provided; execution starts at bl31_entrypoint. */
OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl31_entrypoint)
14
15
MEMORY {
    RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE
#if SEPARATE_NOBITS_REGION
    NOBITS (rw!a): ORIGIN = BL31_NOBITS_BASE, LENGTH = BL31_NOBITS_LIMIT - BL31_NOBITS_BASE
#else
/* Without a separate NOBITS region, NOLOAD sections are placed in RAM. */
#define NOBITS RAM
#endif
}
24
/* Optional platform hook for extra linker-script content. */
#ifdef PLAT_EXTRA_LD_SCRIPT
#include <plat.ld.S>
#endif
28
SECTIONS
{
    . = BL31_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL31_BASE address is not aligned on a page boundary.")

    __BL31_START__ = .;
36
#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(.vectors)
        . = ALIGN(PAGE_SIZE);
        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(SORT_BY_ALIGNMENT(.rodata*))

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

#if ENABLE_PMF
        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PMF_SVC_DESCS_START__ = .;
        KEEP(*(pmf_svc_descs))
        __PMF_SVC_DESCS_END__ = .;
#endif /* ENABLE_PMF */

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /*
         * Keep the .got section in the RO section as it is patched
         * prior to enabling the MMU and having the .got in RO is better for
         * security. GOT is a table of addresses so ensure 8-byte alignment.
         */
        . = ALIGN(8);
        __GOT_START__ = .;
        *(.got)
        __GOT_END__ = .;

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        . = ALIGN(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM
#else
    ro . : {
        __RO_START__ = .;
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(SORT_BY_ALIGNMENT(.rodata*))

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

#if ENABLE_PMF
        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PMF_SVC_DESCS_START__ = .;
        KEEP(*(pmf_svc_descs))
        __PMF_SVC_DESCS_END__ = .;
#endif /* ENABLE_PMF */

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /*
         * Keep the .got section in the RO section as it is patched
         * prior to enabling the MMU and having the .got in RO is better for
         * security. GOT is a table of addresses so ensure 8-byte alignment.
         */
        . = ALIGN(8);
        __GOT_START__ = .;
        *(.got)
        __GOT_END__ = .;

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        *(.vectors)
        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as read-only,
         * executable.  No RW data from the next section must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __RO_END__ = .;
    } >RAM
#endif
146
147    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
148           "cpu_ops not defined for this platform.")
149
#if SPM_MM
#ifndef SPM_SHIM_EXCEPTIONS_VMA
#define SPM_SHIM_EXCEPTIONS_VMA         RAM
#endif

    /*
     * Exception vectors of the SPM shim layer. They must be aligned to a 2K
     * address, but we need to place them in a separate page so that we can set
     * individual permissions to them, so the actual alignment needed is 4K.
     *
     * There's no need to include this into the RO section of BL31 because it
     * doesn't need to be accessed by BL31.
     */
    spm_shim_exceptions : ALIGN(PAGE_SIZE) {
        __SPM_SHIM_EXCEPTIONS_START__ = .;
        *(.spm_shim_exceptions)
        . = ALIGN(PAGE_SIZE);
        __SPM_SHIM_EXCEPTIONS_END__ = .;
    } >SPM_SHIM_EXCEPTIONS_VMA AT>RAM

    PROVIDE(__SPM_SHIM_EXCEPTIONS_LMA__ = LOADADDR(spm_shim_exceptions));
    . = LOADADDR(spm_shim_exceptions) + SIZEOF(spm_shim_exceptions);
#endif
173
174    /*
175     * Define a linker symbol to mark start of the RW memory area for this
176     * image.
177     */
178    __RW_START__ = . ;
179
180    /*
181     * .data must be placed at a lower address than the stacks if the stack
182     * protector is enabled. Alternatively, the .data.stack_protector_canary
183     * section can be placed independently of the main .data section.
184     */
185   .data . : {
186        __DATA_START__ = .;
187        *(SORT_BY_ALIGNMENT(.data*))
188        __DATA_END__ = .;
189    } >RAM
190
191    /*
192     * .rela.dyn needs to come after .data for the read-elf utility to parse
193     * this section correctly. Ensure 8-byte alignment so that the fields of
194     * RELA data structure are aligned.
195     */
196    . = ALIGN(8);
197    __RELA_START__ = .;
198    .rela.dyn . : {
199    } >RAM
200    __RELA_END__ = .;
201
#ifdef BL31_PROGBITS_LIMIT
    ASSERT(. <= BL31_PROGBITS_LIMIT, "BL31 progbits has exceeded its limit.")
#endif
205
#if SEPARATE_NOBITS_REGION
    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")

    /* Jump to the separate NOBITS memory region for the sections below. */
    . = BL31_NOBITS_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL31 NOBITS base address is not aligned on a page boundary.")

    __NOBITS_START__ = .;
#endif
222
223    stacks (NOLOAD) : {
224        __STACKS_START__ = .;
225        *(tzfw_normal_stacks)
226        __STACKS_END__ = .;
227    } >NOBITS
228
229    /*
230     * The .bss section gets initialised to 0 at runtime.
231     * Its base address should be 16-byte aligned for better performance of the
232     * zero-initialization code.
233     */
234    .bss (NOLOAD) : ALIGN(16) {
235        __BSS_START__ = .;
236        *(SORT_BY_ALIGNMENT(.bss*))
237        *(COMMON)
238#if !USE_COHERENT_MEM
239        /*
240         * Bakery locks are stored in normal .bss memory
241         *
242         * Each lock's data is spread across multiple cache lines, one per CPU,
243         * but multiple locks can share the same cache line.
244         * The compiler will allocate enough memory for one CPU's bakery locks,
245         * the remaining cache lines are allocated by the linker script
246         */
247        . = ALIGN(CACHE_WRITEBACK_GRANULE);
248        __BAKERY_LOCK_START__ = .;
249        __PERCPU_BAKERY_LOCK_START__ = .;
250        *(bakery_lock)
251        . = ALIGN(CACHE_WRITEBACK_GRANULE);
252        __PERCPU_BAKERY_LOCK_END__ = .;
253        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(__PERCPU_BAKERY_LOCK_END__ - __PERCPU_BAKERY_LOCK_START__);
254        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
255        __BAKERY_LOCK_END__ = .;
256
257	/*
258	 * If BL31 doesn't use any bakery lock then __PERCPU_BAKERY_LOCK_SIZE__
259	 * will be zero. For this reason, the only two valid values for
260	 * __PERCPU_BAKERY_LOCK_SIZE__ are 0 or the platform defined value
261	 * PLAT_PERCPU_BAKERY_LOCK_SIZE.
262	 */
263#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
264    ASSERT((__PERCPU_BAKERY_LOCK_SIZE__ == 0) || (__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE),
265        "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
266#endif
267#endif
268
269#if ENABLE_PMF
270        /*
271         * Time-stamps are stored in normal .bss memory
272         *
273         * The compiler will allocate enough memory for one CPU's time-stamps,
274         * the remaining memory for other CPUs is allocated by the
275         * linker script
276         */
277        . = ALIGN(CACHE_WRITEBACK_GRANULE);
278        __PMF_TIMESTAMP_START__ = .;
279        KEEP(*(pmf_timestamp_array))
280        . = ALIGN(CACHE_WRITEBACK_GRANULE);
281        __PMF_PERCPU_TIMESTAMP_END__ = .;
282        __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
283        . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
284        __PMF_TIMESTAMP_END__ = .;
285#endif /* ENABLE_PMF */
286        __BSS_END__ = .;
287    } >NOBITS
288
289    /*
290     * The xlat_table section is for full, aligned page tables (4K).
291     * Removing them from .bss avoids forcing 4K alignment on
292     * the .bss section. The tables are initialized to zero by the translation
293     * tables library.
294     */
295    xlat_table (NOLOAD) : {
296        *(xlat_table)
297    } >NOBITS
298
#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data.  This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        /*
         * Bakery locks are stored in coherent memory
         *
         * Each lock's data is contiguous and fully allocated by the compiler
         */
        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory.  No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __COHERENT_RAM_END__ = .;
    } >NOBITS
#endif
325
#if SEPARATE_NOBITS_REGION
    /*
     * Define a linker symbol to mark end of the NOBITS memory area for this
     * image.
     */
    __NOBITS_END__ = .;

    ASSERT(. <= BL31_NOBITS_LIMIT, "BL31 NOBITS region has exceeded its limit.")
#else
    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
#endif
}
345