/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * Linker script for the BL31 (EL3 runtime firmware) image. This file is
 * preprocessed with the C preprocessor before being handed to ld, which is
 * why it may use #include / #if and platform-provided macros such as
 * BL31_BASE, BL31_LIMIT and PAGE_SIZE.
 */
#include <platform_def.h>
#include <xlat_tables_defs.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl31_entrypoint)


/* Single load region covering the whole memory window reserved for BL31. */
MEMORY {
    RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE
}

/* Hook for platforms that need to contribute extra linker-script fragments. */
#ifdef PLAT_EXTRA_LD_SCRIPT
#include <plat.ld.S>
#endif
22
SECTIONS
{
    . = BL31_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL31_BASE address is not aligned on a page boundary.")

    __BL31_START__ = .;

#if SEPARATE_CODE_AND_RODATA
    /*
     * Code and read-only data are placed in separate, individually
     * page-aligned sections so the MMU can map them with different
     * permissions (RX for .text, RO for .rodata).
     */
    .text . : {
        __TEXT_START__ = .;
        /* The entry point object must come first so that it sits at BL31_BASE. */
        *bl31_entrypoint.o(.text*)
        *(.text*)
        *(.vectors)
        . = ALIGN(PAGE_SIZE);
        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(.rodata*)

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

#if ENABLE_PMF
        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PMF_SVC_DESCS_START__ = .;
        KEEP(*(pmf_svc_descs))
        __PMF_SVC_DESCS_END__ = .;
#endif /* ENABLE_PMF */

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /*
         * Keep the .got section in the RO section as it is patched
         * prior to enabling the MMU and having the .got in RO is better for
         * security. GOT is a table of addresses so ensure 8-byte alignment.
         */
        . = ALIGN(8);
        __GOT_START__ = .;
        *(.got)
        __GOT_END__ = .;

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <pubsub_events.h>

        . = ALIGN(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM
#else
    /* Combined read-only region: code, rodata and vectors share one mapping. */
    ro . : {
        __RO_START__ = .;
        /* The entry point object must come first so that it sits at BL31_BASE. */
        *bl31_entrypoint.o(.text*)
        *(.text*)
        *(.rodata*)

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

#if ENABLE_PMF
        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PMF_SVC_DESCS_START__ = .;
        KEEP(*(pmf_svc_descs))
        __PMF_SVC_DESCS_END__ = .;
#endif /* ENABLE_PMF */

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /*
         * Keep the .got section in the RO section as it is patched
         * prior to enabling the MMU and having the .got in RO is better for
         * security. GOT is a table of addresses so ensure 8-byte alignment.
         */
        . = ALIGN(8);
        __GOT_START__ = .;
        *(.got)
        __GOT_END__ = .;

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <pubsub_events.h>

        *(.vectors)
        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as read-only,
         * executable.  No RW data from the next section must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __RO_END__ = .;
    } >RAM
#endif

    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")

#if ENABLE_SPM
    /*
     * Exception vectors of the SPM shim layer. They must be aligned to a 2K
     * address, but we need to place them in a separate page so that we can set
     * individual permissions to them, so the actual alignment needed is 4K.
     *
     * There's no need to include this into the RO section of BL31 because it
     * doesn't need to be accessed by BL31.
     */
    spm_shim_exceptions : ALIGN(PAGE_SIZE) {
        __SPM_SHIM_EXCEPTIONS_START__ = .;
        *(.spm_shim_exceptions)
        . = ALIGN(PAGE_SIZE);
        __SPM_SHIM_EXCEPTIONS_END__ = .;
    } >RAM
#endif

    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = . ;

    /*
     * .data must be placed at a lower address than the stacks if the stack
     * protector is enabled. Alternatively, the .data.stack_protector_canary
     * section can be placed independently of the main .data section.
     */
    .data . : {
        __DATA_START__ = .;
        *(.data*)
        __DATA_END__ = .;
    } >RAM

    /*
     * .rela.dyn needs to come after .data for the read-elf utility to parse
     * this section correctly. Ensure 8-byte alignment so that the fields of
     * RELA data structure are aligned.
     */
    . = ALIGN(8);
    __RELA_START__ = .;
    .rela.dyn . : {
    } >RAM
    __RELA_END__ = .;

#ifdef BL31_PROGBITS_LIMIT
    ASSERT(. <= BL31_PROGBITS_LIMIT, "BL31 progbits has exceeded its limit.")
#endif

    /* Per-CPU stacks; NOLOAD because they need no image contents. */
    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 16-byte aligned for better performance of the
     * zero-initialization code.
     */
    .bss (NOLOAD) : ALIGN(16) {
        __BSS_START__ = .;
        *(.bss*)
        *(COMMON)
#if !USE_COHERENT_MEM
        /*
         * Bakery locks are stored in normal .bss memory
         *
         * Each lock's data is spread across multiple cache lines, one per CPU,
         * but multiple locks can share the same cache line.
         * The compiler will allocate enough memory for one CPU's bakery locks,
         * the remaining cache lines are allocated by the linker script
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __BAKERY_LOCK_START__ = .;
        *(bakery_lock)
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(. - __BAKERY_LOCK_START__);
        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __BAKERY_LOCK_END__ = .;

        /*
         * If BL31 doesn't use any bakery lock then __PERCPU_BAKERY_LOCK_SIZE__
         * will be zero. For this reason, the only two valid values for
         * __PERCPU_BAKERY_LOCK_SIZE__ are 0 or the platform defined value
         * PLAT_PERCPU_BAKERY_LOCK_SIZE.
         */
#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
    ASSERT((__PERCPU_BAKERY_LOCK_SIZE__ == 0) || (__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE),
        "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
#endif
#endif

#if ENABLE_PMF
        /*
         * Time-stamps are stored in normal .bss memory
         *
         * The compiler will allocate enough memory for one CPU's time-stamps,
         * the remaining memory for other CPU's is allocated by the
         * linker script
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_TIMESTAMP_START__ = .;
        KEEP(*(pmf_timestamp_array))
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_PERCPU_TIMESTAMP_END__ = .;
        __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
        . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __PMF_TIMESTAMP_END__ = .;
#endif /* ENABLE_PMF */
        __BSS_END__ = .;
    } >RAM

    /*
     * The xlat_table section is for full, aligned page tables (4K).
     * Removing them from .bss avoids forcing 4K alignment on
     * the .bss section. The tables are initialized to zero by the translation
     * tables library.
     */
    xlat_table (NOLOAD) : {
        *(xlat_table)
    } >RAM

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data.  This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        /*
         * Bakery locks are stored in coherent memory
         *
         * Each lock's data is contiguous and fully allocated by the compiler
         */
        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory.  No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __COHERENT_RAM_END__ = .;
    } >RAM
#endif

    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
}
303