/*
 * Origin: /rk3399_ARM-atf/bl31/bl31.ld.S
 * (revision c3cf06f1a3a9b9ee8ac7a0ae505f95c45f7dca84)
 *
 * Linker script for the BL31 (EL3 runtime firmware) image. The .S suffix
 * means this file is fed through the C preprocessor before being handed to
 * the linker, which is why #include / #if directives appear below.
 */
/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>
#include <xlat_tables_defs.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl31_entrypoint)


/* The whole BL31 image must fit between BL31_BASE and BL31_LIMIT. */
MEMORY {
    RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE
}

#ifdef PLAT_EXTRA_LD_SCRIPT
#include <plat.ld.S>
#endif

SECTIONS
{
    . = BL31_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL31_BASE address is not aligned on a page boundary.")

    __BL31_START__ = .;

#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
        *bl31_entrypoint.o(.text*)
        *(.text*)
        *(.vectors)
        . = ALIGN(PAGE_SIZE);
        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(.rodata*)

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

#if ENABLE_PMF
        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PMF_SVC_DESCS_START__ = .;
        KEEP(*(pmf_svc_descs))
        __PMF_SVC_DESCS_END__ = .;
#endif /* ENABLE_PMF */

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /*
         * Keep the .got section in the RO section as it is patched
         * prior to enabling the MMU and having the .got in RO is better for
         * security.
         */
        . = ALIGN(16);
        __GOT_START__ = .;
        *(.got)
        __GOT_END__ = .;

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <pubsub_events.h>

        . = ALIGN(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM
#else
    ro . : {
        __RO_START__ = .;
        *bl31_entrypoint.o(.text*)
        *(.text*)
        *(.rodata*)

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

#if ENABLE_PMF
        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PMF_SVC_DESCS_START__ = .;
        KEEP(*(pmf_svc_descs))
        __PMF_SVC_DESCS_END__ = .;
#endif /* ENABLE_PMF */

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <pubsub_events.h>

        *(.vectors)
        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as read-only,
         * executable.  No RW data from the next section must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __RO_END__ = .;
    } >RAM
#endif

    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")

#if ENABLE_SPM
    /*
     * Exception vectors of the SPM shim layer. They must be aligned to a 2K
     * address, but we need to place them in a separate page so that we can set
     * individual permissions to them, so the actual alignment needed is 4K.
     *
     * There's no need to include this into the RO section of BL31 because it
     * doesn't need to be accessed by BL31.
     */
    spm_shim_exceptions : ALIGN(PAGE_SIZE) {
        __SPM_SHIM_EXCEPTIONS_START__ = .;
        *(.spm_shim_exceptions)
        . = ALIGN(PAGE_SIZE);
        __SPM_SHIM_EXCEPTIONS_END__ = .;
    } >RAM
#endif

    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = . ;

    /*
     * .data must be placed at a lower address than the stacks if the stack
     * protector is enabled. Alternatively, the .data.stack_protector_canary
     * section can be placed independently of the main .data section.
     */
    .data . : {
        __DATA_START__ = .;
        *(.data*)
        __DATA_END__ = .;
    } >RAM

    . = ALIGN(16);
    /*
     * .rela.dyn needs to come after .data for the read-elf utility to parse
     * this section correctly.
     */
    __RELA_START__ = .;
    .rela.dyn . : {
    } >RAM
    __RELA_END__ = .;

#ifdef BL31_PROGBITS_LIMIT
    ASSERT(. <= BL31_PROGBITS_LIMIT, "BL31 progbits has exceeded its limit.")
#endif

    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 16-byte aligned for better performance of the
     * zero-initialization code.
     */
    .bss (NOLOAD) : ALIGN(16) {
        __BSS_START__ = .;
        *(.bss*)
        *(COMMON)
#if !USE_COHERENT_MEM
        /*
         * Bakery locks are stored in normal .bss memory
         *
         * Each lock's data is spread across multiple cache lines, one per CPU,
         * but multiple locks can share the same cache line.
         * The compiler will allocate enough memory for one CPU's bakery locks,
         * the remaining cache lines are allocated by the linker script
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __BAKERY_LOCK_START__ = .;
        *(bakery_lock)
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(. - __BAKERY_LOCK_START__);
        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __BAKERY_LOCK_END__ = .;

        /*
         * If BL31 doesn't use any bakery lock then __PERCPU_BAKERY_LOCK_SIZE__
         * will be zero. For this reason, the only two valid values for
         * __PERCPU_BAKERY_LOCK_SIZE__ are 0 or the platform defined value
         * PLAT_PERCPU_BAKERY_LOCK_SIZE.
         */
#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
    ASSERT((__PERCPU_BAKERY_LOCK_SIZE__ == 0) || (__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE),
        "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
#endif
#endif

#if ENABLE_PMF
        /*
         * Time-stamps are stored in normal .bss memory
         *
         * The compiler will allocate enough memory for one CPU's time-stamps,
         * the remaining memory for other CPU's is allocated by the
         * linker script
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_TIMESTAMP_START__ = .;
        KEEP(*(pmf_timestamp_array))
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_PERCPU_TIMESTAMP_END__ = .;
        __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
        . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __PMF_TIMESTAMP_END__ = .;
#endif /* ENABLE_PMF */
        __BSS_END__ = .;
    } >RAM

    /*
     * The xlat_table section is for full, aligned page tables (4K).
     * Removing them from .bss avoids forcing 4K alignment on
     * the .bss section. The tables are initialized to zero by the translation
     * tables library.
     */
    xlat_table (NOLOAD) : {
        *(xlat_table)
    } >RAM

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data.  This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        /*
         * Bakery locks are stored in coherent memory
         *
         * Each lock's data is contiguous and fully allocated by the compiler
         */
        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory.  No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __COHERENT_RAM_END__ = .;
    } >RAM
#endif

    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
}
292