xref: /rk3399_ARM-atf/bl31/bl31.ld.S (revision 9fb288a03ed2ced7706defbbf78f008e921e17e2)
/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl31_entrypoint)

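/*
 * The RAM region spans the whole image, from BL31_BASE up to BL31_LIMIT.
 * When SEPARATE_NOBITS_REGION is enabled, sections that occupy no space in
 * the binary (stacks, .bss, translation tables and, optionally, coherent
 * memory) are placed in a separate NOBITS region instead; otherwise NOBITS
 * is simply an alias for RAM.
 */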
MEMORY {
    RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE
#if SEPARATE_NOBITS_REGION
    NOBITS (rw!a): ORIGIN = BL31_NOBITS_BASE, LENGTH = BL31_NOBITS_LIMIT - BL31_NOBITS_BASE
#else
#define NOBITS RAM
#endif
}

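/* Pull in an additional platform-specific linker script fragment, if provided. */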
#ifdef PLAT_EXTRA_LD_SCRIPT
#include <plat.ld.S>
#endif

SECTIONS
{
    . = BL31_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL31_BASE address is not aligned on a page boundary.")

    __BL31_START__ = .;

#if SEPARATE_CODE_AND_RODATA
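    /*
     * With SEPARATE_CODE_AND_RODATA, code and read-only data are emitted as
     * separate, page-aligned sections so that they can be mapped with
     * different memory attributes. The entry point object is placed first,
     * keeping bl31_entrypoint at the start of the image.
     */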
    .text . : {
        __TEXT_START__ = .;
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(.vectors)
        . = ALIGN(PAGE_SIZE);
        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(SORT_BY_ALIGNMENT(.rodata*))

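        /*
         * Read-only structure collections gathered by the section macros
         * from the included bl_common.ld.h: runtime service descriptors,
         * fconf populators, PMF service descriptors, cpu_ops structures and
         * the global offset table (GOT).
         */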
        RT_SVC_DESCS
        FCONF_POPULATOR
        PMF_SVC_DESCS
        CPU_OPS
        GOT

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        . = ALIGN(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM
#else
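    /*
     * Without SEPARATE_CODE_AND_RODATA, code and read-only data share a
     * single "ro" output section and are mapped with one set of attributes.
     */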
    ro . : {
        __RO_START__ = .;
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(SORT_BY_ALIGNMENT(.rodata*))

        RT_SVC_DESCS
        FCONF_POPULATOR
        PMF_SVC_DESCS
        CPU_OPS
        GOT

        /* Place pubsub sections for events */
        . = ALIGN(8);
#include <lib/el3_runtime/pubsub_events.h>

        *(.vectors)
        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as read-only,
         * executable. RW data from the next section must not creep in;
         * ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __RO_END__ = .;
    } >RAM
#endif

    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")

#if SPM_MM
#ifndef SPM_SHIM_EXCEPTIONS_VMA
#define SPM_SHIM_EXCEPTIONS_VMA         RAM
#endif

    /*
     * Exception vectors of the SPM shim layer. They only need to be aligned
     * to a 2K address, but they are placed in a separate page so that
     * individual permissions can be set on them; the alignment actually
     * needed is therefore 4K.
     *
     * There is no need to include this in the RO section of BL31 because
     * BL31 never needs to access it.
     */
    spm_shim_exceptions : ALIGN(PAGE_SIZE) {
        __SPM_SHIM_EXCEPTIONS_START__ = .;
        *(.spm_shim_exceptions)
        . = ALIGN(PAGE_SIZE);
        __SPM_SHIM_EXCEPTIONS_END__ = .;
    } >SPM_SHIM_EXCEPTIONS_VMA AT>RAM

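    /*
     * Export the load address of the shim vectors and advance the location
     * counter past their load image, which is stored in RAM.
     */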
    PROVIDE(__SPM_SHIM_EXCEPTIONS_LMA__ = LOADADDR(spm_shim_exceptions));
    . = LOADADDR(spm_shim_exceptions) + SIZEOF(spm_shim_exceptions);
#endif

    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = . ;

    /*
     * .data must be placed at a lower address than the stacks if the stack
     * protector is enabled. Alternatively, the .data.stack_protector_canary
     * section can be placed independently of the main .data section.
     */
    .data . : {
        __DATA_START__ = .;
        *(SORT_BY_ALIGNMENT(.data*))
        __DATA_END__ = .;
    } >RAM

    /*
     * .rela.dyn needs to come after .data for the readelf utility to parse
     * this section correctly. Ensure 8-byte alignment so that the fields of
     * the RELA data structure are aligned.
     */
    . = ALIGN(8);
    __RELA_START__ = .;
    .rela.dyn . : {
    } >RAM
    __RELA_END__ = .;

#ifdef BL31_PROGBITS_LIMIT
    ASSERT(. <= BL31_PROGBITS_LIMIT, "BL31 progbits has exceeded its limit.")
#endif

#if SEPARATE_NOBITS_REGION
    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    . = ALIGN(PAGE_SIZE);
    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")

    . = BL31_NOBITS_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL31 NOBITS base address is not aligned on a page boundary.")

    __NOBITS_START__ = .;
#endif

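    /*
     * Runtime stacks, declared by platform code in the tzfw_normal_stacks
     * section. NOLOAD: they take up memory at runtime but no space in the
     * image.
     */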
    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >NOBITS

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 16-byte aligned for better performance of the
     * zero-initialization code.
     */
    .bss (NOLOAD) : ALIGN(16) {
        __BSS_START__ = .;
        *(SORT_BY_ALIGNMENT(.bss*))
        *(COMMON)
        BAKERY_LOCK_NORMAL
        PMF_TIMESTAMP
        __BSS_END__ = .;
    } >NOBITS

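    /*
     * Translation tables used by the xlat_tables library. They are populated
     * at runtime, so they can live in NOBITS memory.
     */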
    XLAT_TABLE_SECTION >NOBITS

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data. This is required so that the correct
     * memory attributes can be set in the translation tables covering the
     * coherent data pages.
     */
    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        /*
         * Bakery locks are stored in coherent memory
         *
         * Each lock's data is contiguous and fully allocated by the compiler
         */
        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as device
         * memory. No other unexpected data must be allowed to creep in;
         * ensure the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);
        __COHERENT_RAM_END__ = .;
    } >NOBITS
#endif

#if SEPARATE_NOBITS_REGION
    /*
     * Define a linker symbol to mark end of the NOBITS memory area for this
     * image.
     */
    __NOBITS_END__ = .;

    ASSERT(. <= BL31_NOBITS_LIMIT, "BL31 NOBITS region has exceeded its limit.")
#else
    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL31_END__ = .;

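    /*
     * The dynamic symbol, string and hash tables are not needed at runtime
     * (relocation only processes .rela.dyn), so discard them to keep the
     * image small.
     */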
    /DISCARD/ : {
        *(.dynsym .dynstr .hash .gnu.hash)
    }

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
#endif
}