xref: /rk3399_ARM-atf/bl31/bl31.ld.S (revision da04341ed52d214139fe2d16667ef5b58c38e502)
/*
 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/* Shared section macros (DATA_SECTION, BSS_SECTION, ...) and page-size defs. */
#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl31_entrypoint)
13
/*
 * Memory regions for BL31. When SEPARATE_NOBITS_REGION is enabled, the
 * zero-initialised (NOLOAD) output sections are placed in a dedicated
 * NOBITS region; otherwise NOBITS simply aliases RAM.
 */
MEMORY {
    RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE

#if SEPARATE_NOBITS_REGION
    NOBITS (rw!a): ORIGIN = BL31_NOBITS_BASE, LENGTH = BL31_NOBITS_LIMIT - BL31_NOBITS_BASE
#else /* SEPARATE_NOBITS_REGION */
#   define NOBITS RAM
#endif /* SEPARATE_NOBITS_REGION */
}
23
/* Optional platform-provided linker script fragment. */
#ifdef PLAT_EXTRA_LD_SCRIPT
#   include <plat.ld.S>
#endif /* PLAT_EXTRA_LD_SCRIPT */
27
SECTIONS {
    . = BL31_BASE;

    ASSERT(. == ALIGN(PAGE_SIZE),
        "BL31_BASE address is not aligned on a page boundary.")

    __BL31_START__ = .;

#if SEPARATE_CODE_AND_RODATA
    /*
     * Code and read-only data are kept in separate, individually
     * page-aligned sections so they can be mapped with distinct memory
     * attributes (executable vs. read-only data).
     */
    .text . : {
        __TEXT_START__ = .;

        /* Entry-point code must come first in the image. */
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(SORT(.text*)))
        *(.vectors)

        . = ALIGN(PAGE_SIZE);

        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;

        *(SORT_BY_ALIGNMENT(.rodata*))

#   if PLAT_EXTRA_RODATA_INCLUDES
#       include <plat.ld.rodata.inc>
#   endif /* PLAT_EXTRA_RODATA_INCLUDES */

        RODATA_COMMON

        /* Pubsub entries below are pointer arrays; keep them 8-byte aligned. */
        . = ALIGN(8);

#   include <lib/el3_runtime/pubsub_events.h>

        . = ALIGN(PAGE_SIZE);

        __RODATA_END__ = .;
    } >RAM
#else /* SEPARATE_CODE_AND_RODATA */
    .ro . : {
        __RO_START__ = .;

        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(SORT_BY_ALIGNMENT(.rodata*))

        RODATA_COMMON

        . = ALIGN(8);

#   include <lib/el3_runtime/pubsub_events.h>

        *(.vectors)

        __RO_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as read-only,
         * executable. No RW data from the next section must creep in. Ensure
         * that the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);

        __RO_END__ = .;
    } >RAM
#endif /* SEPARATE_CODE_AND_RODATA */

    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
        "cpu_ops not defined for this platform.")

#if SPM_MM
#   ifndef SPM_SHIM_EXCEPTIONS_VMA
#       define SPM_SHIM_EXCEPTIONS_VMA RAM
#   endif /* SPM_SHIM_EXCEPTIONS_VMA */

    /*
     * Exception vectors of the SPM shim layer. They must be aligned to a 2K
     * address but we need to place them in a separate page so that we can set
     * individual permissions on them, so the actual alignment needed is the
     * page size.
     *
     * There's no need to include this into the RO section of BL31 because it
     * doesn't need to be accessed by BL31.
     */
    .spm_shim_exceptions : ALIGN(PAGE_SIZE) {
        __SPM_SHIM_EXCEPTIONS_START__ = .;

        *(.spm_shim_exceptions)

        . = ALIGN(PAGE_SIZE);

        __SPM_SHIM_EXCEPTIONS_END__ = .;
    } >SPM_SHIM_EXCEPTIONS_VMA AT>RAM

    PROVIDE(__SPM_SHIM_EXCEPTIONS_LMA__ = LOADADDR(.spm_shim_exceptions));

    /* Resume the location counter at the load address, not the VMA. */
    . = LOADADDR(.spm_shim_exceptions) + SIZEOF(.spm_shim_exceptions);
#endif /* SPM_MM */

    __RW_START__ = .;

    DATA_SECTION >RAM
    RELA_SECTION >RAM

#ifdef BL31_PROGBITS_LIMIT
    ASSERT(. <= BL31_PROGBITS_LIMIT, "BL31 progbits has exceeded its limit.")
#endif /* BL31_PROGBITS_LIMIT */

#if SEPARATE_NOBITS_REGION
    /*
     * The loaded (progbits) part of the image ends here; the NOLOAD
     * sections that follow live in the separate NOBITS memory region.
     */
    . = ALIGN(PAGE_SIZE);

    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")

    . = BL31_NOBITS_BASE;

    ASSERT(. == ALIGN(PAGE_SIZE),
        "BL31 NOBITS base address is not aligned on a page boundary.")

    __NOBITS_START__ = .;
#endif /* SEPARATE_NOBITS_REGION */

    STACK_SECTION >NOBITS
    BSS_SECTION >NOBITS
    XLAT_TABLE_SECTION >NOBITS

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned to
     * guarantee that the coherent data are stored on their own pages and are
     * not mixed with normal data.  This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    .coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;

        /*
         * Bakery locks are stored in coherent memory. Each lock's data is
         * contiguous and fully allocated by the compiler.
         */
        *(.bakery_lock)
        *(.tzfw_coherent_mem)

        __COHERENT_RAM_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as device
         * memory. No other unexpected data must creep in. Ensure the rest of
         * the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);

        __COHERENT_RAM_END__ = .;
    } >NOBITS
#endif /* USE_COHERENT_MEM */

#if SEPARATE_NOBITS_REGION
    __NOBITS_END__ = .;

    ASSERT(. <= BL31_NOBITS_LIMIT, "BL31 NOBITS region has exceeded its limit.")
#else /* SEPARATE_NOBITS_REGION */
    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
#endif /* SEPARATE_NOBITS_REGION */

    /* Dynamic-linking metadata is not needed at runtime; drop it. */
    /DISCARD/ : {
        *(.dynsym .dynstr .hash .gnu.hash)
    }
}