/* xref: /rk3399_ARM-atf/bl31/bl31.ld.S (revision f43e09a12e4f4f32185d3e2accceb65895d1f16b) */
/*
 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

/* Output format/arch come from the platform build; BL31 execution starts
 * at bl31_entrypoint. */
OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl31_entrypoint)
/*
 * Memory regions available to BL31. The whole image lives in RAM; when
 * SEPARATE_NOBITS_REGION is enabled, zero-initialised (NOBITS) output
 * sections are placed in their own region instead.
 */
MEMORY {
    RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE

#if SEPARATE_NOBITS_REGION
    NOBITS (rw!a): ORIGIN = BL31_NOBITS_BASE, LENGTH = BL31_NOBITS_LIMIT - BL31_NOBITS_BASE
#else /* SEPARATE_NOBITS_REGION */
    /* No dedicated region: NOBITS output sections fall back to RAM. */
#   define NOBITS RAM
#endif /* SEPARATE_NOBITS_REGION */
}
/* Optional platform-supplied additions to this linker script. */
#ifdef PLAT_EXTRA_LD_SCRIPT
#   include <plat.ld.S>
#endif /* PLAT_EXTRA_LD_SCRIPT */
/*
 * Layout of the BL31 image. Ordering here is significant: code/RO data
 * first, then RW data, then NOBITS (stack, BSS, translation tables and,
 * optionally, coherent memory). Page-size alignment between regions lets
 * the MMU apply distinct attributes to each.
 */
SECTIONS {
    RAM_REGION_START = ORIGIN(RAM);
    RAM_REGION_LENGTH = LENGTH(RAM);
    . = BL31_BASE;

    ASSERT(. == ALIGN(PAGE_SIZE),
        "BL31_BASE address is not aligned on a page boundary.")

    __BL31_START__ = .;

#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;

        /* Entry point object is placed first so it sits at BL31_BASE. */
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(SORT(.text*)))
        *(.vectors)

        . = ALIGN(PAGE_SIZE);

        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;

        *(SORT_BY_ALIGNMENT(.rodata*))

#   if PLAT_EXTRA_RODATA_INCLUDES
#       include <plat.ld.rodata.inc>
#   endif /* PLAT_EXTRA_RODATA_INCLUDES */

        RODATA_COMMON

        . = ALIGN(8);

#   include <lib/el3_runtime/pubsub_events.h>

        . = ALIGN(PAGE_SIZE);

        __RODATA_END__ = .;
    } >RAM
#else /* SEPARATE_CODE_AND_RODATA */
    .ro . : {
        __RO_START__ = .;

        /* Entry point object is placed first so it sits at BL31_BASE. */
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(SORT_BY_ALIGNMENT(.rodata*))

        RODATA_COMMON

        . = ALIGN(8);

#   include <lib/el3_runtime/pubsub_events.h>

        *(.vectors)

        __RO_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as read-only,
         * executable. No RW data from the next section must creep in. Ensure
         * that the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);

        __RO_END__ = .;
    } >RAM
#endif /* SEPARATE_CODE_AND_RODATA */

    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
        "cpu_ops not defined for this platform.")

#if SPM_MM
#   ifndef SPM_SHIM_EXCEPTIONS_VMA
#       define SPM_SHIM_EXCEPTIONS_VMA RAM
#   endif /* SPM_SHIM_EXCEPTIONS_VMA */

    /*
     * Exception vectors of the SPM shim layer. They must be aligned to a 2K
     * address but we need to place them in a separate page so that we can set
     * individual permissions on them, so the actual alignment needed is the
     * page size.
     *
     * There's no need to include this into the RO section of BL31 because it
     * doesn't need to be accessed by BL31.
     */
    .spm_shim_exceptions : ALIGN(PAGE_SIZE) {
        __SPM_SHIM_EXCEPTIONS_START__ = .;

        *(.spm_shim_exceptions)

        . = ALIGN(PAGE_SIZE);

        __SPM_SHIM_EXCEPTIONS_END__ = .;
    } >SPM_SHIM_EXCEPTIONS_VMA AT>RAM

    PROVIDE(__SPM_SHIM_EXCEPTIONS_LMA__ = LOADADDR(.spm_shim_exceptions));

    /* Resume the location counter after the shim's load image in RAM. */
    . = LOADADDR(.spm_shim_exceptions) + SIZEOF(.spm_shim_exceptions);
#endif /* SPM_MM */

    /* Everything from here on is writable at runtime. */
    __RW_START__ = .;

    DATA_SECTION >RAM
    RELA_SECTION >RAM

#ifdef BL31_PROGBITS_LIMIT
    ASSERT(
        . <= BL31_PROGBITS_LIMIT,
        "BL31 progbits has exceeded its limit. Consider disabling some features."
    )
#endif /* BL31_PROGBITS_LIMIT */

#if SEPARATE_NOBITS_REGION
    . = ALIGN(PAGE_SIZE);

    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")

    /* NOBITS content continues in its own, separately-mapped region. */
    . = BL31_NOBITS_BASE;

    ASSERT(. == ALIGN(PAGE_SIZE),
        "BL31 NOBITS base address is not aligned on a page boundary.")

    __NOBITS_START__ = .;
#endif /* SEPARATE_NOBITS_REGION */

    STACK_SECTION >NOBITS
    BSS_SECTION >NOBITS
    XLAT_TABLE_SECTION >NOBITS

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned to
     * guarantee that the coherent data are stored on their own pages and are
     * not mixed with normal data.  This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    .coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;

        /*
         * Bakery locks are stored in coherent memory. Each lock's data is
         * contiguous and fully allocated by the compiler.
         */
        *(.bakery_lock)
        *(.tzfw_coherent_mem)

        __COHERENT_RAM_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as device
         * memory. No other unexpected data must creep in. Ensure the rest of
         * the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);

        __COHERENT_RAM_END__ = .;
    } >NOBITS
#endif /* USE_COHERENT_MEM */

#if SEPARATE_NOBITS_REGION
    __NOBITS_END__ = .;

    ASSERT(. <= BL31_NOBITS_LIMIT, "BL31 NOBITS region has exceeded its limit.")
#else /* SEPARATE_NOBITS_REGION */
    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
#endif /* SEPARATE_NOBITS_REGION */
    RAM_REGION_END = .;

    /* Dynamic-linking metadata is never used at EL3; drop it. */
    /DISCARD/ : {
        *(.dynsym .dynstr .hash .gnu.hash)
    }
}