/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

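/*
 * Produce a 32-bit little-endian ARM ELF image; the ELF entry point is the
 * SP_MIN exception vector table.
 */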
OUTPUT_FORMAT(elf32-littlearm)
OUTPUT_ARCH(arm)
ENTRY(sp_min_vector_table)

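/*
 * The whole image must fit in a single RAM region spanning BL32_BASE to
 * BL32_LIMIT; the linker raises an overflow error if the sections placed in
 * it grow beyond that limit.
 */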
MEMORY {
    RAM (rwx): ORIGIN = BL32_BASE, LENGTH = BL32_LIMIT - BL32_BASE
}


SECTIONS
{
    . = BL32_BASE;
    ASSERT(. == ALIGN(4096),
           "BL32_BASE address is not aligned on a page boundary.")

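    /*
     * With SEPARATE_CODE_AND_RODATA, code and read-only data live in separate
     * page-aligned output sections so they can be mapped with different
     * memory attributes; otherwise both share the single "ro" section below.
     * In both cases the text from entrypoint.o is placed first so that the
     * entry code sits at BL32_BASE.
     */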
#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
        *entrypoint.o(.text*)
        *(.text*)
        *(.vectors)
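        /*
         * Pad to the next 4KB page boundary so that .rodata starts on its
         * own page and the two sections can be mapped with different
         * memory attributes.
         */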
        . = NEXT(4096);
        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(.rodata*)

        /* Ensure 4-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(4);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

        /*
         * Ensure 4-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(4);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

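        /*
         * Pad to the next 4KB page boundary so that no read-write data from
         * the following sections shares the last read-only page.
         */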
        . = NEXT(4096);
        __RODATA_END__ = .;
    } >RAM
#else
    ro . : {
        __RO_START__ = .;
        *entrypoint.o(.text*)
        *(.text*)
        *(.rodata*)

        /* Ensure 4-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(4);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

        /*
         * Ensure 4-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(4);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        *(.vectors)
        __RO_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as
         * read-only, executable.  No RW data from the next section must
         * creep in.  Ensure the rest of the current memory block is unused.
         */
        . = NEXT(4096);
        __RO_END__ = .;
    } >RAM
#endif

    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")
    /*
     * Define a linker symbol to mark the start of the RW memory area for this
     * image.
     */
    __RW_START__ = .;

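    /* Initialised read-write data. */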
    .data . : {
        __DATA_START__ = .;
        *(.data*)
        __DATA_END__ = .;
    } >RAM

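    /*
     * Runtime stacks are collected from the tzfw_normal_stacks input section.
     * NOLOAD: the stacks need no initial contents in the image.
     */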
    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 8-byte aligned for better performance of the
     * zero-initialization code.
     */
    .bss (NOLOAD) : ALIGN(8) {
        __BSS_START__ = .;
        *(.bss*)
        *(COMMON)
#if !USE_COHERENT_MEM
        /*
         * Bakery locks are stored in normal .bss memory.
         *
         * Each lock's data is spread across multiple cache lines, one per CPU,
         * but multiple locks can share the same cache line.
         * The compiler will allocate enough memory for one CPU's bakery locks;
         * the remaining cache lines are allocated by the linker script.
         */
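        /*
         * Illustrative figures only: if one CPU's locks fit in a single
         * 64-byte cache line and PLATFORM_CORE_COUNT is 6, then
         * __PERCPU_BAKERY_LOCK_SIZE__ is 64 bytes and the per-CPU expansion
         * below reserves a further 64 * 5 = 320 bytes for the remaining CPUs.
         */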
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __BAKERY_LOCK_START__ = .;
        *(bakery_lock)
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(. - __BAKERY_LOCK_START__);
        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __BAKERY_LOCK_END__ = .;
#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
    ASSERT(__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE,
        "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
#endif
#endif

#if ENABLE_PMF
        /*
         * Time-stamps are stored in normal .bss memory.
         *
         * The compiler will allocate enough memory for one CPU's time-stamps;
         * the remaining memory for the other CPUs is allocated by the
         * linker script.
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_TIMESTAMP_START__ = .;
        KEEP(*(pmf_timestamp_array))
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_PERCPU_TIMESTAMP_END__ = .;
        __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
        . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __PMF_TIMESTAMP_END__ = .;
#endif /* ENABLE_PMF */

        __BSS_END__ = .;
    } >RAM

    /*
     * The xlat_table section is for full, aligned page tables (4K).
     * Removing them from .bss avoids forcing 4K alignment on
     * the .bss section and eliminates the unnecessary zero init.
     */
    xlat_table (NOLOAD) : {
        *(xlat_table)
    } >RAM

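    /*
     * Total size of .bss; together with __BSS_START__ this defines the region
     * cleared by the zero-initialization code at runtime.
     */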
    __BSS_SIZE__ = SIZEOF(.bss);

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data.  This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(4096) {
        __COHERENT_RAM_START__ = .;
        /*
         * Bakery locks are stored in coherent memory.
         *
         * Each lock's data is contiguous and fully allocated by the compiler.
         */
        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory.  No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = NEXT(4096);
        __COHERENT_RAM_END__ = .;
    } >RAM

    __COHERENT_RAM_UNALIGNED_SIZE__ =
        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif

    /*
     * Define a linker symbol to mark the end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;

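    /* End of the whole BL32 image. */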
    __BL32_END__ = .;
}