/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <platform_def.h>

OUTPUT_FORMAT(elf32-littlearm)
OUTPUT_ARCH(arm)
ENTRY(sp_min_vector_table)

MEMORY {
    RAM (rwx): ORIGIN = BL32_BASE, LENGTH = BL32_LIMIT - BL32_BASE
}


SECTIONS
{
    . = BL32_BASE;
    ASSERT(. == ALIGN(4096),
           "BL32_BASE address is not aligned on a page boundary.")

#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
        *entrypoint.o(.text*)
        *(.text*)
        . = NEXT(4096);
        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(.rodata*)

        /* Ensure 4-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(4);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

        /*
         * Ensure 4-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(4);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        . = NEXT(4096);
        __RODATA_END__ = .;
    } >RAM
#else
    ro . : {
        __RO_START__ = .;
        *entrypoint.o(.text*)
        *(.text*)
        *(.rodata*)

        /* Ensure 4-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(4);
        __RT_SVC_DESCS_START__ = .;
        KEEP(*(rt_svc_descs))
        __RT_SVC_DESCS_END__ = .;

        /*
         * Ensure 4-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(4);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        __RO_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as
         * read-only, executable.  No RW data from the next section must
         * creep in.  Ensure the rest of the current memory page is unused.
         */
        . = NEXT(4096);
        __RO_END__ = .;
    } >RAM
#endif

    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")
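
    /*
     * How the rt_svc_descs and cpu_ops arrays above get populated and
     * consumed (an illustrative sketch, not code from this tree): C or
     * assembly units drop objects into the named input sections, and the
     * runtime walks the region between the linker-defined bounds.  The
     * descriptor type and register_service() helper below are hypothetical;
     * in TF-A the DECLARE_RT_SVC and DECLARE_CPU_OPS macros play the
     * producer role.
     *
     *     // Producer: place a descriptor in the "rt_svc_descs" section.
     *     static const rt_svc_desc_t my_svc
     *         __attribute__((section("rt_svc_descs"), used)) = { ... };
     *
     *     // Consumer: iterate over everything the linker gathered.
     *     extern rt_svc_desc_t __RT_SVC_DESCS_START__[];
     *     extern rt_svc_desc_t __RT_SVC_DESCS_END__[];
     *
     *     for (rt_svc_desc_t *d = __RT_SVC_DESCS_START__;
     *          d < __RT_SVC_DESCS_END__; d++)
     *         register_service(d);    // hypothetical helper
     */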
    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = .;

    .data . : {
        __DATA_START__ = .;
        *(.data*)
        __DATA_END__ = .;
    } >RAM

    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address must be 16-byte aligned.
     */
    .bss (NOLOAD) : ALIGN(16) {
        __BSS_START__ = .;
        *(.bss*)
        *(COMMON)
#if !USE_COHERENT_MEM
        /*
         * Bakery locks are stored in normal .bss memory.
         *
         * Each lock's data is spread across multiple cache lines, one per
         * CPU, but multiple locks can share the same cache line.
         * The compiler allocates enough memory for one CPU's bakery locks;
         * the remaining cache lines are allocated by the linker script below.
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __BAKERY_LOCK_START__ = .;
        *(bakery_lock)
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(. - __BAKERY_LOCK_START__);
        . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __BAKERY_LOCK_END__ = .;
#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
    ASSERT(__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE,
        "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
#endif
#endif
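
        /*
         * With the layout above, CPU n's copy of a lock sits at a fixed
         * per-CPU stride from the CPU 0 copy that the compiler allocated.
         * A minimal sketch of the address computation (hypothetical helper;
         * TF-A's bakery lock code implements the real version):
         *
         *     extern char __BAKERY_LOCK_START__[];
         *     extern char __PERCPU_BAKERY_LOCK_SIZE__[];
         *
         *     static inline void *lock_for_cpu(void *cpu0_lock,
         *                                      unsigned int cpu)
         *     {
         *         size_t stride = (size_t)__PERCPU_BAKERY_LOCK_SIZE__;
         *
         *         return (char *)cpu0_lock + (stride * cpu);
         *     }
         */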

#if ENABLE_PMF
        /*
         * Time-stamps are stored in normal .bss memory.
         *
         * The compiler allocates enough memory for one CPU's time-stamps;
         * the remaining memory for the other CPUs is allocated by the
         * linker script.
         */
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_TIMESTAMP_START__ = .;
        KEEP(*(pmf_timestamp_array))
        . = ALIGN(CACHE_WRITEBACK_GRANULE);
        __PMF_PERCPU_TIMESTAMP_END__ = .;
        __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
        . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
        __PMF_TIMESTAMP_END__ = .;
#endif /* ENABLE_PMF */
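
        /*
         * The PMF time-stamp region follows the same replication pattern
         * as the bakery locks: one compiler-allocated copy plus
         * PLATFORM_CORE_COUNT - 1 linker-reserved copies at a fixed
         * stride.  A sketch with a hypothetical per-core accessor:
         *
         *     extern char __PMF_TIMESTAMP_START__[];
         *     extern char __PERCPU_TIMESTAMP_SIZE__[];
         *
         *     static inline char *pmf_ts_base(unsigned int core_pos)
         *     {
         *         size_t stride = (size_t)__PERCPU_TIMESTAMP_SIZE__;
         *
         *         return __PMF_TIMESTAMP_START__ + (stride * core_pos);
         *     }
         */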

        __BSS_END__ = .;
    } >RAM

    /*
     * The xlat_table section is for full, aligned page tables (4K).
     * Removing them from .bss avoids forcing 4K alignment on
     * the .bss section and eliminates the unnecessary zero init.
     */
    xlat_table (NOLOAD) : {
        *(xlat_table)
    } >RAM

    __BSS_SIZE__ = SIZEOF(.bss);
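
    /*
     * __BSS_START__ and __BSS_SIZE__ are consumed by the early boot code,
     * which zeroes .bss before C code runs.  Note that xlat_table lies
     * outside SIZEOF(.bss), so the (potentially large) page tables are
     * not swept by that loop.  A sketch of the idiom, assuming a
     * zeromem(dst, size)-style helper such as the one in TF-A:
     *
     *     extern char __BSS_START__[];
     *     extern char __BSS_SIZE__[];
     *
     *     zeromem(__BSS_START__, (size_t)__BSS_SIZE__);
     */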

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data.  This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(4096) {
        __COHERENT_RAM_START__ = .;
        /*
         * Bakery locks are stored in coherent memory
         *
         * Each lock's data is contiguous and fully allocated by the compiler
         */
        *(bakery_lock)
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory.  No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = NEXT(4096);
        __COHERENT_RAM_END__ = .;
    } >RAM

    __COHERENT_RAM_UNALIGNED_SIZE__ =
        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif
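
    /*
     * The 4K alignment above is what lets the boot code give this range
     * its own translation-table attributes.  A sketch of the mapping,
     * assuming the TF-A xlat_tables API (mmap_add_region, MT_DEVICE,
     * MT_RW, MT_SECURE); identity-mapped for illustration:
     *
     *     extern char __COHERENT_RAM_START__[], __COHERENT_RAM_END__[];
     *
     *     mmap_add_region((uintptr_t)__COHERENT_RAM_START__,
     *                     (uintptr_t)__COHERENT_RAM_START__,
     *                     (size_t)(__COHERENT_RAM_END__ -
     *                              __COHERENT_RAM_START__),
     *                     MT_DEVICE | MT_RW | MT_SECURE);
     */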

    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;

    __BL32_END__ = .;
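
    /*
     * __RW_START__ and __RW_END__ bound this image's mutable data so early
     * boot code can clean or invalidate it as one range while the caches
     * may hold stale lines.  A sketch, assuming TF-A's
     * flush_dcache_range(addr, size) helper:
     *
     *     extern char __RW_START__[], __RW_END__[];
     *
     *     flush_dcache_range((uintptr_t)__RW_START__,
     *                        (size_t)(__RW_END__ - __RW_START__));
     */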
}