/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * Linker script for the BL2 image when it runs at EL3 (BL2_AT_EL3).
 * This file is preprocessed with cpp before being handed to ld, which is
 * why #include and #if are legal here.
 *
 * Layout produced (all within [BL2_BASE, BL2_LIMIT)):
 *   code/rodata (split or combined, per SEPARATE_CODE_AND_RODATA)
 *   .data | stacks | .bss | xlat_table | [coherent_ram]
 * Page-granular boundaries are maintained wherever a section's memory
 * attributes differ from its neighbour's, so the MMU can map each range
 * with the correct permissions.
 */

#include <platform_def.h>
#include <xlat_tables_defs.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl2_entrypoint)

/* Single load region; its bounds come from the platform's memory map. */
MEMORY {
    RAM (rwx): ORIGIN = BL2_BASE, LENGTH = BL2_LIMIT - BL2_BASE
}


SECTIONS
{
    . = BL2_BASE;
    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL2_BASE address is not aligned on a page boundary.")

#if SEPARATE_CODE_AND_RODATA
    /*
     * Code and read-only data live in separate, page-aligned sections so
     * they can be mapped RX and RO respectively.
     */
    .text . : {
        __TEXT_START__ = .;
        /* The entrypoint object is placed first so execution can begin
         * at the base of the image. */
        *bl2_el3_entrypoint.o(.text*)
        *(.text*)
        *(.vectors)
        /* Pad to the next page so no rodata shares a page with code. */
        . = NEXT(PAGE_SIZE);
        __TEXT_END__ = .;
     } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(.rodata*)

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PARSER_LIB_DESCS_START__ = .;
        KEEP(*(.img_parser_lib_descs))
        __PARSER_LIB_DESCS_END__ = .;

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /* Pad to the next page so no RW data shares a page with rodata. */
        . = NEXT(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM
#else
    /* Combined read-only section: code, rodata and vectors mapped RX. */
    ro . : {
        __RO_START__ = .;
        /* The entrypoint object is placed first so execution can begin
         * at the base of the image. */
        *bl2_el3_entrypoint.o(.text*)
        *(.text*)
        *(.rodata*)

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PARSER_LIB_DESCS_START__ = .;
        KEEP(*(.img_parser_lib_descs))
        __PARSER_LIB_DESCS_END__ = .;

        *(.vectors)
        __RO_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked as
         * read-only, executable.  No RW data from the next section must
         * creep in.  Ensure the rest of the current memory page is unused.
         */
        . = NEXT(PAGE_SIZE);

        __RO_END__ = .;
    } >RAM
#endif

    /* At least one cpu_ops descriptor must have been linked in. */
    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
          "cpu_ops not defined for this platform.")

    /*
     * Define a linker symbol to mark start of the RW memory area for this
     * image.
     */
    __RW_START__ = . ;

    /*
     * .data must be placed at a lower address than the stacks if the stack
     * protector is enabled. Alternatively, the .data.stack_protector_canary
     * section can be placed independently of the main .data section.
     */
    .data . : {
        __DATA_START__ = .;
        *(.data*)
        __DATA_END__ = .;
    } >RAM

    /* Per-CPU stacks; NOLOAD because their contents need not be in the
     * image binary. */
    stacks (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 16-byte aligned for better performance of the
     * zero-initialization code.
     */
    .bss : ALIGN(16) {
        __BSS_START__ = .;
        *(SORT_BY_ALIGNMENT(.bss*))
        *(COMMON)
        __BSS_END__ = .;
    } >RAM

    /*
     * The xlat_table section is for full, aligned page tables (4K).
     * Removing them from .bss avoids forcing 4K alignment on
     * the .bss section and eliminates the unnecessary zero init
     */
    xlat_table (NOLOAD) : {
        *(xlat_table)
    } >RAM

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data.  This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory.  No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = NEXT(PAGE_SIZE);
        __COHERENT_RAM_END__ = .;
    } >RAM
#endif

    /*
     * Define a linker symbol to mark end of the RW memory area for this
     * image.
     */
    __RW_END__ = .;
    __BL2_END__ = .;

    __BSS_SIZE__ = SIZEOF(.bss);

#if USE_COHERENT_MEM
    __COHERENT_RAM_UNALIGNED_SIZE__ =
        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif

    /* The whole image, including linker-allocated sections, must fit. */
    ASSERT(. <= BL2_LIMIT, "BL2 image has exceeded its limit.")
}