/*
 * Copyright (c) 2004-2008 Texas Instruments
 *
 * (C) Copyright 2002
 * Gary Jennejohn, DENX Software Engineering, <garyj@denx.de>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <config.h>
#include <asm/psci.h>

OUTPUT_FORMAT("elf32-littlearm", "elf32-littlearm", "elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
SECTIONS
{
#if !defined(CONFIG_CMDLINE) && !defined(CONFIG_U_BOOT_CMD_ALWAYS)
	/DISCARD/ : { *(.u_boot_list_2_cmd_*) }
#endif
#if defined(CONFIG_ARMV7_SECURE_BASE) && defined(CONFIG_ARMV7_NONSEC)
	/*
	 * If CONFIG_ARMV7_SECURE_BASE is defined, the secure code is not
	 * bundled with u-boot and its code offsets are fixed. The secure
	 * zone only needs to be copied from the load address to
	 * CONFIG_ARMV7_SECURE_BASE, which is the link and run address
	 * for the secure code.
	 *
	 * If CONFIG_ARMV7_SECURE_BASE is undefined, the secure zone is
	 * included in the u-boot address space, and some absolute
	 * addresses are used in the secure code. Those absolute addresses
	 * need to be relocated along with the accompanying u-boot code.
	 *
	 * So the DISCARD below applies only when CONFIG_ARMV7_SECURE_BASE
	 * is defined.
	 */
	/DISCARD/ : { *(.rel._secure*) }
#endif
	. = 0x00000000;

	. = ALIGN(4);
	.text :
	{
		*(.__image_copy_start)
		*(.vectors)
		CPUDIR/start.o (.text*)
		*(.text*)
	}

#ifdef CONFIG_ARMV7_NONSEC

	/* Align the secure section only if we're going to use it in situ */
	.__secure_start :
#ifndef CONFIG_ARMV7_SECURE_BASE
		ALIGN(CONSTANT(COMMONPAGESIZE))
#endif
	{
		KEEP(*(.__secure_start))
	}

#ifndef CONFIG_ARMV7_SECURE_BASE
	/*
	 * No dedicated secure RAM: link the secure section in place and
	 * keep the PSCI stack in RAM.
	 */
#define CONFIG_ARMV7_SECURE_BASE
#define __ARMV7_PSCI_STACK_IN_RAM
#endif

	.secure_text CONFIG_ARMV7_SECURE_BASE :
		AT(ADDR(.__secure_start) + SIZEOF(.__secure_start))
	{
		*(._secure.text)
	}

	.secure_data : AT(LOADADDR(.secure_text) + SIZEOF(.secure_text))
	{
		*(._secure.data)
	}

#ifdef CONFIG_ARMV7_PSCI
	.secure_stack ALIGN(ADDR(.secure_data) + SIZEOF(.secure_data),
			    CONSTANT(COMMONPAGESIZE)) (NOLOAD) :
#ifdef __ARMV7_PSCI_STACK_IN_RAM
		AT(ADDR(.secure_stack))
#else
		AT(LOADADDR(.secure_data) + SIZEOF(.secure_data))
#endif
	{
		KEEP(*(.__secure_stack_start))

		/* Skip addresses for stack */
		. = . + CONFIG_ARMV7_PSCI_NR_CPUS * ARM_PSCI_STACK_SIZE;

		/* Align end of stack section to page boundary */
		. = ALIGN(CONSTANT(COMMONPAGESIZE));

		KEEP(*(.__secure_stack_end))

#ifdef CONFIG_ARMV7_SECURE_MAX_SIZE
		/*
		 * We are not checking (__secure_end - __secure_start) here,
		 * as these are the load addresses, and do not include the
		 * stack section. Instead, use the end of the stack section
		 * and the start of the text section.
		 */
		ASSERT((. - ADDR(.secure_text)) <= CONFIG_ARMV7_SECURE_MAX_SIZE,
		       "Error: secure section exceeds secure memory size");
#endif
	}

#ifndef __ARMV7_PSCI_STACK_IN_RAM
	/* Reset VMA but don't allocate space if we have secure SRAM */
	. = LOADADDR(.secure_stack);
#endif

#endif

	.__secure_end : AT(ADDR(.__secure_end)) {
		*(.__secure_end)
		LONG(0x1d1071c);	/* Must output something to reset LMA */
	}
#endif

	. = ALIGN(4);
	.rodata : { *(SORT_BY_ALIGNMENT(SORT_BY_NAME(.rodata*))) }

	. = ALIGN(4);
	.data : {
		*(.data*)
	}

	. = ALIGN(4);

	. = .;
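	/*
	 * Linker-generated lists (see include/linker_lists.h): SORT()
	 * orders the .u_boot_list_* input sections by name, so the
	 * entries of each list end up contiguous, between that list's
	 * start and end marker sections.
	 */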
	. = ALIGN(4);
	.u_boot_list : {
		KEEP(*(SORT(.u_boot_list*)));
	}

	/*
	 * Stack unwinding tables
	 */
	. = ALIGN(8);
	/* .ARM.exidx is sorted, so has to go in its own output section. */
	.ARM.exidx : {
		__exidx_start = .;
		*(.ARM.exidx*)
		__exidx_end = .;
	}

	.ARM.extab : {
		__extab_start = .;
		*(.ARM.extab*)
		__extab_end = .;
	}

	. = ALIGN(4);

	.__efi_runtime_start : {
		*(.__efi_runtime_start)
	}

	.efi_runtime : {
		*(efi_runtime_text)
		*(efi_runtime_data)
	}

	.__efi_runtime_stop : {
		*(.__efi_runtime_stop)
	}

	.efi_runtime_rel_start :
	{
		*(.__efi_runtime_rel_start)
	}

	.efi_runtime_rel : {
		*(.relefi_runtime_text)
		*(.relefi_runtime_data)
	}

	.efi_runtime_rel_stop :
	{
		*(.__efi_runtime_rel_stop)
	}

	. = ALIGN(8);

	.image_copy_end :
	{
		*(.__image_copy_end)
	}

	.rel_dyn_start :
	{
		*(.__rel_dyn_start)
	}

	.rel.dyn : {
		*(.rel*)
	}

	.rel_dyn_end :
	{
		*(.__rel_dyn_end)
	}

	.end :
	{
		. = ALIGN(8);
		*(.__end)
	}

	_image_binary_end = .;

	/*
	 * Deprecated: this MMU section is used by pxa at present but
	 * should not be used by new boards/CPUs.
	 */
	. = ALIGN(4096);
	.mmutable : {
		*(.mmutable)
	}

/*
 * Compiler-generated __bss_start and __bss_end, see arch/arm/lib/bss.c;
 * __bss_base and __bss_limit are for the linker only (overlay ordering).
 */

	.bss_start __rel_dyn_start (OVERLAY) : {
		KEEP(*(.__bss_start));
		__bss_base = .;
	}

	.bss __bss_base (OVERLAY) : {
		*(.bss*)
		. = ALIGN(4);
		__bss_limit = .;
	}

	.bss_end __bss_limit (OVERLAY) : {
		KEEP(*(.__bss_end));
	}

	.dynsym _image_binary_end : { *(.dynsym) }
	.dynbss : { *(.dynbss) }
	.dynstr : { *(.dynstr*) }
	.dynamic : { *(.dynamic*) }
	.plt : { *(.plt*) }
	.interp : { *(.interp*) }
	.gnu.hash : { *(.gnu.hash) }
	.gnu : { *(.gnu*) }
	.ARM.exidx : { *(.ARM.exidx*) }
	.gnu.linkonce.armexidx : { *(.gnu.linkonce.armexidx.*) }
}