/* SPDX-License-Identifier: (BSD-2-Clause AND MIT) */
/*
 * Copyright 2022-2023 NXP
 */

/*
 * Copyright (c) 2014, Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2008-2010 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <mm/core_mmu.h>
#include <platform_config.h>
#include <util.h>

/*
 * Note:
 * Clang 11 (ld.lld) generates a non-relocatable reference when ROUNDDOWN()
 * from <util.h> is used, which does not work with ASLR.
 */
#define LD_ROUNDDOWN(x, y) ((x) - ((x) % (y)))

OUTPUT_FORMAT(CFG_KERN_LINKER_FORMAT)
OUTPUT_ARCH(CFG_KERN_LINKER_ARCH)

ENTRY(_start)
SECTIONS
{
        . = TEE_LOAD_ADDR;
        /* Ensure the text section is page aligned */
        ASSERT(!(TEE_LOAD_ADDR & (SMALL_PAGE_SIZE - 1)),
               "text start should align to 4 KiB")

        __text_start = .;

        /*
         * Memory between TEE_LOAD_ADDR and its value rounded down to a
         * page boundary will be mapped with the unpaged "text" section
         * attributes: likely to be read-only/executable.
         */
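        /*
         * Illustrative arithmetic (hypothetical values, not taken from
         * any platform_config.h): with SMALL_PAGE_SIZE at 0x1000,
         * LD_ROUNDDOWN(0x00102345, 0x1000) expands to
         * (0x00102345 - (0x00102345 % 0x1000)) == 0x00102000, i.e. the
         * nearest page boundary at or below __text_start.
         */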
        __flatmap_rx_start = LD_ROUNDDOWN(__text_start, SMALL_PAGE_SIZE);

        .text : {
                KEEP(*(.text._start))
                __identity_map_init_start = .;
                __text_data_start = .;
                *(.identity_map.data)
                __text_data_end = .;
                *(.identity_map .identity_map.*)
                __identity_map_init_end = .;
                *(.text .text.*)
                *(.sram.text.glue_7* .gnu.linkonce.t.*)
                . = ALIGN(8);
        }
        __text_end = .;

#ifdef CFG_CORE_RODATA_NOEXEC
        . = ALIGN(SMALL_PAGE_SIZE);
#endif
        __flatmap_rx_size = . - __flatmap_rx_start;
        __flatmap_ro_start = .;

        .rodata : ALIGN(8) {
                __rodata_start = .;
                *(.gnu.linkonce.r.*)
                *(.rodata .rodata.*)
#ifndef CFG_CORE_ASLR
                . = ALIGN(8);
                KEEP(*(SORT(.scattered_array*)));
#endif
                . = ALIGN(8);
                __rodata_end = .;
        }

        .got : { *(.got.plt) *(.got) }
        .note.gnu.property : { *(.note.gnu.property) }
        .plt : { *(.plt) }

        .ctors : ALIGN(8) {
                __ctor_list = .;
                KEEP(*(.ctors .ctors.* .init_array .init_array.*))
                __ctor_end = .;
        }
        .dtors : ALIGN(8) {
                __dtor_list = .;
                KEEP(*(.dtors .dtors.* .fini_array .fini_array.*))
                __dtor_end = .;
        }

        /* Start page aligned read-write memory */
#ifdef CFG_CORE_RWDATA_NOEXEC
        . = ALIGN(SMALL_PAGE_SIZE);
#endif
        __flatmap_ro_size = . - __flatmap_ro_start;
        __flatmap_rw_start = .;

        .data : ALIGN(8) {
                /* writable data */
                __data_start_rom = .;
                /*
                 * In single-segment binaries, the ROM data address is
                 * on top of the RAM data address.
                 */
                __data_start = .;
                *(.data .data.* .gnu.linkonce.d.*)
                . = ALIGN(8);
                /*
                 * To allow the linker to relax accesses to global
                 * symbols, those need to be within imm12 (signed
                 * 12-bit) offsets from __global_pointer$.
                 */
                PROVIDE(__global_pointer$ = . + 0x800);
        }

        /* uninitialized data */
        .bss : {
                __data_end = .;
                __bss_start = .;
                *(.bss .bss.*)
                *(.gnu.linkonce.b.*)
                *(COMMON)
                . = ALIGN(8);
                __bss_end = .;
        }

        .heap1 (NOLOAD) : {
                /*
                 * We're keeping track of the padding added before the
                 * .nozi section so we can do something useful with
                 * this otherwise wasted memory.
                 */
                __heap1_start = .;
                . += CFG_CORE_HEAP_SIZE;
                . = ALIGN(4 * 1024);
                __heap1_end = .;
        }
        /*
         * Uninitialized data that shouldn't be zero initialized at
         * runtime.
         */
        .nozi (NOLOAD) : {
                __nozi_start = .;
                KEEP(*(.nozi .nozi.*))
                . = ALIGN(16);
                __nozi_end = .;
                __nozi_stack_start = .;
                KEEP(*(.nozi_stack .nozi_stack.*))
                . = ALIGN(8);
                __nozi_stack_end = .;
        }

#ifdef CFG_CORE_SANITIZE_KADDRESS
        . = TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8;
        . = ALIGN(8);
        .asan_shadow : {
                __asan_shadow_start = .;
                . += TEE_RAM_VA_SIZE / 9;
                __asan_shadow_end = .;
                __asan_shadow_size = __asan_shadow_end - __asan_shadow_start;
        }
#endif /*CFG_CORE_SANITIZE_KADDRESS*/

        __end = .;
        __init_size = __data_end - TEE_LOAD_ADDR;

        /*
         * Guard against moving the location counter backwards in the
         * assignment below.
         */
        ASSERT(. <= (TEE_RAM_START + TEE_RAM_VA_SIZE),
               "TEE_RAM_VA_SIZE is too small")
        . = TEE_RAM_START + TEE_RAM_VA_SIZE;
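        /*
         * Sketch of the guard above with hypothetical numbers: if
         * TEE_RAM_START were 0x0e000000 and TEE_RAM_VA_SIZE were
         * 0x00200000, the ASSERT would fire whenever the sections
         * placed so far end past 0x0e200000; otherwise the location
         * counter jumps forward to that address, so _end_of_ram below
         * always marks the end of the whole VA window rather than the
         * end of the last section.
         */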
        _end_of_ram = .;

        __flatmap_rw_size = _end_of_ram - __flatmap_rw_start;
        __get_tee_init_end = .;

        /*
         * These regions will not become a normal part of the dumped
         * binary; instead, some are interpreted by the dump script and
         * converted into a format suitable for OP-TEE itself to use.
         */
        .dynamic : { *(.dynamic) }
        .hash : { *(.hash) }
        .dynsym : { *(.dynsym) }
        .dynstr : { *(.dynstr) }

        .rel : {
                *(.rel.*)
        }
        .rela : {
                *(.rela.*)
        }
#ifndef CFG_CORE_ASLR
        ASSERT(SIZEOF(.rel) == 0, "Relocation entries not expected")
        ASSERT(SIZEOF(.rela) == 0, "Relocation entries not expected")
#endif

        /DISCARD/ : {
                /* Strip unnecessary stuff */
                *(.comment .note .eh_frame .interp)
                /* Strip meta variables */
                *(__keep_meta_vars*)
        }

}

/* Unpaged read-only memories */
__vcore_unpg_rx_start = __flatmap_rx_start;
__vcore_unpg_ro_start = __flatmap_ro_start;
#ifdef CFG_CORE_RODATA_NOEXEC
__vcore_unpg_rx_size = __flatmap_rx_size;
__vcore_unpg_ro_size = __flatmap_ro_size;
#else
__vcore_unpg_rx_size = __flatmap_rx_size + __flatmap_ro_size;
__vcore_unpg_ro_size = 0;
#endif
__vcore_unpg_rx_end = __vcore_unpg_rx_start + __vcore_unpg_rx_size;
__vcore_unpg_ro_end = __vcore_unpg_ro_start + __vcore_unpg_ro_size;

/* Unpaged read-write memory */
__vcore_unpg_rw_start = __flatmap_rw_start;
__vcore_unpg_rw_size = __flatmap_rw_size;
__vcore_unpg_rw_end = __vcore_unpg_rw_start + __vcore_unpg_rw_size;

#ifdef CFG_CORE_SANITIZE_KADDRESS
__asan_map_start = (__asan_shadow_start / SMALL_PAGE_SIZE) *
                   SMALL_PAGE_SIZE;
__asan_map_end = ((__asan_shadow_end - 1) / SMALL_PAGE_SIZE) *
                 SMALL_PAGE_SIZE + SMALL_PAGE_SIZE;
__asan_map_size = __asan_map_end - __asan_map_start;
#endif /*CFG_CORE_SANITIZE_KADDRESS*/
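/*
 * Usage sketch (illustrative C, not part of this script): core code can
 * consume the symbols defined above by declaring them as externs. The
 * array type is a common idiom for linker-provided symbols, whose
 * "value" is an address rather than a variable's contents; the variable
 * name rx_size below is hypothetical.
 *
 *   extern uint8_t __vcore_unpg_rx_start[];
 *   extern uint8_t __vcore_unpg_rx_end[];
 *
 *   size_t rx_size = (size_t)(__vcore_unpg_rx_end -
 *                             __vcore_unpg_rx_start);
 */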