/* SPDX-License-Identifier: (BSD-2-Clause AND MIT) */
/*
 * Copyright (c) 2014, Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2008-2010 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <mm/core_mmu.h>
#include <platform_config.h>
#include <util.h>

/*
 * TEE_RAM_VA_START:            The start virtual address of the TEE RAM
 * TEE_TEXT_VA_START:           The start virtual address of the OP-TEE text
 */
#define TEE_RAM_VA_START        TEE_RAM_START
#define TEE_TEXT_VA_START       (TEE_RAM_VA_START + \
					(TEE_LOAD_ADDR - TEE_RAM_START))
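/*
 * For example, when TEE_LOAD_ADDR == TEE_RAM_START the text starts at
 * TEE_RAM_VA_START itself; a larger load address keeps the same offset
 * into the virtual range as in the physical one.
 */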

/*
 * Note:
 * Clang 11 (ld.lld) generates a non-relocatable reference when using
 * ROUNDDOWN() from <util.h>, which does not work with ASLR.
 */
#define LD_ROUNDDOWN(x, y) ((x) - ((x) % (y)))
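/*
 * Behaves like ROUNDDOWN() for the page sizes used below,
 * e.g. LD_ROUNDDOWN(0x10345, 0x1000) == 0x10000.
 */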

OUTPUT_FORMAT(CFG_KERN_LINKER_FORMAT)
OUTPUT_ARCH(CFG_KERN_LINKER_ARCH)

ENTRY(_start)
SECTIONS
{
	. = TEE_TEXT_VA_START;
#ifdef ARM32
	ASSERT(!(TEE_TEXT_VA_START & 31), "text start should align to 32 bytes")
#endif
#ifdef ARM64
	ASSERT(!(TEE_TEXT_VA_START & 127), "text start should align to 128 bytes")
#endif
	__text_start = .;

	/*
	 * Memory between TEE_TEXT_VA_START and the page-aligned value
	 * rounded down from it will be mapped with unpaged "text" section
	 * attributes: likely to be read-only/executable.
	 */
	__flatmap_unpg_rx_start = LD_ROUNDDOWN(__text_start, SMALL_PAGE_SIZE);

	.text : {
		KEEP(*(.text._start))
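		/*
		 * The identity map region below presumably holds code and
		 * data that must also be reachable at their physical
		 * addresses, for the short window while the MMU is being
		 * enabled or disabled at boot and in low-level power
		 * management.
		 */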
		__identity_map_init_start = .;
		__text_data_start = .;
		*(.identity_map.data)
		__text_data_end = .;
		*(.identity_map .identity_map.* \
			/*
			 * The one below is needed because it's a weak
			 * symbol that may be overridden by
			 * platform-specific code.
			 */
		  .text.get_core_pos_mpidr)
		__identity_map_init_end = .;
		KEEP(*(.text.init .text.plat_cpu_reset_early \
		       .text.reset .text.reset_primary .text.unhandled_cpu \
		       .text.__assert_flat_mapped_range))

#ifdef CFG_WITH_PAGER
		*(.text)
/* Include list of sections needed for paging */
#include <text_unpaged.ld.S>
#else
		*(.text .text.*)
#endif
		*(.sram.text.glue_7* .gnu.linkonce.t.*)
		. = ALIGN(8);
	}
	__text_end = .;

#ifdef CFG_CORE_RODATA_NOEXEC
	. = ALIGN(SMALL_PAGE_SIZE);
#endif
	__flatmap_unpg_rx_size = . - __flatmap_unpg_rx_start;
	__flatmap_unpg_ro_start = .;

	.rodata : ALIGN(8) {
		__rodata_start = .;
		*(.gnu.linkonce.r.*)
#ifdef CFG_WITH_PAGER
		*(.rodata .rodata.__unpaged .rodata.__unpaged.*)
#include <rodata_unpaged.ld.S>
#else
		*(.rodata .rodata.*)
#ifndef CFG_CORE_ASLR
		. = ALIGN(8);
		KEEP(*(SORT(.scattered_array*)));
#endif
#endif
		. = ALIGN(8);
		__rodata_end = .;
	}

#if defined(CFG_CORE_ASLR)
	.data.rel.ro : {
#if !defined(CFG_WITH_PAGER)
		. = ALIGN(8);
		KEEP(*(SORT(.scattered_array*)));
#endif
		*(.data.rel.ro.__unpaged .data.rel.ro.__unpaged.*)
	}
#endif

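	/*
	 * With CFG_CORE_ASLR the core is linked as a position-independent
	 * executable; GOT entries are presumably fixed up by the early
	 * relocation code before any C code runs, so no runtime PLT
	 * binding is needed.
	 */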
	.got : { *(.got.plt) *(.got) }
	.note.gnu.property : { *(.note.gnu.property) }
	.plt : { *(.plt) }

	.ctors : ALIGN(8) {
		__ctor_list = .;
		KEEP(*(.ctors .ctors.* .init_array .init_array.*))
		__ctor_end = .;
	}
	.dtors : ALIGN(8) {
		__dtor_list = .;
		KEEP(*(.dtors .dtors.* .fini_array .fini_array.*))
		__dtor_end = .;
	}
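	/*
	 * __ctor_list..__ctor_end and __dtor_list..__dtor_end bracket the
	 * constructor/destructor tables, presumably walked at boot;
	 * KEEP() stops --gc-sections from dropping entries that are only
	 * referenced through these tables.
	 */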

	/* .ARM.exidx is sorted, so has to go in its own output section.  */
	.ARM.exidx : {
		__exidx_start = .;
		*(.ARM.exidx* .gnu.linkonce.armexidx.*)
		__exidx_end = .;
	}

	.ARM.extab : {
		__extab_start = .;
		*(.ARM.extab*)
		__extab_end = .;
	}
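	/*
	 * .ARM.exidx/.ARM.extab hold the ARM EHABI unwind index and
	 * tables; __exidx_start/__exidx_end are presumably what the
	 * in-core unwinder uses when producing abort backtraces.
	 */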

	/* Start page-aligned read-write memory */
#ifdef CFG_CORE_RWDATA_NOEXEC
	. = ALIGN(SMALL_PAGE_SIZE);
#endif
	__flatmap_unpg_ro_size = . - __flatmap_unpg_ro_start;

#ifdef CFG_VIRTUALIZATION
	__flatmap_nex_rw_start = .;
	.nex_data : ALIGN(8) {
		*(.nex_data .nex_data.*)
	}

	.nex_bss : ALIGN(8) {
		__nex_bss_start = .;
		*(.nex_bss .nex_bss.*)
		__nex_bss_end = .;
	}

	/*
	 * We want to keep all nexus memory in one place, because it
	 * should always be mapped and it is easier to map one memory
	 * region than two.
	 * The next sections are NOLOAD ones, but they are followed by
	 * sections with data. Thus, this NOLOAD section will be included
	 * in the resulting binary, filled with zeroes.
	 */
	.nex_stack (NOLOAD) : {
		__nozi_stack_start = .;
		KEEP(*(.nozi_stack.stack_tmp .nozi_stack.stack_abt))
		. = ALIGN(8);
		__nozi_stack_end = .;
	}

	.nex_heap (NOLOAD) : {
		__nex_heap_start = .;
		. += CFG_CORE_NEX_HEAP_SIZE;
		. = ALIGN(16 * 1024);
		__nex_heap_end = .;
	}
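	/*
	 * The ALIGN(16 * 1024) above leaves the location counter 16 KiB
	 * aligned so that the translation tables in .nex_nozi below meet
	 * the L1 table alignment requirement, as checked by the ASSERT().
	 */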
	.nex_nozi (NOLOAD) : {
		ASSERT(!(ABSOLUTE(.) & (16 * 1024 - 1)), "align nozi to 16kB");
		KEEP(*(.nozi.mmu.base_table .nozi.mmu.l2))
	}

	. = ALIGN(SMALL_PAGE_SIZE);

	__flatmap_nex_rw_size = . - __flatmap_nex_rw_start;
	__flatmap_nex_rw_end = .;
#endif

	__flatmap_unpg_rw_start = .;

	.data : ALIGN(8) {
		/* writable data */
		__data_start_rom = .;
		/*
		 * In single-segment binaries the ROM data address is the
		 * same as the RAM data address.
		 */
		__data_start = .;
		*(.data .data.* .gnu.linkonce.d.*)
		. = ALIGN(8);
	}

	/* uninitialized data */
	.bss : {
		__data_end = .;
		__bss_start = .;
		*(.bss .bss.*)
		*(.gnu.linkonce.b.*)
		*(COMMON)
		. = ALIGN(8);
		__bss_end = .;
	}

	.heap1 (NOLOAD) : {
		/*
		 * We're keeping track of the padding added before the
		 * .nozi section so we can do something useful with
		 * this otherwise wasted memory.
		 */
		__heap1_start = .;
#ifndef CFG_WITH_PAGER
		. += CFG_CORE_HEAP_SIZE;
#endif
#ifdef CFG_WITH_LPAE
		. = ALIGN(4 * 1024);
#else
		. = ALIGN(16 * 1024);
#endif
		__heap1_end = .;
	}
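	/*
	 * With LPAE the translation tables in .nozi presumably only need
	 * 4 KiB alignment, while the short-descriptor format's L1 table
	 * needs 16 KiB, hence the two ALIGN() values above.
	 */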
	/*
	 * Uninitialized data that shouldn't be zero initialized at
	 * runtime.
	 *
	 * The L1 MMU table requires 16 KiB alignment.
	 */
	.nozi (NOLOAD) : {
		__nozi_start = .;
		KEEP(*(.nozi .nozi.*))
		. = ALIGN(16);
		__nozi_end = .;
		/*
		 * If virtualization is enabled, the abt and tmp stacks are
		 * placed in the .nex_stack section above and only thread
		 * stacks go here.
		 */
		__nozi_stack_start = .;
		KEEP(*(.nozi_stack .nozi_stack.*))
		. = ALIGN(8);
		__nozi_stack_end = .;
	}

#ifdef CFG_WITH_PAGER
	.heap2 (NOLOAD) : {
		__heap2_start = .;
		/*
		 * Reserve additional memory for the heap: the total should
		 * be at least CFG_CORE_HEAP_SIZE, minus what has already
		 * been reserved in .heap1.
		 */
		. += CFG_CORE_HEAP_SIZE - (__heap1_end - __heap1_start);
		. = ALIGN(SMALL_PAGE_SIZE);
		__heap2_end = .;
	}

	/* Start page-aligned read-only memory */
	__flatmap_unpg_rw_size = . - __flatmap_unpg_rw_start;

	__init_start = .;
	__flatmap_init_rx_start = .;

	ASSERT(!(__flatmap_init_rx_start & (SMALL_PAGE_SIZE - 1)),
		"read-write memory is not page aligned")

	.text_init : {
/*
 * Include the list of sections needed for boot initialization. This
 * list overlaps with unpaged.ld.S, but since unpaged.ld.S comes first
 * all those sections will go into the unpaged area.
 */
#include <text_init.ld.S>
		KEEP(*(.text.startup.*));
		/* Make sure constructor functions are available during init */
		KEEP(*(.text._GLOBAL__sub_*));
		. = ALIGN(8);
	}

#ifdef CFG_CORE_RODATA_NOEXEC
	. = ALIGN(SMALL_PAGE_SIZE);
#endif
	__flatmap_init_rx_size = . - __flatmap_init_rx_start;
	__flatmap_init_ro_start = .;

	.rodata_init : {
#include <rodata_init.ld.S>
#ifndef CFG_CORE_ASLR
		. = ALIGN(8);
		KEEP(*(SORT(.scattered_array*)));
#endif
	}
#ifdef CFG_CORE_ASLR
	.data.rel.ro_init : ALIGN(8) {
		KEEP(*(SORT(.scattered_array*)));
	}
#endif
	. = ALIGN(8);
	__ro_and_relro_data_init_end = .;

	__init_end = ALIGN(__ro_and_relro_data_init_end, SMALL_PAGE_SIZE);
	__get_tee_init_end = __init_end;
	__init_size = __init_end - __init_start;

	/* vcore flat map stops here. No need to page align, rodata follows. */
	__flatmap_init_ro_size = __init_end - __flatmap_init_ro_start;

	.rodata_pageable : ALIGN(8) {
		*(.rodata*)
	}

#ifdef CFG_CORE_RODATA_NOEXEC
	. = ALIGN(SMALL_PAGE_SIZE);
#endif

	.text_pageable : ALIGN(8) {
		*(.text*)
		. = ALIGN(SMALL_PAGE_SIZE);
	}

	__pageable_part_end = .;
	__pageable_part_start = __init_end;
	__pageable_start = __init_start;
	__pageable_end = __pageable_part_end;
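	/*
	 * __pageable_start..__pageable_end covers everything the pager
	 * hashes and can demand-load; the init part up to __init_end is
	 * presumably kept resident only until boot finishes, after which
	 * __pageable_part_start..__pageable_part_end remains pageable.
	 */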

	ASSERT(TEE_LOAD_ADDR >= TEE_RAM_START,
		"Load address before start of physical memory")
	ASSERT(TEE_LOAD_ADDR < (TEE_RAM_START + TEE_RAM_PH_SIZE),
		"Load address after end of physical memory")
	ASSERT((TEE_RAM_VA_START + TEE_RAM_PH_SIZE - __init_end) >
		SMALL_PAGE_SIZE, "Too few free pages to initialize paging")

#endif /*CFG_WITH_PAGER*/

#ifdef CFG_CORE_SANITIZE_KADDRESS
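	/*
	 * KASan keeps one shadow byte per 8 bytes of memory, so the
	 * shadow needs 1/9 of the address range and is placed in the
	 * top ninth of TEE_RAM_VA_SIZE.
	 */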
	. = TEE_RAM_VA_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8;
	. = ALIGN(8);
	.asan_shadow : {
		__asan_shadow_start = .;
		. += TEE_RAM_VA_SIZE / 9;
		__asan_shadow_end = .;
		__asan_shadow_size = __asan_shadow_end - __asan_shadow_start;
	}
#endif /*CFG_CORE_SANITIZE_KADDRESS*/

	__end = .;

#ifndef CFG_WITH_PAGER
	__init_size = __data_end - TEE_TEXT_VA_START;
#endif
	/*
	 * Guard against moving the location counter backwards in the
	 * assignment below.
	 */
	ASSERT(. <= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE),
		"TEE_RAM_VA_SIZE is too small")
	. = TEE_RAM_VA_START + TEE_RAM_VA_SIZE;

	_end_of_ram = .;

#ifndef CFG_WITH_PAGER
	__flatmap_unpg_rw_size = _end_of_ram - __flatmap_unpg_rw_start;
	__get_tee_init_end = .;
#endif

	/*
	 * These regions will not become a normal part of the dumped
	 * binary; instead, some are interpreted by the dump script and
	 * converted into a suitable format for OP-TEE itself to use.
	 */
	.dynamic : { *(.dynamic) }
	.hash : { *(.hash) }
	.dynsym : { *(.dynsym) }
	.dynstr : { *(.dynstr) }

	.rel : {
		*(.rel.*)
	}
	.rela : {
		*(.rela.*)
	}
#ifndef CFG_CORE_ASLR
	ASSERT(SIZEOF(.rel) == 0, "Relocation entries not expected")
	ASSERT(SIZEOF(.rela) == 0, "Relocation entries not expected")
#endif

	/DISCARD/ : {
		/* Strip unnecessary stuff */
		*(.comment .note .eh_frame .interp)
		/* Strip meta variables */
		*(__keep_meta_vars*)
	}

}

/* Unpaged read-only memories */
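/*
 * The __vcore_* symbols below are presumably the page-granule regions
 * the core actually maps, derived from the raw __flatmap_* layout
 * bounds. Without CFG_CORE_RODATA_NOEXEC, rodata is folded into the
 * executable region so a single mapping suffices.
 */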
__vcore_unpg_rx_start = __flatmap_unpg_rx_start;
__vcore_unpg_ro_start = __flatmap_unpg_ro_start;
#ifdef CFG_CORE_RODATA_NOEXEC
__vcore_unpg_rx_size = __flatmap_unpg_rx_size;
__vcore_unpg_ro_size = __flatmap_unpg_ro_size;
#else
__vcore_unpg_rx_size = __flatmap_unpg_rx_size + __flatmap_unpg_ro_size;
__vcore_unpg_ro_size = 0;
#endif
__vcore_unpg_rx_end = __vcore_unpg_rx_start + __vcore_unpg_rx_size;
__vcore_unpg_ro_end = __vcore_unpg_ro_start + __vcore_unpg_ro_size;

/* Unpaged read-write memory */
__vcore_unpg_rw_start = __flatmap_unpg_rw_start;
__vcore_unpg_rw_size = __flatmap_unpg_rw_size;
__vcore_unpg_rw_end = __vcore_unpg_rw_start + __vcore_unpg_rw_size;

#ifdef CFG_VIRTUALIZATION
/* Nexus read-write memory */
__vcore_nex_rw_start = __flatmap_nex_rw_start;
__vcore_nex_rw_size = __flatmap_nex_rw_size;
__vcore_nex_rw_end = __vcore_nex_rw_start + __vcore_nex_rw_size;
#endif

#ifdef CFG_WITH_PAGER
/*
 * Core init mapping shall cover up to the end of the physical RAM.
 * This is required since the hash table is appended to the
 * binary data after the firmware build sequence.
 */
#define __FLATMAP_PAGER_TRAILING_SPACE	\
	(TEE_RAM_START + TEE_RAM_PH_SIZE - \
		(__flatmap_init_ro_start + __flatmap_init_ro_size))

/* Paged/init read-only memories */
__vcore_init_rx_start = __flatmap_init_rx_start;
__vcore_init_ro_start = __flatmap_init_ro_start;
#ifdef CFG_CORE_RODATA_NOEXEC
__vcore_init_rx_size = __flatmap_init_rx_size;
__vcore_init_ro_size = __flatmap_init_ro_size + __FLATMAP_PAGER_TRAILING_SPACE;
#else
__vcore_init_rx_size = __flatmap_init_rx_size + __flatmap_init_ro_size +
		       __FLATMAP_PAGER_TRAILING_SPACE;
__vcore_init_ro_size = 0;
#endif /* CFG_CORE_RODATA_NOEXEC */
__vcore_init_rx_end = __vcore_init_rx_start + __vcore_init_rx_size;
__vcore_init_ro_end = __vcore_init_ro_start + __vcore_init_ro_size;
#endif /* CFG_WITH_PAGER */

#ifdef CFG_CORE_SANITIZE_KADDRESS
__asan_map_start = (__asan_shadow_start / SMALL_PAGE_SIZE) *
		   SMALL_PAGE_SIZE;
__asan_map_end = ((__asan_shadow_end - 1) / SMALL_PAGE_SIZE) *
		 SMALL_PAGE_SIZE + SMALL_PAGE_SIZE;
__asan_map_size = __asan_map_end - __asan_map_start;
#endif /*CFG_CORE_SANITIZE_KADDRESS*/
504