/* SPDX-License-Identifier: (BSD-2-Clause AND MIT) */
/*
 * Copyright (c) 2014, Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2008-2010 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <mm/core_mmu.h>
#include <platform_config.h>
#include <util.h>

/*
 * Note:
 * Clang 11 (ld.lld) generates a non-relocatable reference when ROUNDDOWN()
 * from <util.h> is used here, which does not work with ASLR.
 */
#define LD_ROUNDDOWN(x, y) ((x) - ((x) % (y)))
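/*
 * For example (values are illustrative only): with x = 0x40001234 and
 * y = SMALL_PAGE_SIZE = 0x1000, LD_ROUNDDOWN() yields
 * 0x40001234 - (0x40001234 % 0x1000) = 0x40001000, i.e. the address
 * rounded down to the nearest page boundary, using only operators the
 * linker keeps relocatable.
 */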

OUTPUT_FORMAT(CFG_KERN_LINKER_FORMAT)
OUTPUT_ARCH(CFG_KERN_LINKER_ARCH)

ENTRY(_start)
SECTIONS
{
	. = TEE_LOAD_ADDR;
#ifdef ARM32
	ASSERT(!(TEE_LOAD_ADDR & 31), "text start should align to 32 bytes")
#endif
#ifdef ARM64
	ASSERT(!(TEE_LOAD_ADDR & 127), "text start should align to 128 bytes")
#endif
	__text_start = .;

	/*
	 * Memory between the page-aligned, rounded-down address and
	 * TEE_LOAD_ADDR will be mapped with the unpaged "text" section
	 * attributes: likely to be read-only/executable.
	 */
	__flatmap_unpg_rx_start = LD_ROUNDDOWN(__text_start, SMALL_PAGE_SIZE);
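	/*
	 * Illustration (hypothetical address): if TEE_LOAD_ADDR were
	 * 0x0e100500, __flatmap_unpg_rx_start would become 0x0e100000 and
	 * the 0x500 bytes below the load address would be covered by the
	 * same read-only/executable flat mapping as the text that follows.
	 */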

	.text : {
		KEEP(*(.text._start))
		__identity_map_init_start = .;
		__text_data_start = .;
		*(.identity_map.data)
		__text_data_end = .;
		*(.identity_map .identity_map.* \
			/*
			 * The one below is needed because it's a weak
			 * symbol that may be overridden by platform-
			 * specific code.
			 */
		  .text.get_core_pos_mpidr)
		__identity_map_init_end = .;
		KEEP(*(.text.init .text.plat_cpu_reset_early \
		       .text.reset .text.reset_primary .text.unhandled_cpu \
		       .text.__assert_flat_mapped_range))

#ifdef CFG_WITH_PAGER
		*(.text)
/* Include list of sections needed for paging */
#include <text_unpaged.ld.S>
#else
		*(.text .text.*)
#endif
		*(.sram.text.glue_7* .gnu.linkonce.t.*)
		. = ALIGN(8);
	}
	__text_end = .;

#ifdef CFG_CORE_RODATA_NOEXEC
	. = ALIGN(SMALL_PAGE_SIZE);
#endif
	__flatmap_unpg_rx_size = . - __flatmap_unpg_rx_start;
	__flatmap_unpg_ro_start = .;

	.rodata : ALIGN(8) {
		__rodata_start = .;
		*(.gnu.linkonce.r.*)
#ifdef CFG_WITH_PAGER
		*(.rodata .rodata.__unpaged .rodata.__unpaged.*)
#include <rodata_unpaged.ld.S>
#else
		*(.rodata .rodata.*)
#ifndef CFG_CORE_ASLR
		. = ALIGN(8);
		KEEP(*(SORT(.scattered_array*)));
#endif
#endif
		. = ALIGN(8);
		__rodata_end = .;
	}

#if defined(CFG_CORE_ASLR)
	.data.rel.ro : {
#if !defined(CFG_WITH_PAGER)
		. = ALIGN(8);
		KEEP(*(SORT(.scattered_array*)));
#endif
		*(.data.rel.ro.__unpaged .data.rel.ro.__unpaged.*)
	}
#endif
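	/*
	 * A sketch of the reasoning, inferred from the placement above
	 * rather than from separate documentation: scattered array entries
	 * contain pointers, so with CFG_CORE_ASLR they need boot-time
	 * relocation and are collected in .data.rel.ro; without ASLR they
	 * can stay in read-only .rodata.
	 */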

	.got : { *(.got.plt) *(.got) }
	.note.gnu.property : { *(.note.gnu.property) }
	.plt : { *(.plt) }

	.ctors : ALIGN(8) {
		__ctor_list = .;
		KEEP(*(.ctors .ctors.* .init_array .init_array.*))
		__ctor_end = .;
	}
	.dtors : ALIGN(8) {
		__dtor_list = .;
		KEEP(*(.dtors .dtors.* .fini_array .fini_array.*))
		__dtor_end = .;
	}
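	/*
	 * Note: KEEP() stops the linker from discarding these input
	 * sections under --gc-sections. Nothing references the
	 * constructor/destructor arrays directly; they are presumably only
	 * walked at runtime between __ctor_list/__ctor_end and
	 * __dtor_list/__dtor_end.
	 */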

	/* .ARM.exidx is sorted, so has to go in its own output section. */
	.ARM.exidx : {
		__exidx_start = .;
		*(.ARM.exidx* .gnu.linkonce.armexidx.*)
		__exidx_end = .;
	}

	.ARM.extab : {
		__extab_start = .;
		*(.ARM.extab*)
		__extab_end = .;
	}

	/* Start page aligned read-write memory */
#ifdef CFG_CORE_RWDATA_NOEXEC
	. = ALIGN(SMALL_PAGE_SIZE);
#endif
	__flatmap_unpg_ro_size = . - __flatmap_unpg_ro_start;

#ifdef CFG_NS_VIRTUALIZATION
	__flatmap_nex_rw_start = .;
	.nex_data : ALIGN(8) {
		*(.nex_data .nex_data.*)
	}

	.nex_bss : ALIGN(8) {
		__nex_bss_start = .;
		*(.nex_bss .nex_bss.*)
		__nex_bss_end = .;
	}

	/*
	 * We want to keep all nexus memory in one place, because it
	 * should always be mapped and it is easier to map one memory
	 * region than two.
	 * The next sections are NOLOAD ones, but they are followed by
	 * sections with data. Thus, these NOLOAD sections will be
	 * included in the resulting binary, filled with zeroes.
	 */
	.nex_stack (NOLOAD) : {
		__nozi_stack_start = .;
		KEEP(*(.nozi_stack.stack_tmp .nozi_stack.stack_abt))
		. = ALIGN(8);
		__nozi_stack_end = .;
	}

	.nex_heap (NOLOAD) : {
		__nex_heap_start = .;
		. += CFG_CORE_NEX_HEAP_SIZE;
		. = ALIGN(16 * 1024);
		__nex_heap_end = .;
	}
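	/*
	 * The ALIGN(16 * 1024) above ends .nex_heap on a 16 KiB boundary
	 * so that the translation tables placed in .nex_nozi below start
	 * 16 KiB aligned, which is what the ASSERT() at the top of that
	 * section checks.
	 */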
	.nex_nozi (NOLOAD) : {
		ASSERT(!(ABSOLUTE(.) & (16 * 1024 - 1)), "align nozi to 16kB");
		KEEP(*(.nozi.mmu.base_table .nozi.mmu.l2))
	}

	. = ALIGN(SMALL_PAGE_SIZE);

	__flatmap_nex_rw_size = . - __flatmap_nex_rw_start;
	__flatmap_nex_rw_end = .;
#endif

	__flatmap_unpg_rw_start = .;

	.data : ALIGN(8) {
		/* writable data */
		__data_start_rom = .;
		/* in single segment binaries, the ROM data address is the
		   same as the RAM data address */
		__data_start = .;
		*(.data .data.* .gnu.linkonce.d.*)
		. = ALIGN(8);
	}

	/* uninitialized data */
	.bss : {
		__data_end = .;
		__bss_start = .;
		*(.bss .bss.*)
		*(.gnu.linkonce.b.*)
		*(COMMON)
		. = ALIGN(8);
		__bss_end = .;
	}

	.heap1 (NOLOAD) : {
		/*
		 * We're keeping track of the padding added before the
		 * .nozi section so we can do something useful with
		 * this otherwise wasted memory.
		 */
		__heap1_start = .;
#ifndef CFG_WITH_PAGER
		. += CFG_CORE_HEAP_SIZE;
#endif
#ifdef CFG_WITH_LPAE
		. = ALIGN(4 * 1024);
#else
		. = ALIGN(16 * 1024);
#endif
		__heap1_end = .;
	}
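	/*
	 * The ALIGN() above pads up to the alignment required by the first
	 * translation table in .nozi: 4 KiB tables with LPAE, a 16 KiB L1
	 * table otherwise. Since .heap1 ends exactly at that boundary, the
	 * padding lands inside the heap and is usable instead of wasted.
	 */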
	/*
	 * Uninitialized data that shouldn't be zero initialized at
	 * runtime.
	 *
	 * L1 mmu table requires 16 KiB alignment
	 */
	.nozi (NOLOAD) : {
		__nozi_start = .;
		KEEP(*(.nozi .nozi.*))
		. = ALIGN(16);
		__nozi_end = .;
		/*
		 * If virtualization is enabled, the abt and tmp stacks are
		 * placed in the .nex_stack section above, and only thread
		 * stacks go here.
		 */
		__nozi_stack_start = .;
		KEEP(*(.nozi_stack .nozi_stack.*))
		. = ALIGN(8);
		__nozi_stack_end = .;
	}

#ifdef CFG_WITH_PAGER
	.heap2 (NOLOAD) : {
		__heap2_start = .;
		/*
		 * Reserve additional memory for the heap. The total should
		 * be at least CFG_CORE_HEAP_SIZE, counting what has already
		 * been reserved in .heap1.
		 */
		. += CFG_CORE_HEAP_SIZE - (__heap1_end - __heap1_start);
		. = ALIGN(SMALL_PAGE_SIZE);
		__heap2_end = .;
	}
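	/*
	 * Worked example with made-up numbers: if CFG_CORE_HEAP_SIZE is
	 * 64 KiB and the alignment padding captured in .heap1 came to
	 * 12 KiB, .heap2 reserves the remaining 52 KiB, so the two heap
	 * chunks together still provide the full 64 KiB.
	 */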

	/* Start page aligned read-only memory */
	__flatmap_unpg_rw_size = . - __flatmap_unpg_rw_start;

	__init_start = .;
	__flatmap_init_rx_start = .;

	ASSERT(!(__flatmap_init_rx_start & (SMALL_PAGE_SIZE - 1)),
		"read-write memory is not page aligned")

	.text_init : {
		__text_init_start = .;
/*
 * Include list of sections needed for boot initialization. This list
 * overlaps with unpaged.ld.S, but since unpaged.ld.S comes first, all
 * those sections will go into the unpaged area.
 */
#include <text_init.ld.S>
		KEEP(*(.text.startup.*));
		/* Make sure constructor functions are available during init */
		KEEP(*(.text._GLOBAL__sub_*));
		. = ALIGN(8);
		__text_init_end = .;
	}

#ifdef CFG_CORE_RODATA_NOEXEC
	. = ALIGN(SMALL_PAGE_SIZE);
#endif
	__flatmap_init_rx_size = . - __flatmap_init_rx_start;
	__flatmap_init_ro_start = .;

	.rodata_init : {
		__rodata_init_start = .;
#include <rodata_init.ld.S>
#ifndef CFG_CORE_ASLR
		. = ALIGN(8);
		KEEP(*(SORT(.scattered_array*)));
#endif
		__rodata_init_end = .;
	}
#ifdef CFG_CORE_ASLR
	.data.rel.ro_init : ALIGN(8) {
		KEEP(*(SORT(.scattered_array*)));
	}
#endif
	. = ALIGN(8);
	__ro_and_relro_data_init_end = .;

	__init_end = ALIGN(__ro_and_relro_data_init_end, SMALL_PAGE_SIZE);
	__get_tee_init_end = __init_end;
	__init_size = __init_end - __init_start;

	/* vcore flat map stops here. No need to page align, rodata follows. */
	__flatmap_init_ro_size = __init_end - __flatmap_init_ro_start;

	.rodata_pageable : ALIGN(8) {
		__rodata_pageable_start = .;
		*(.rodata*)
		__rodata_pageable_end = .;
	}

#ifdef CFG_CORE_RODATA_NOEXEC
	. = ALIGN(SMALL_PAGE_SIZE);
#endif

	.text_pageable : ALIGN(8) {
		__text_pageable_start = .;
		*(.text*)
		. = ALIGN(SMALL_PAGE_SIZE);
		__text_pageable_end = .;
	}

	__pageable_part_end = .;
	__pageable_part_start = __init_end;
	__pageable_start = __init_start;
	__pageable_end = __pageable_part_end;

	ASSERT(TEE_LOAD_ADDR >= TEE_RAM_START,
		"Load address before start of physical memory")
	ASSERT(TEE_LOAD_ADDR < (TEE_RAM_START + TEE_RAM_PH_SIZE),
		"Load address after end of physical memory")
	ASSERT((TEE_RAM_START + TEE_RAM_PH_SIZE - __init_end) >
		SMALL_PAGE_SIZE, "Too few free pages to initialize paging")

#endif /*CFG_WITH_PAGER*/

#ifdef CFG_CORE_SANITIZE_KADDRESS
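	/*
	 * A sketch of the layout, read from the expressions below (the
	 * exact offset handling is an assumption): KASan uses one shadow
	 * byte per 8 bytes of instrumented memory, so the top 1/9 of
	 * TEE_RAM_VA_SIZE is set aside as shadow for the lower 8/9,
	 * since (8/9) / 8 = 1/9.
	 */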
	. = TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8;
	. = ALIGN(8);
	.asan_shadow : {
		__asan_shadow_start = .;
		. += TEE_RAM_VA_SIZE / 9;
		__asan_shadow_end = .;
		__asan_shadow_size = __asan_shadow_end - __asan_shadow_start;
	}
#endif /*CFG_CORE_SANITIZE_KADDRESS*/

	__end = .;

#ifndef CFG_WITH_PAGER
	__init_size = __data_end - TEE_LOAD_ADDR;
#endif
	/*
	 * Guard against moving the location counter backwards in the
	 * assignment below.
	 */
	ASSERT(. <= (TEE_RAM_START + TEE_RAM_VA_SIZE),
		"TEE_RAM_VA_SIZE is too small")
	. = TEE_RAM_START + TEE_RAM_VA_SIZE;

	_end_of_ram = .;

#ifndef CFG_WITH_PAGER
	__flatmap_unpg_rw_size = _end_of_ram - __flatmap_unpg_rw_start;
	__get_tee_init_end = .;
#endif

	/*
	 * These regions will not become a normal part of the dumped
	 * binary; instead, some are interpreted by the dump script and
	 * converted into a format suitable for OP-TEE itself to use.
	 */
	.dynamic : { *(.dynamic) }
	.hash : { *(.hash) }
	.dynsym : { *(.dynsym) }
	.dynstr : { *(.dynstr) }

	.rel : {
		*(.rel.*)
	}
	.rela : {
		*(.rela.*)
	}
#ifndef CFG_CORE_ASLR
	ASSERT(SIZEOF(.rel) == 0, "Relocation entries not expected")
	ASSERT(SIZEOF(.rela) == 0, "Relocation entries not expected")
#endif

	/DISCARD/ : {
		/* Strip unnecessary stuff */
		*(.comment .note .eh_frame .interp)
		/* Strip meta variables */
		*(__keep_meta_vars*)
	}

}

/* Unpaged read-only memories */
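/*
 * When CFG_CORE_RODATA_NOEXEC is disabled, the read-only data is folded
 * into the executable region below so a single rx mapping covers both and
 * the separate ro region has size 0. With the option enabled, both
 * regions were page aligned above and can be mapped with different
 * permissions.
 */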
__vcore_unpg_rx_start = __flatmap_unpg_rx_start;
__vcore_unpg_ro_start = __flatmap_unpg_ro_start;
#ifdef CFG_CORE_RODATA_NOEXEC
__vcore_unpg_rx_size = __flatmap_unpg_rx_size;
__vcore_unpg_ro_size = __flatmap_unpg_ro_size;
#else
__vcore_unpg_rx_size = __flatmap_unpg_rx_size + __flatmap_unpg_ro_size;
__vcore_unpg_ro_size = 0;
#endif
__vcore_unpg_rx_end = __vcore_unpg_rx_start + __vcore_unpg_rx_size;
__vcore_unpg_ro_end = __vcore_unpg_ro_start + __vcore_unpg_ro_size;

/* Unpaged read-write memory */
__vcore_unpg_rw_start = __flatmap_unpg_rw_start;
__vcore_unpg_rw_size = __flatmap_unpg_rw_size;
__vcore_unpg_rw_end = __vcore_unpg_rw_start + __vcore_unpg_rw_size;

#ifdef CFG_NS_VIRTUALIZATION
/* Nexus read-write memory */
__vcore_nex_rw_start = __flatmap_nex_rw_start;
__vcore_nex_rw_size = __flatmap_nex_rw_size;
__vcore_nex_rw_end = __vcore_nex_rw_start + __vcore_nex_rw_size;
#endif

#ifdef CFG_WITH_PAGER
/*
 * The core init mapping shall cover up to the end of physical RAM.
 * This is required since the hash table is appended to the binary
 * data after the firmware build sequence.
 */
#define __FLATMAP_PAGER_TRAILING_SPACE	\
	(TEE_RAM_START + TEE_RAM_PH_SIZE - \
		(__flatmap_init_ro_start + __flatmap_init_ro_size))
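/*
 * In other words, the trailing space is whatever is left between the end
 * of the init read-only data and the end of physical TEE RAM. As a
 * made-up example: with TEE_RAM_START + TEE_RAM_PH_SIZE = 0x0e200000 and
 * the init ro data ending at 0x0e1f8000, the init mapping is extended by
 * 0x8000 bytes so the appended hash table is covered as well.
 */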

/* Paged/init read-only memories */
__vcore_init_rx_start = __flatmap_init_rx_start;
__vcore_init_ro_start = __flatmap_init_ro_start;
#ifdef CFG_CORE_RODATA_NOEXEC
__vcore_init_rx_size = __flatmap_init_rx_size;
__vcore_init_ro_size = __flatmap_init_ro_size + __FLATMAP_PAGER_TRAILING_SPACE;
#else
__vcore_init_rx_size = __flatmap_init_rx_size + __flatmap_init_ro_size +
		       __FLATMAP_PAGER_TRAILING_SPACE;
__vcore_init_ro_size = 0;
#endif /* CFG_CORE_RODATA_NOEXEC */
__vcore_init_rx_end = __vcore_init_rx_start + __vcore_init_rx_size;
__vcore_init_ro_end = __vcore_init_ro_start + __vcore_init_ro_size;
#endif /* CFG_WITH_PAGER */

#ifdef CFG_CORE_SANITIZE_KADDRESS
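/*
 * The divisions below round the shadow region out to whole pages:
 * __asan_map_start is __asan_shadow_start rounded down to a page
 * boundary and __asan_map_end is __asan_shadow_end rounded up, e.g.
 * (illustrative values) a shadow spanning 0x0e170123..0x0e1f0456 maps
 * as 0x0e170000..0x0e1f1000 with SMALL_PAGE_SIZE = 0x1000.
 */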
__asan_map_start = (__asan_shadow_start / SMALL_PAGE_SIZE) *
		   SMALL_PAGE_SIZE;
__asan_map_end = ((__asan_shadow_end - 1) / SMALL_PAGE_SIZE) *
		 SMALL_PAGE_SIZE + SMALL_PAGE_SIZE;
__asan_map_size = __asan_map_end - __asan_map_start;
#endif /*CFG_CORE_SANITIZE_KADDRESS*/