/* SPDX-License-Identifier: (BSD-2-Clause AND MIT) */
/*
 * Copyright 2022-2023 NXP
 */

/*
 * Copyright (c) 2014, Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2008-2010 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <mm/core_mmu.h>
#include <platform_config.h>
#include <util.h>

/*
 * Note:
 * Clang 11 (ld.lld) generates a non-relocatable reference when using
 * ROUNDDOWN() from <util.h>, which does not work with ASLR.
 */
#define LD_ROUNDDOWN(x, y) ((x) - ((x) % (y)))
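/*
 * For example, with SMALL_PAGE_SIZE = 0x1000,
 * LD_ROUNDDOWN(0x80201234, 0x1000) evaluates to 0x80201000: the
 * remainder is subtracted directly, keeping the expression
 * relocatable for ld.lld.
 */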

OUTPUT_FORMAT(CFG_KERN_LINKER_FORMAT)
OUTPUT_ARCH(CFG_KERN_LINKER_ARCH)

ENTRY(_start)
SECTIONS
{
	. = TEE_LOAD_ADDR;
	/* Ensure the text section is page-aligned */
	ASSERT(!(TEE_LOAD_ADDR & (SMALL_PAGE_SIZE - 1)),
	       "text start should align to 4 KiB")

	__text_start = .;

	/*
	 * Memory between the rounded-down, page-aligned address and
	 * TEE_LOAD_ADDR will be mapped with the unpaged "text" section
	 * attributes: likely read-only/executable.
	 */
	__flatmap_rx_start = LD_ROUNDDOWN(__text_start, SMALL_PAGE_SIZE);

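	/*
	 * Code and data needed while the MMU is being enabled are
	 * grouped at the start of .text so that a single identity
	 * (VA == PA) mapping, bounded by the __identity_map_* symbols,
	 * can cover them.
	 */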
	.text : {
		KEEP(*(.text._start))
		__identity_map_init_start = .;
		__text_data_start = .;
		*(.identity_map.data)
		__text_data_end = .;
		*(.identity_map .identity_map.*)
		__identity_map_init_end = .;
		*(.text .text.*)
		*(.sram.text.glue_7* .gnu.linkonce.t.*)
		. = ALIGN(8);
	}
	__text_end = .;

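	/*
	 * With CFG_CORE_RODATA_NOEXEC, read-only data starts on its own
	 * page so that it can be mapped with different permissions than
	 * the executable region above.
	 */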
#ifdef CFG_CORE_RODATA_NOEXEC
	. = ALIGN(SMALL_PAGE_SIZE);
#endif
	__flatmap_rx_size = . - __flatmap_rx_start;
	__flatmap_ro_start = .;

	.rodata : ALIGN(8) {
		__rodata_start = .;
		*(.gnu.linkonce.r.*)
		*(.rodata .rodata.*)
#ifndef CFG_CORE_ASLR
		. = ALIGN(8);
		KEEP(*(SORT(.scattered_array*)));
#endif
		. = ALIGN(8);
		__rodata_end = .;
	}

	.dynsym : {
		__dyn_sym_start = .;
		*(.dynsym)
		__dyn_sym_end = .;
	}

	.rel.dyn : {
		*(.rel.*)
	}

	.rela.dyn : ALIGN(8) {
		PROVIDE(__rel_dyn_start = .);
		*(.rela*)
		PROVIDE(__rel_dyn_end = .);
	}

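	/*
	 * With ASLR, the scattered arrays are placed in .data.rel.ro
	 * instead of .rodata (see above), since they hold addresses
	 * that the run-time relocation must update.
	 */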
#if defined(CFG_CORE_ASLR)
	.data.rel.ro : {
		. = ALIGN(8);
		KEEP(*(SORT(.scattered_array*)));
		*(.data.rel.ro.__unpaged .data.rel.ro.__unpaged.*)
	}
#endif

	.got : { *(.got.plt) *(.got) }
	.note.gnu.property : { *(.note.gnu.property) }
	.plt : { *(.plt) }

	.ctors : ALIGN(8) {
		__ctor_list = .;
		KEEP(*(.ctors .ctors.* .init_array .init_array.*))
		__ctor_end = .;
	}
	.dtors : ALIGN(8) {
		__dtor_list = .;
		KEEP(*(.dtors .dtors.* .fini_array .fini_array.*))
		__dtor_end = .;
	}

	/* Start of page-aligned read-write memory */
#ifdef CFG_CORE_RWDATA_NOEXEC
	. = ALIGN(SMALL_PAGE_SIZE);
#endif
	__flatmap_ro_size = . - __flatmap_ro_start;
	__flatmap_rw_start = .;

	.data : ALIGN(8) {
		/* Writable data */
		__data_start_rom = .;
		/*
		 * In single-segment binaries, the ROM data address is
		 * the same as the RAM data address.
		 */
		__data_start = .;
		*(.data .data.* .gnu.linkonce.d.*)
		. = ALIGN(8);
		/*
		 * To allow the linker to relax accesses to global
		 * symbols, those symbols need to be within a signed
		 * 12-bit (imm12) offset from __global_pointer$.
		 */
		PROVIDE(__global_pointer$ = . + 0x800);
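		/*
		 * The 0x800 bias centers __global_pointer$ in a 4 KiB
		 * window, so the signed 12-bit immediate reaches
		 * [gp - 0x800, gp + 0x7ff]. A sketch of the relaxation
		 * this enables: the linker may rewrite
		 *
		 *   lui   a0, %hi(sym)
		 *   addi  a0, a0, %lo(sym)
		 *
		 * into a single gp-relative
		 *
		 *   addi  a0, gp, <offset>
		 *
		 * for symbols within that window.
		 */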
		*(.sdata .sdata.* .gnu.linkonce.s.*)
	}

	/* Uninitialized data */
	.bss : {
		__data_end = .;
		__bss_start = .;
		*(.sbss .sbss.*)
		*(.gnu.linkonce.sb.*)
		*(.bss .bss.*)
		*(.gnu.linkonce.b.*)
		*(COMMON)
		. = ALIGN(8);
		__bss_end = .;
	}

	.heap1 (NOLOAD) : {
		/*
		 * We're keeping track of the padding added before the
		 * .nozi section so we can do something useful with
		 * this otherwise wasted memory.
		 */
		__heap1_start = .;
		. += CFG_CORE_HEAP_SIZE;
		. = ALIGN(4 * 1024);
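		/*
		 * __heap1_end is taken after this alignment, so the
		 * padding inserted here becomes part of the heap
		 * rather than being wasted.
		 */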
		__heap1_end = .;
	}
	/*
	 * Uninitialized data that shouldn't be zero initialized at
	 * runtime.
	 */
	.nozi (NOLOAD) : {
		__nozi_start = .;
		KEEP(*(.nozi .nozi.*))
		. = ALIGN(16);
		__nozi_end = .;
		__nozi_stack_start = .;
		KEEP(*(.nozi_stack .nozi_stack.*))
		. = ALIGN(8);
		__nozi_stack_end = .;
	}

	. = ALIGN(SMALL_PAGE_SIZE);
	__flatmap_free_start = .;
	__flatmap_rw_size = __flatmap_free_start - __flatmap_rw_start;

#ifdef CFG_CORE_SANITIZE_KADDRESS
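	/*
	 * KASAN uses one shadow byte to track eight bytes of memory, so
	 * the shadow needs 1/9 of TEE_RAM_VA_SIZE: the lower 8/9 of the
	 * VA space is left to the runtime and the shadow lives in the
	 * top 1/9.
	 */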
	. = TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8;
	. = ALIGN(8);
	.asan_shadow : {
		__asan_shadow_start = .;
		. += TEE_RAM_VA_SIZE / 9;
		__asan_shadow_end = .;
		__asan_shadow_size = __asan_shadow_end - __asan_shadow_start;
	}
#endif /*CFG_CORE_SANITIZE_KADDRESS*/

	__end = .;
	__init_size = __data_end - TEE_LOAD_ADDR;
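	/*
	 * Note: __data_end marks the start of .bss, so __init_size
	 * covers only what is actually loaded; .bss and the (NOLOAD)
	 * sections after it take no space in the binary.
	 */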

	/*
	 * Guard against moving the location counter backwards in the
	 * assignment below.
	 */
	ASSERT(. <= (TEE_RAM_START + TEE_RAM_VA_SIZE),
		"TEE_RAM_VA_SIZE is too small")
	. = TEE_RAM_START + TEE_RAM_VA_SIZE;

	_end_of_ram = .;

	__get_tee_init_end = .;
	__flatmap_free_size = _end_of_ram - __flatmap_free_start;

	. = ALIGN(8);

#ifndef CFG_CORE_ASLR
	ASSERT(SIZEOF(.rel.dyn) == 0, "Relocation entries not expected")
	ASSERT(SIZEOF(.rela.dyn) == 0, "Relocation entries not expected")
#endif

	/DISCARD/ : {
		/* Strip unnecessary stuff */
		*(.comment .note .eh_frame .interp .rela.plt)
		/* Strip meta variables */
		*(__keep_meta_vars*)
	}

}

/* Unpaged read-only memory */
__vcore_unpg_rx_start = __flatmap_rx_start;
__vcore_unpg_ro_start = __flatmap_ro_start;
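/*
 * Without CFG_CORE_RODATA_NOEXEC, the read-only data is folded into
 * the executable mapping and the dedicated read-only region is left
 * empty.
 */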
#ifdef CFG_CORE_RODATA_NOEXEC
__vcore_unpg_rx_size = __flatmap_rx_size;
__vcore_unpg_ro_size = __flatmap_ro_size;
#else
__vcore_unpg_rx_size = __flatmap_rx_size + __flatmap_ro_size;
__vcore_unpg_ro_size = 0;
#endif
__vcore_unpg_rx_end = __vcore_unpg_rx_start + __vcore_unpg_rx_size;
__vcore_unpg_ro_end = __vcore_unpg_ro_start + __vcore_unpg_ro_size;

/* Unpaged read-write memory */
__vcore_unpg_rw_start = __flatmap_rw_start;
__vcore_unpg_rw_size = __flatmap_rw_size;
__vcore_unpg_rw_end = __vcore_unpg_rw_start + __vcore_unpg_rw_size;

__vcore_free_start = __flatmap_free_start;
__vcore_free_size = __flatmap_free_size;
__vcore_free_end = __flatmap_free_start + __flatmap_free_size;

#ifdef CFG_CORE_SANITIZE_KADDRESS
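/*
 * Expand the shadow region to whole pages for the mapping: round the
 * start down and the end up to SMALL_PAGE_SIZE.
 */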
__asan_map_start = (__asan_shadow_start / SMALL_PAGE_SIZE) *
		   SMALL_PAGE_SIZE;
__asan_map_end = ((__asan_shadow_end - 1) / SMALL_PAGE_SIZE) *
		 SMALL_PAGE_SIZE + SMALL_PAGE_SIZE;
__asan_map_size = __asan_map_end - __asan_map_start;
#endif /*CFG_CORE_SANITIZE_KADDRESS*/