/* SPDX-License-Identifier: (BSD-2-Clause AND MIT) */
/*
 * Copyright 2022-2023 NXP
 */

/*
 * Copyright (c) 2014, Linaro Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2008-2010 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <mm/core_mmu.h>
#include <platform_config.h>
#include <util.h>

/*
 * Note:
 * Clang 11 (ld.lld) generates a non-relocatable reference when using
 * ROUNDDOWN() from <util.h>, which does not work with ASLR.
 */
#define LD_ROUNDDOWN(x, y) ((x) - ((x) % (y)))
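/*
 * For illustration, with hypothetical values:
 *   LD_ROUNDDOWN(0x80001234, 0x1000)
 *     = 0x80001234 - (0x80001234 % 0x1000)
 *     = 0x80001234 - 0x234
 *     = 0x80001000
 */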

OUTPUT_FORMAT(CFG_KERN_LINKER_FORMAT)
OUTPUT_ARCH(CFG_KERN_LINKER_ARCH)

ENTRY(_start)
SECTIONS
{
	. = TEE_LOAD_ADDR;
	/* Ensure text section is page aligned */
	ASSERT(!(TEE_LOAD_ADDR & (SMALL_PAGE_SIZE - 1)),
	       "text start should align to 4 KiB")

	__text_start = .;

	/*
	 * Memory between the page-aligned, rounded-down address and
	 * TEE_LOAD_ADDR will be mapped with unpaged "text" section
	 * attributes: likely read-only/executable.
	 */
	__flatmap_rx_start = LD_ROUNDDOWN(__text_start, SMALL_PAGE_SIZE);
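	/*
	 * A worked note: the ASSERT above guarantees TEE_LOAD_ADDR (and
	 * hence __text_start) is page aligned, so with a hypothetical
	 * load address of 0x0e100000 and 4 KiB pages,
	 * LD_ROUNDDOWN(0x0e100000, 0x1000) = 0x0e100000 and
	 * __flatmap_rx_start coincides with __text_start; the round down
	 * acts as a safety net for the general case.
	 */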

	.text : {
		KEEP(*(.text._start))
		__identity_map_init_start = .;
		__text_data_start = .;
		*(.identity_map.data)
		__text_data_end = .;
		*(.identity_map .identity_map.*)
		__identity_map_init_end = .;
		*(.text .text.*)
		*(.sram.text.glue_7* .gnu.linkonce.t.*)
		. = ALIGN(8);
	}
	__text_end = .;

#ifdef CFG_CORE_RODATA_NOEXEC
	. = ALIGN(SMALL_PAGE_SIZE);
#endif
	__flatmap_rx_size = . - __flatmap_rx_start;
	__flatmap_ro_start = .;

	.rodata : ALIGN(8) {
		__rodata_start = .;
		*(.gnu.linkonce.r.*)
		*(.rodata .rodata.*)
#ifndef CFG_CORE_ASLR
		. = ALIGN(8);
		KEEP(*(SORT(.scattered_array*)));
#endif
		. = ALIGN(8);
		__rodata_end = .;
	}

	.got : { *(.got.plt) *(.got) }
	.note.gnu.property : { *(.note.gnu.property) }
	.plt : { *(.plt) }

	.ctors : ALIGN(8) {
		__ctor_list = .;
		KEEP(*(.ctors .ctors.* .init_array .init_array.*))
		__ctor_end = .;
	}
	.dtors : ALIGN(8) {
		__dtor_list = .;
		KEEP(*(.dtors .dtors.* .fini_array .fini_array.*))
		__dtor_end = .;
	}

	/* Start of the page-aligned read-write memory */
#ifdef CFG_CORE_RWDATA_NOEXEC
	. = ALIGN(SMALL_PAGE_SIZE);
#endif
	__flatmap_ro_size = . - __flatmap_ro_start;
	__flatmap_rw_start = .;

	.data : ALIGN(8) {
		/* writable data */
		__data_start_rom = .;
		/*
		 * In single-segment binaries, the ROM (load) address of
		 * the data coincides with its RAM (runtime) address.
		 */
		__data_start = .;
		*(.data .data.* .gnu.linkonce.d.*)
		. = ALIGN(8);
		/*
		 * To allow the linker to relax accesses to global symbols,
		 * those symbols need to be within an imm12 (signed 12-bit)
		 * offset of __global_pointer$.
		 */
		PROVIDE(__global_pointer$ = . + 0x800);
		*(.sdata .sdata.* .gnu.linkonce.s.*)
	}
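	/*
	 * A sketch of the relaxation enabled by __global_pointer$
	 * (illustrative RISC-V assembly, not actual generated code): an
	 * absolute access such as
	 *
	 *   lui  a5, %hi(sym)
	 *   lw   a0, %lo(sym)(a5)
	 *
	 * can be relaxed by the linker into a single gp-relative
	 * instruction
	 *
	 *   lw   a0, offset(gp)
	 *
	 * provided sym lies within the signed 12-bit window
	 * [gp - 0x800, gp + 0x7ff]. Defining __global_pointer$ 0x800
	 * bytes past the start of the small-data area centers that
	 * window over it.
	 */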

	/* uninitialized data */
	.bss : {
		__data_end = .;
		__bss_start = .;
		*(.sbss .sbss.*)
		*(.gnu.linkonce.sb.*)
		*(.bss .bss.*)
		*(.gnu.linkonce.b.*)
		*(COMMON)
		. = ALIGN(8);
		__bss_end = .;
	}

	.heap1 (NOLOAD) : {
		/*
		 * We're keeping track of the padding added before the
		 * .nozi section so we can do something useful with
		 * this otherwise wasted memory.
		 */
		__heap1_start = .;
		. += CFG_CORE_HEAP_SIZE;
		. = ALIGN(4 * 1024);
		__heap1_end = .;
	}
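	/*
	 * Note: (NOLOAD) means .heap1 occupies address space but
	 * contributes no bytes to the load image. The ALIGN(4 * 1024)
	 * above rounds __heap1_end up to the next 4 KiB boundary, so the
	 * alignment padding that would otherwise precede .nozi is
	 * absorbed into the heap instead of being wasted.
	 */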
	/*
	 * Uninitialized data that shouldn't be zero initialized at
	 * runtime.
	 */
	.nozi (NOLOAD) : {
		__nozi_start = .;
		KEEP(*(.nozi .nozi.*))
		. = ALIGN(16);
		__nozi_end = .;
		__nozi_stack_start = .;
		KEEP(*(.nozi_stack .nozi_stack.*))
		. = ALIGN(8);
		__nozi_stack_end = .;
	}
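	/*
	 * A hypothetical C-side use of these sections (illustrative
	 * only; the names below are made up for this sketch): data
	 * placed in .nozi or .nozi_stack keeps its boot-time contents
	 * because it lies outside the __bss_start..__bss_end range that
	 * gets zeroed.
	 *
	 *   static uint8_t temp_stack[4096]
	 *	__attribute__((__section__(".nozi_stack.temp"),
	 *		       aligned(16)));
	 */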

	. = ALIGN(SMALL_PAGE_SIZE);
	__flatmap_free_start = .;
	__flatmap_rw_size = __flatmap_free_start - __flatmap_rw_start;

#ifdef CFG_CORE_SANITIZE_KADDRESS
	. = TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8;
	. = ALIGN(8);
	.asan_shadow : {
		__asan_shadow_start = .;
		. += TEE_RAM_VA_SIZE / 9;
		__asan_shadow_end = .;
		__asan_shadow_size = __asan_shadow_end - __asan_shadow_start;
	}
#endif /*CFG_CORE_SANITIZE_KADDRESS*/
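	/*
	 * Shadow sizing, as a sketch of the arithmetic: ASAN maps 8
	 * bytes of memory to 1 shadow byte, so splitting TEE_RAM_VA_SIZE
	 * into 8/9 instrumented memory and 1/9 shadow satisfies the
	 * ratio exactly:
	 *
	 *   shadow = ((8 / 9) * TEE_RAM_VA_SIZE) / 8 = TEE_RAM_VA_SIZE / 9
	 *
	 * hence the shadow section starts near TEE_RAM_START +
	 * (TEE_RAM_VA_SIZE * 8) / 9 and reserves TEE_RAM_VA_SIZE / 9
	 * bytes.
	 */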

	__end = .;
	__init_size = __data_end - TEE_LOAD_ADDR;

	/*
	 * Guard against moving the location counter backwards in the assignment
	 * below.
	 */
	ASSERT(. <= (TEE_RAM_START + TEE_RAM_VA_SIZE),
		"TEE_RAM_VA_SIZE is too small")
	. = TEE_RAM_START + TEE_RAM_VA_SIZE;

	_end_of_ram = .;

	__get_tee_init_end = .;
	__flatmap_free_size = _end_of_ram - __flatmap_free_start;

	/*
	 * These regions will not become a normal part of the dumped
	 * binary; instead, some are interpreted by the dump script and
	 * converted into a format suitable for OP-TEE itself to use.
	 */
	.dynamic : { *(.dynamic) }
	.hash : { *(.hash) }
	.dynsym : { *(.dynsym) }
	.dynstr : { *(.dynstr) }

	.rel : {
		*(.rel.*)
	}
	.rela : {
		*(.rela.*)
	}
#ifndef CFG_CORE_ASLR
	ASSERT(SIZEOF(.rel) == 0, "Relocation entries not expected")
	ASSERT(SIZEOF(.rela) == 0, "Relocation entries not expected")
#endif
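	/*
	 * Rationale for the ASSERTs above: without CFG_CORE_ASLR the
	 * core is linked for a fixed load address, so no dynamic
	 * relocation entries should remain; any stray .rel/.rela content
	 * becomes a link-time error.
	 */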

	/DISCARD/ : {
		/* Strip unnecessary stuff */
		*(.comment .note .eh_frame .interp)
		/* Strip meta variables */
		*(__keep_meta_vars*)
	}

}

/* Unpaged read-only memories */
__vcore_unpg_rx_start = __flatmap_rx_start;
__vcore_unpg_ro_start = __flatmap_ro_start;
#ifdef CFG_CORE_RODATA_NOEXEC
__vcore_unpg_rx_size = __flatmap_rx_size;
__vcore_unpg_ro_size = __flatmap_ro_size;
#else
__vcore_unpg_rx_size = __flatmap_rx_size + __flatmap_ro_size;
__vcore_unpg_ro_size = 0;
#endif
__vcore_unpg_rx_end = __vcore_unpg_rx_start + __vcore_unpg_rx_size;
__vcore_unpg_ro_end = __vcore_unpg_ro_start + __vcore_unpg_ro_size;
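/*
 * Worked example with hypothetical sizes: if __flatmap_rx_size is
 * 0x60000 and __flatmap_ro_size is 0x20000, then with
 * CFG_CORE_RODATA_NOEXEC the core maps 0x60000 bytes RX plus 0x20000
 * bytes read-only; without it, the full 0x80000 bytes are mapped RX and
 * the dedicated RO region is empty.
 */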

/* Unpaged read-write memory */
__vcore_unpg_rw_start = __flatmap_rw_start;
__vcore_unpg_rw_size = __flatmap_rw_size;
__vcore_unpg_rw_end = __vcore_unpg_rw_start + __vcore_unpg_rw_size;

__vcore_free_start = __flatmap_free_start;
__vcore_free_size = __flatmap_free_size;
__vcore_free_end = __flatmap_free_start + __flatmap_free_size;

#ifdef CFG_CORE_SANITIZE_KADDRESS
__asan_map_start = (__asan_shadow_start / SMALL_PAGE_SIZE) *
		   SMALL_PAGE_SIZE;
__asan_map_end = ((__asan_shadow_end - 1) / SMALL_PAGE_SIZE) *
		 SMALL_PAGE_SIZE + SMALL_PAGE_SIZE;
__asan_map_size = __asan_map_end - __asan_map_start;
#endif /*CFG_CORE_SANITIZE_KADDRESS*/
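/*
 * The expressions above page-align the shadow for mapping:
 * __asan_map_start rounds __asan_shadow_start down to a SMALL_PAGE_SIZE
 * boundary and __asan_map_end rounds __asan_shadow_end up, covering the
 * shadow in whole pages. For example (hypothetical addresses, 0x1000
 * pages): a shadow of [0x1e4c8, 0x21f00) maps as [0x1e000, 0x22000).
 */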