/*
 * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef BL_COMMON_LD_H
#define BL_COMMON_LD_H

#include <platform_def.h>

#ifdef __aarch64__
#define STRUCT_ALIGN	8
#define BSS_ALIGN	16
#else
#define STRUCT_ALIGN	4
#define BSS_ALIGN	8
#endif

#ifndef DATA_ALIGN
#define DATA_ALIGN	1
#endif

#define CPU_OPS						\
	. = ALIGN(STRUCT_ALIGN);			\
	__CPU_OPS_START__ = .;				\
	KEEP(*(cpu_ops))				\
	__CPU_OPS_END__ = .;

#define PARSER_LIB_DESCS				\
	. = ALIGN(STRUCT_ALIGN);			\
	__PARSER_LIB_DESCS_START__ = .;			\
	KEEP(*(.img_parser_lib_descs))			\
	__PARSER_LIB_DESCS_END__ = .;

#define RT_SVC_DESCS					\
	. = ALIGN(STRUCT_ALIGN);			\
	__RT_SVC_DESCS_START__ = .;			\
	KEEP(*(rt_svc_descs))				\
	__RT_SVC_DESCS_END__ = .;

#define PMF_SVC_DESCS					\
	. = ALIGN(STRUCT_ALIGN);			\
	__PMF_SVC_DESCS_START__ = .;			\
	KEEP(*(pmf_svc_descs))				\
	__PMF_SVC_DESCS_END__ = .;

#define FCONF_POPULATOR					\
	. = ALIGN(STRUCT_ALIGN);			\
	__FCONF_POPULATOR_START__ = .;			\
	KEEP(*(.fconf_populator))			\
	__FCONF_POPULATOR_END__ = .;

/*
 * Keep the .got section in the RO section, as it is patched prior to enabling
 * the MMU; keeping it read-only afterwards is better for security. The GOT is
 * a table of addresses, so ensure pointer-size alignment.
 */
#define GOT						\
	. = ALIGN(STRUCT_ALIGN);			\
	__GOT_START__ = .;				\
	*(.got)						\
	__GOT_END__ = .;

/*
 * The base xlat table
 *
 * It is put into the rodata section if PLAT_RO_XLAT_TABLES=1,
 * or into the bss section otherwise.
 */
#define BASE_XLAT_TABLE					\
	. = ALIGN(16);					\
	*(base_xlat_table)

#if PLAT_RO_XLAT_TABLES
#define BASE_XLAT_TABLE_RO		BASE_XLAT_TABLE
#define BASE_XLAT_TABLE_BSS
#else
#define BASE_XLAT_TABLE_RO
#define BASE_XLAT_TABLE_BSS		BASE_XLAT_TABLE
#endif

#define RODATA_COMMON					\
	RT_SVC_DESCS					\
	FCONF_POPULATOR					\
	PMF_SVC_DESCS					\
	PARSER_LIB_DESCS				\
	CPU_OPS						\
	GOT						\
	BASE_XLAT_TABLE_RO
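
/*
 * Illustrative sketch (assumption, not part of this header): a BL image
 * linker script is expected to expand RODATA_COMMON inside its read-only
 * output section, roughly as follows:
 *
 *   .rodata . : {
 *       __RODATA_START__ = .;
 *       *(SORT_BY_ALIGNMENT(.rodata*))
 *       RODATA_COMMON
 *       . = ALIGN(PAGE_SIZE);
 *       __RODATA_END__ = .;
 *   } >RAM
 *
 * The exact section name, bracketing symbols and memory region depend on
 * the individual BL image's linker script.
 */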

/*
 * .data must be placed at a lower address than the stacks if the stack
 * protector is enabled. Alternatively, the .data.stack_protector_canary
 * section can be placed independently of the main .data section.
 */
#define DATA_SECTION					\
	.data . : ALIGN(DATA_ALIGN) {			\
		__DATA_START__ = .;			\
		*(SORT_BY_ALIGNMENT(.data*))		\
		__DATA_END__ = .;			\
	}

#define STACK_SECTION					\
	stacks (NOLOAD) : {				\
		__STACKS_START__ = .;			\
		*(tzfw_normal_stacks)			\
		__STACKS_END__ = .;			\
	}
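
/*
 * Illustrative sketch (assumption, not part of this header): to satisfy the
 * ordering constraint noted above when the stack protector is enabled, a BL
 * linker script keeps DATA_SECTION at a lower address than STACK_SECTION,
 * e.g.:
 *
 *   DATA_SECTION >RAM
 *   STACK_SECTION >RAM
 *
 * where RAM is whatever memory region that image's linker script defines.
 */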

/*
 * If a BL image does not use any bakery locks, __PERCPU_BAKERY_LOCK_SIZE__
 * will be zero. For this reason, the only two valid values for
 * __PERCPU_BAKERY_LOCK_SIZE__ are 0 or the platform-defined value
 * PLAT_PERCPU_BAKERY_LOCK_SIZE.
 */
#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
#define BAKERY_LOCK_SIZE_CHECK				\
	ASSERT((__PERCPU_BAKERY_LOCK_SIZE__ == 0) ||	\
	       (__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE), \
	       "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
#else
#define BAKERY_LOCK_SIZE_CHECK
#endif

/*
 * Bakery locks are stored in normal .bss memory.
 *
 * Each lock's data is spread across multiple cache lines, one per CPU,
 * but multiple locks can share the same cache line.
 * The compiler will allocate enough memory for one CPU's bakery locks;
 * the remaining cache lines are allocated by the linker script.
 */
#if !USE_COHERENT_MEM
#define BAKERY_LOCK_NORMAL				\
	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
	__BAKERY_LOCK_START__ = .;			\
	__PERCPU_BAKERY_LOCK_START__ = .;		\
	*(bakery_lock)					\
	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
	__PERCPU_BAKERY_LOCK_END__ = .;			\
	__PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(__PERCPU_BAKERY_LOCK_END__ - __PERCPU_BAKERY_LOCK_START__); \
	. = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1)); \
	__BAKERY_LOCK_END__ = .;			\
	BAKERY_LOCK_SIZE_CHECK
#else
#define BAKERY_LOCK_NORMAL
#endif
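
/*
 * Worked example (hypothetical numbers, for illustration only): with
 * CACHE_WRITEBACK_GRANULE = 64, PLATFORM_CORE_COUNT = 4 and 40 bytes of
 * bakery_lock data emitted by the compiler, the per-CPU region is rounded
 * up to one 64-byte cache line by the ALIGN() directives above, so:
 *
 *   __PERCPU_BAKERY_LOCK_SIZE__ = 64
 *   __BAKERY_LOCK_END__ - __BAKERY_LOCK_START__ = 64 * 4 = 256 bytes
 */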

/*
 * Time-stamps are stored in normal .bss memory.
 *
 * The compiler will allocate enough memory for one CPU's time-stamps;
 * the remaining memory for the other CPUs is allocated by the linker
 * script.
 */
#define PMF_TIMESTAMP					\
	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
	__PMF_TIMESTAMP_START__ = .;			\
	KEEP(*(pmf_timestamp_array))			\
	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
	__PMF_PERCPU_TIMESTAMP_END__ = .;		\
	__PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__); \
	. = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1)); \
	__PMF_TIMESTAMP_END__ = .;
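
/*
 * Illustrative note (assumption, for clarity): given the reservation above,
 * CPU n's copy of pmf_timestamp_array is expected to start at
 *
 *   __PMF_TIMESTAMP_START__ + (n * __PERCPU_TIMESTAMP_SIZE__)
 *
 * for n in [0, PLATFORM_CORE_COUNT - 1], which is how the per-CPU slots
 * reserved by the linker are indexed.
 */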

/*
 * The .bss section gets initialised to 0 at runtime.
 * Its base address has a larger alignment for better performance of the
 * zero-initialisation code.
 */
#define BSS_SECTION					\
	.bss (NOLOAD) : ALIGN(BSS_ALIGN) {		\
		__BSS_START__ = .;			\
		*(SORT_BY_ALIGNMENT(.bss*))		\
		*(COMMON)				\
		BAKERY_LOCK_NORMAL			\
		PMF_TIMESTAMP				\
		BASE_XLAT_TABLE_BSS			\
		__BSS_END__ = .;			\
	}
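
/*
 * Illustrative sketch (assumption, not part of this header): a BL linker
 * script typically places the section and derives its size with something
 * like:
 *
 *   BSS_SECTION >RAM
 *   __BSS_SIZE__ = SIZEOF(.bss);
 *
 * so that the entrypoint code can zero the region starting at __BSS_START__
 * before it is used.
 */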

/*
 * The xlat_table section is for full, 4K-aligned page tables.
 * Removing them from .bss avoids forcing 4K alignment on
 * the .bss section. The tables are initialized to zero by the translation
 * tables library.
 */
#define XLAT_TABLE_SECTION				\
	xlat_table (NOLOAD) : {				\
		*(xlat_table)				\
	}
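
/*
 * Illustrative sketch (assumption, not part of this header): the translation
 * table section is placed by the BL image's linker script in the same way,
 * for example:
 *
 *   XLAT_TABLE_SECTION >RAM
 *
 * keeping the page-aligned tables out of .bss as described above.
 */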

#endif /* BL_COMMON_LD_H */