xref: /rk3399_ARM-atf/include/common/bl_common.ld.h (revision 34dd1e96fdae59d56d19a8d1270a03860af9f015)
1 /*
2  * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #ifndef BL_COMMON_LD_H
8 #define BL_COMMON_LD_H
9 
10 #include <platform_def.h>
11 
/*
 * STRUCT_ALIGN is the alignment applied to the linker-collected arrays of
 * descriptor structs below (cpu_ops, rt_svc_descs, ...).  It matches the
 * native pointer size of each architecture (8 bytes on AArch64, 4 on
 * AArch32), since the collected entries contain addresses.
 *
 * BSS_ALIGN is the base alignment of the .bss output section; it is made
 * larger than pointer size so the zero-initialization loop can use wide
 * stores (see the BSS_SECTION comment further down).
 */
12 #ifdef __aarch64__
13 #define STRUCT_ALIGN	8
14 #define BSS_ALIGN	16
15 #else
16 #define STRUCT_ALIGN	4
17 #define BSS_ALIGN	8
18 #endif
19 
/*
 * Alignment of the start of the .data output section.  Defaults to 1
 * (no extra alignment); a platform may pre-define DATA_ALIGN before this
 * header is included — presumably to meet a platform-specific load or
 * mapping requirement (the override sites are outside this file).
 */
20 #ifndef DATA_ALIGN
21 #define DATA_ALIGN	1
22 #endif
23 
/*
 * Gather every 'cpu_ops' input section (one CPU operations descriptor per
 * supported CPU type, registered elsewhere in the firmware) into one
 * contiguous array bounded by __CPU_OPS_START__/__CPU_OPS_END__.  KEEP()
 * stops the linker's section garbage collection from discarding the
 * entries, which are only reached by iterating between the two symbols
 * and so have no direct references.
 */
24 #define CPU_OPS						\
25 	. = ALIGN(STRUCT_ALIGN);			\
26 	__CPU_OPS_START__ = .;				\
27 	KEEP(*(cpu_ops))				\
28 	__CPU_OPS_END__ = .;
29 
/*
 * Gather the image parser library descriptors (.img_parser_lib_descs,
 * used by the trusted-boot image parser framework) into a contiguous,
 * symbol-delimited array.  KEEP() protects the otherwise-unreferenced
 * entries from linker garbage collection.
 */
30 #define PARSER_LIB_DESCS				\
31 	. = ALIGN(STRUCT_ALIGN);			\
32 	__PARSER_LIB_DESCS_START__ = .;			\
33 	KEEP(*(.img_parser_lib_descs))			\
34 	__PARSER_LIB_DESCS_END__ = .;
35 
/*
 * Gather the runtime service descriptors ('rt_svc_descs', registered by
 * the SMC handling framework) into a contiguous array delimited by
 * __RT_SVC_DESCS_START__/__RT_SVC_DESCS_END__.  KEEP() prevents the
 * entries from being dropped by section garbage collection.
 */
36 #define RT_SVC_DESCS					\
37 	. = ALIGN(STRUCT_ALIGN);			\
38 	__RT_SVC_DESCS_START__ = .;			\
39 	KEEP(*(rt_svc_descs))				\
40 	__RT_SVC_DESCS_END__ = .;
41 
/*
 * Gather the Performance Measurement Framework service descriptors
 * ('pmf_svc_descs') into a contiguous, symbol-delimited array.  KEEP()
 * protects the otherwise-unreferenced entries from garbage collection.
 */
42 #define PMF_SVC_DESCS					\
43 	. = ALIGN(STRUCT_ALIGN);			\
44 	__PMF_SVC_DESCS_START__ = .;			\
45 	KEEP(*(pmf_svc_descs))				\
46 	__PMF_SVC_DESCS_END__ = .;
47 
/*
 * Gather the firmware configuration framework populator callbacks
 * (.fconf_populator) into a contiguous, symbol-delimited array so they
 * can be iterated at boot.  KEEP() prevents garbage collection of the
 * entries, which are never referenced directly.
 */
48 #define FCONF_POPULATOR					\
49 	. = ALIGN(STRUCT_ALIGN);			\
50 	__FCONF_POPULATOR_START__ = .;			\
51 	KEEP(*(.fconf_populator))			\
52 	__FCONF_POPULATOR_END__ = .;
53 
/*
 * Keep the .got section in the RO section as it is patched prior to enabling
 * the MMU and having the .got in RO is better for security. GOT is a table of
 * addresses so ensure pointer size alignment (STRUCT_ALIGN).  Unlike the
 * descriptor arrays above, no KEEP() is needed: GOT entries are reached
 * through relocations, so the linker will not discard them.
 */
59 #define GOT						\
60 	. = ALIGN(STRUCT_ALIGN);			\
61 	__GOT_START__ = .;				\
62 	*(.got)						\
63 	__GOT_END__ = .;
64 
/*
 * The base xlat table
 *
 * It is put into the rodata section if PLAT_RO_XLAT_TABLES=1,
 * or into the bss section otherwise.
 *
 * NOTE(review): the ALIGN(16) presumably matches the maximum alignment
 * the translation-table library requires for the base (level-0/1) table —
 * confirm against the xlat tables library before changing it.
 */
71 #define BASE_XLAT_TABLE					\
72 	. = ALIGN(16);					\
73 	*(base_xlat_table)

/*
 * Exactly one of the two aliases below expands to BASE_XLAT_TABLE; the
 * other is empty.  RODATA_COMMON uses the _RO alias and BSS_SECTION the
 * _BSS alias, so the table lands in whichever section the platform chose.
 */
75 #if PLAT_RO_XLAT_TABLES
76 #define BASE_XLAT_TABLE_RO		BASE_XLAT_TABLE
77 #define BASE_XLAT_TABLE_BSS
78 #else
79 #define BASE_XLAT_TABLE_RO
80 #define BASE_XLAT_TABLE_BSS		BASE_XLAT_TABLE
81 #endif
82 
/*
 * Read-only data common to all BL images: the linker-collected descriptor
 * arrays defined above, the GOT, and — when PLAT_RO_XLAT_TABLES=1 — the
 * base translation table.  Each image's linker script expands this inside
 * its rodata output section.
 */
83 #define RODATA_COMMON					\
84 	RT_SVC_DESCS					\
85 	FCONF_POPULATOR					\
86 	PMF_SVC_DESCS					\
87 	PARSER_LIB_DESCS				\
88 	CPU_OPS						\
89 	GOT						\
90 	BASE_XLAT_TABLE_RO
91 
/*
 * .data must be placed at a lower address than the stacks if the stack
 * protector is enabled. Alternatively, the .data.stack_protector_canary
 * section can be placed independently of the main .data section.
 *
 * SORT_BY_ALIGNMENT orders the input sections from most- to least-aligned,
 * minimising the padding the linker must insert between them.
 */
97 #define DATA_SECTION					\
98 	.data . : ALIGN(DATA_ALIGN) {			\
99 		__DATA_START__ = .;			\
100 		*(SORT_BY_ALIGNMENT(.data*))		\
101 		__DATA_END__ = .;			\
102 	}
103 
/*
 * Per-CPU boot/runtime stacks, collected into their own NOLOAD section
 * (occupies memory at runtime but no space in the image file).
 *
 * NOTE(review): deliberately not defined for BL31 when RECLAIM_INIT_CODE
 * is set — in that configuration the stacks are presumably laid out by
 * the BL31-specific linker script so that init code can be reclaimed as
 * stack space; confirm against bl31's linker script.
 */
104 #if !(defined(IMAGE_BL31) && RECLAIM_INIT_CODE)
105 #define STACK_SECTION					\
106 	stacks (NOLOAD) : {				\
107 		__STACKS_START__ = .;			\
108 		*(tzfw_normal_stacks)			\
109 		__STACKS_END__ = .;			\
110 	}
111 #endif
112 
/*
 * If BL doesn't use any bakery lock then __PERCPU_BAKERY_LOCK_SIZE__
 * will be zero. For this reason, the only two valid values for
 * __PERCPU_BAKERY_LOCK_SIZE__ are 0 or the platform defined value
 * PLAT_PERCPU_BAKERY_LOCK_SIZE.
 *
 * The ASSERT() below is evaluated by the linker, so a mismatch between
 * the platform's reserved size and the actual bakery lock footprint
 * fails the build at link time rather than corrupting memory at runtime.
 * When the platform defines no size, the check expands to nothing.
 */
119 #ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
120 #define BAKERY_LOCK_SIZE_CHECK				\
121 	ASSERT((__PERCPU_BAKERY_LOCK_SIZE__ == 0) ||	\
122 	       (__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE), \
123 	       "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
124 #else
125 #define BAKERY_LOCK_SIZE_CHECK
126 #endif
127 
/*
 * Bakery locks are stored in normal .bss memory
 *
 * Each lock's data is spread across multiple cache lines, one per CPU,
 * but multiple locks can share the same cache line.
 * The compiler will allocate enough memory for one CPU's bakery locks,
 * the remaining cache lines are allocated by the linker script
 *
 * Expanded inside BSS_SECTION.  The 'bakery_lock' input sections cover a
 * single CPU's locks; the ". = . + size * (PLATFORM_CORE_COUNT - 1)"
 * advance of the location counter then reserves identical, cache-line
 * aligned replicas for the remaining CPUs.  Alignment to
 * CACHE_WRITEBACK_GRANULE keeps each CPU's replica on its own cache
 * line(s), avoiding false sharing.  Only needed when coherent memory is
 * not used for locks (!USE_COHERENT_MEM); otherwise expands to nothing.
 */
136 #if !USE_COHERENT_MEM
137 #define BAKERY_LOCK_NORMAL				\
138 	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
139 	__BAKERY_LOCK_START__ = .;			\
140 	__PERCPU_BAKERY_LOCK_START__ = .;		\
141 	*(bakery_lock)					\
142 	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
143 	__PERCPU_BAKERY_LOCK_END__ = .;			\
144 	__PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(__PERCPU_BAKERY_LOCK_END__ - __PERCPU_BAKERY_LOCK_START__); \
145 	. = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1)); \
146 	__BAKERY_LOCK_END__ = .;			\
147 	BAKERY_LOCK_SIZE_CHECK
148 #else
149 #define BAKERY_LOCK_NORMAL
150 #endif
151 
/*
 * Time-stamps are stored in normal .bss memory
 *
 * The compiler will allocate enough memory for one CPU's time-stamps,
 * the remaining memory for other CPUs is allocated by the
 * linker script
 *
 * Same per-CPU replication technique as BAKERY_LOCK_NORMAL: the
 * 'pmf_timestamp_array' input sections hold one CPU's worth of PMF
 * timestamps, and the location counter is advanced by
 * (PLATFORM_CORE_COUNT - 1) copies of that size, each replica aligned
 * to the cache writeback granule to avoid false sharing.  KEEP()
 * protects the array from linker garbage collection.
 */
159 #define PMF_TIMESTAMP					\
160 	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
161 	__PMF_TIMESTAMP_START__ = .;			\
162 	KEEP(*(pmf_timestamp_array))			\
163 	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
164 	__PMF_PERCPU_TIMESTAMP_END__ = .;		\
165 	__PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__); \
166 	. = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1)); \
167 	__PMF_TIMESTAMP_END__ = .;
168 
169 
/*
 * The .bss section gets initialised to 0 at runtime.
 * Its base address has bigger alignment (BSS_ALIGN) for better
 * performance of the zero-initialization code, which can then use wide,
 * aligned stores.  NOLOAD means the section occupies memory at runtime
 * but no space in the image file; SORT_BY_ALIGNMENT minimises padding
 * between input sections.  The bakery locks, PMF timestamps and
 * (optionally) the base xlat table are placed inside .bss via the
 * macros above, between __BSS_START__ and __BSS_END__ so they are
 * zeroed along with everything else.
 */
175 #define BSS_SECTION					\
176 	.bss (NOLOAD) : ALIGN(BSS_ALIGN) {		\
177 		__BSS_START__ = .;			\
178 		*(SORT_BY_ALIGNMENT(.bss*))		\
179 		*(COMMON)				\
180 		BAKERY_LOCK_NORMAL			\
181 		PMF_TIMESTAMP				\
182 		BASE_XLAT_TABLE_BSS			\
183 		__BSS_END__ = .;			\
184 	}
185 
/*
 * The xlat_table section is for full, aligned page tables (4K).
 * Removing them from .bss avoids forcing 4K alignment on
 * the .bss section. The tables are initialized to zero by the translation
 * tables library.  The 'xlat_table' input sections carry their own 4K
 * alignment, so no explicit ALIGN() is required here; NOLOAD keeps the
 * tables out of the image file.
 */
192 #define XLAT_TABLE_SECTION				\
193 	xlat_table (NOLOAD) : {				\
194 		*(xlat_table)				\
195 	}
196 
197 #endif /* BL_COMMON_LD_H */
198