1 /*
2 * Copyright 2018-2022 NXP
3 * Copyright (c) 2025, Arm Limited and Contributors. All rights reserved.
4 *
5 * SPDX-License-Identifier: BSD-3-Clause
6 *
7 */
8
9 #include <assert.h>
10
11 #include <common/desc_image_load.h>
12 #include <dcfg.h>
13 #ifdef POLICY_FUSE_PROVISION
14 #include <fuse_io.h>
15 #endif
16 #include <mmu_def.h>
17 #include <plat_common.h>
18 #ifdef NXP_NV_SW_MAINT_LAST_EXEC_DATA
19 #include <plat_nv_storage.h>
20 #endif
21
/* Allow SoC-specific setup files to override these default BL2 hooks. */
#pragma weak bl2_early_platform_setup2
#pragma weak bl2_plat_arch_setup
#pragma weak bl2_plat_prepare_exit

/*
 * DRAM layout handed to BL31 (via ep_info arg3 in
 * ls_bl2_handle_post_image_load); populated after DDR init.
 */
static dram_regions_info_t dram_regions_info = {0};
27
/*******************************************************************************
 * Return the pointer to the 'dram_regions_info' structure of the DRAM.
 * This structure is populated after init_ddr().
 ******************************************************************************/
dram_regions_info_t *get_dram_regions_info(void)
{
	return &dram_regions_info;
}
36
#ifdef DDR_INIT
/*
 * Split the total DRAM size discovered by DDR init across the fixed
 * per-region address windows (DRAM0 and, when defined, DRAM1/DRAM2),
 * carving the secure and SP-shared carve-outs out of region 0.
 * Fills dram_regions_info.region[] and num_dram_regions.
 */
static void populate_dram_regions_info(void)
{
	long long dram_remain_size = dram_regions_info.total_dram_size;
	uint8_t reg_id = 0U;

	dram_regions_info.region[reg_id].addr = NXP_DRAM0_ADDR;
	dram_regions_info.region[reg_id].size =
			dram_remain_size > NXP_DRAM0_MAX_SIZE ?
				NXP_DRAM0_MAX_SIZE : dram_remain_size;

	if (dram_regions_info.region[reg_id].size != NXP_DRAM0_SIZE) {
		ERROR("Incorrect DRAM0 size is defined in platform_def.h\n");
	}

	dram_remain_size -= dram_regions_info.region[reg_id].size;
	/* Carve the secure and SP-shared memory out of region 0. */
	dram_regions_info.region[reg_id].size -= (NXP_SECURE_DRAM_SIZE
						+ NXP_SP_SHRD_DRAM_SIZE);

	assert(dram_regions_info.region[reg_id].size > 0);

	/* Reducing total dram size by 66MB */
	dram_regions_info.total_dram_size -= (NXP_SECURE_DRAM_SIZE
						+ NXP_SP_SHRD_DRAM_SIZE);

#if defined(NXP_DRAM1_ADDR) && defined(NXP_DRAM1_MAX_SIZE)
	if (dram_remain_size > 0) {
		reg_id++;
		dram_regions_info.region[reg_id].addr = NXP_DRAM1_ADDR;
		dram_regions_info.region[reg_id].size =
				dram_remain_size > NXP_DRAM1_MAX_SIZE ?
					NXP_DRAM1_MAX_SIZE : dram_remain_size;
		dram_remain_size -= dram_regions_info.region[reg_id].size;
	}
#endif
#if defined(NXP_DRAM2_ADDR) && defined(NXP_DRAM2_MAX_SIZE)
	if (dram_remain_size > 0) {
		reg_id++;
		/*
		 * Bug fix: this block previously reused the DRAM1
		 * macros; the third region must use the DRAM2 base
		 * address and size limit.
		 */
		dram_regions_info.region[reg_id].addr = NXP_DRAM2_ADDR;
		dram_regions_info.region[reg_id].size =
				dram_remain_size > NXP_DRAM2_MAX_SIZE ?
					NXP_DRAM2_MAX_SIZE : dram_remain_size;
		dram_remain_size -= dram_regions_info.region[reg_id].size;
	}
#endif
	reg_id++;
	dram_regions_info.num_dram_regions = reg_id;
}
#endif
86
#ifdef IMAGE_BL32
/*******************************************************************************
 * Gets SPSR for BL32 entry.
 * Deliberately returns 0: BL2 does not choose the BL32 entry state here.
 ******************************************************************************/
static uint32_t ls_get_spsr_for_bl32_entry(void)
{
	/*
	 * The Secure Payload Dispatcher service is responsible for
	 * setting the SPSR prior to entry into the BL32 image.
	 */
	return 0U;
}
#endif
100
101 /*******************************************************************************
102 * Gets SPSR for BL33 entry
103 ******************************************************************************/
104 #ifndef AARCH32
ls_get_spsr_for_bl33_entry(void)105 static uint32_t ls_get_spsr_for_bl33_entry(void)
106 {
107 unsigned int mode;
108 uint32_t spsr;
109
110 /* Figure out what mode we enter the non-secure world in */
111 mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;
112
113 /*
114 * TODO: Consider the possibility of specifying the SPSR in
115 * the FIP ToC and allowing the platform to have a say as
116 * well.
117 */
118 spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
119 return spsr;
120 }
121 #else
122 /*******************************************************************************
123 * Gets SPSR for BL33 entry
124 ******************************************************************************/
ls_get_spsr_for_bl33_entry(void)125 static uint32_t ls_get_spsr_for_bl33_entry(void)
126 {
127 unsigned int hyp_status, mode, spsr;
128
129 hyp_status = GET_VIRT_EXT(read_id_pfr1());
130
131 mode = (hyp_status) ? MODE32_hyp : MODE32_svc;
132
133 /*
134 * TODO: Consider the possibility of specifying the SPSR in
135 * the FIP ToC and allowing the platform to have a say as
136 * well.
137 */
138 spsr = SPSR_MODE32(mode, plat_get_ns_image_entrypoint() & 0x1,
139 SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
140 return spsr;
141 }
142 #endif /* AARCH32 */
143
void bl2_early_platform_setup2(u_register_t arg0 __unused,
			       u_register_t arg1 __unused,
			       u_register_t arg2 __unused,
			       u_register_t arg3 __unused)
{
	/*
	 * SoC specific early init
	 * Any errata handling or SoC specific early initialization can
	 * be done here
	 * Set Counter Base Frequency in CNTFID0 and in cntfrq_el0.
	 * Initialize the interconnect.
	 * Enable coherency for primary CPU cluster
	 */
	soc_early_init();

	/* Initialise the IO layer and register platform IO devices */
	plat_io_setup();

	/*
	 * If DRAM was already sized during early init, split it into
	 * regions now; otherwise this happens later in
	 * bl2_plat_preload_setup().
	 * NOTE(review): populate_dram_regions_info() is only compiled
	 * under DDR_INIT, yet this call is unguarded — presumably
	 * every build of this file defines DDR_INIT; confirm.
	 */
	if (dram_regions_info.total_dram_size > 0) {
		populate_dram_regions_info();
	}

#ifdef NXP_NV_SW_MAINT_LAST_EXEC_DATA
	/* Read back the non-volatile app data (warm-reset/WDT flags). */
	read_nv_app_data();
#if DEBUG
	const nv_app_data_t *nv_app_data = get_nv_data();

	INFO("Value of warm_reset flag = 0x%x\n", nv_app_data->warm_rst_flag);
	INFO("Value of WDT flag = 0x%x\n", nv_app_data->wdt_rst_flag);
#endif
#endif
}
176
/*******************************************************************************
 * Perform the very early platform specific architectural setup here. At the
 * moment this is only initializes the mmu in a quick and dirty way.
 ******************************************************************************/
void ls_bl2_plat_arch_setup(void)
{
	unsigned int flags = 0U;

	/* Map BL2's image, code and RO data (plus coherent RAM if used). */
	ls_setup_page_tables(
#if SEPARATE_BL2_NOLOAD_REGION
			BL2_START,
			BL2_LIMIT - BL2_START,
#else
			BL2_BASE,
			(unsigned long)(&__BL2_END__) - BL2_BASE,
#endif
			BL_CODE_BASE,
			BL_CODE_END,
			BL_RO_DATA_BASE,
			BL_RO_DATA_END
#if USE_COHERENT_MEM
			, BL_COHERENT_RAM_BASE,
			BL_COHERENT_RAM_END
#endif
			);

	/*
	 * DDR not initialized yet (no region info): enable the MMU with
	 * XLAT_TABLE_NC — NOTE(review): presumably so the translation
	 * tables are treated as non-cacheable until DRAM is up; confirm.
	 */
	if ((dram_regions_info.region[0].addr == 0)
		&& (dram_regions_info.total_dram_size == 0)) {
		flags = XLAT_TABLE_NC;
	}

#ifdef AARCH32
	enable_mmu_secure(0);
#else
	enable_mmu_el3(flags);
#endif
}
214
/* Weak default: delegate architectural setup to the common LS helper. */
void bl2_plat_arch_setup(void)
{
	ls_bl2_plat_arch_setup();
}
219
/* Platform setup hook before image loading; nothing needed on this SoC. */
void bl2_platform_setup(void)
{
	/*
	 * Perform platform setup before loading the image.
	 */
}
226
227 /* Handling image information by platform. */
ls_bl2_handle_post_image_load(unsigned int image_id)228 int ls_bl2_handle_post_image_load(unsigned int image_id)
229 {
230 int err = 0;
231 bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
232
233 assert(bl_mem_params);
234
235 switch (image_id) {
236 case BL31_IMAGE_ID:
237 bl_mem_params->ep_info.args.arg3 =
238 (u_register_t) &dram_regions_info;
239
240 /* Pass the value of PORSR1 register in Argument 4 */
241 bl_mem_params->ep_info.args.arg4 =
242 (u_register_t)read_reg_porsr1();
243 flush_dcache_range((uintptr_t)&dram_regions_info,
244 sizeof(dram_regions_info));
245 break;
246 #if defined(AARCH64) && defined(IMAGE_BL32)
247 case BL32_IMAGE_ID:
248 bl_mem_params->ep_info.spsr = ls_get_spsr_for_bl32_entry();
249 break;
250 #endif
251 case BL33_IMAGE_ID:
252 /* BL33 expects to receive the primary CPU MPID (through r0) */
253 bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
254 bl_mem_params->ep_info.spsr = ls_get_spsr_for_bl33_entry();
255 break;
256 }
257
258 return err;
259 }
260
/*******************************************************************************
 * This function can be used by the platforms to update/use image
 * information for given `image_id`.
 * Weak-overridable generic hook; forwards to the LS-common handler.
 ******************************************************************************/
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
	return ls_bl2_handle_post_image_load(image_id);
}
269
/* Last hook before BL2 exits: let the SoC do its final cleanup. */
void bl2_plat_prepare_exit(void)
{
	soc_bl2_prepare_exit();
}
274
275 /* Called to do the dynamic initialization required
276 * before loading the next image.
277 */
bl2_plat_preload_setup(void)278 void bl2_plat_preload_setup(void)
279 {
280
281 soc_preload_setup();
282
283 #ifdef DDR_INIT
284 if (dram_regions_info.total_dram_size <= 0) {
285 ERROR("Asserting as the DDR is not initialized yet.");
286 assert(false);
287 }
288 #endif
289
290 if ((dram_regions_info.region[0].addr == 0)
291 && (dram_regions_info.total_dram_size > 0)) {
292 populate_dram_regions_info();
293 #ifdef PLAT_XLAT_TABLES_DYNAMIC
294 mmap_add_ddr_region_dynamically();
295 #endif
296 }
297
298 /* setup the memory region access permissions */
299 soc_mem_access();
300
301 #ifdef POLICY_FUSE_PROVISION
302 fip_fuse_provisioning((uintptr_t)FUSE_BUF, FUSE_SZ);
303 #endif
304 }
305