xref: /rk3399_ARM-atf/plat/nxp/common/setup/ls_common.c (revision 08695df91dffb2e45c01866b760d73cb531a071b)
/*
 * Copyright 2018-2021 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <assert.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/mmio.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <mmu_def.h>
#include <plat/common/platform.h>

#include "plat_common.h"
#include "platform_def.h"

const mmap_region_t *plat_ls_get_mmap(void);

/*
 * Table of memory regions for various BL stages to map using the MMU.
 * This doesn't include Trusted SRAM as ls_setup_page_tables() already
 * takes care of mapping it.
 *
 * The flash needs to be mapped as writable in order to erase the FIP's Table of
 * Contents in case of unrecoverable error (see plat_error_handler()).
 */
#ifdef IMAGE_BL2
const mmap_region_t plat_ls_mmap[] = {
	LS_MAP_CCSR,
	{0}
};
#endif

#ifdef IMAGE_BL31
const mmap_region_t plat_ls_mmap[] = {
	LS_MAP_CCSR,
#ifdef NXP_DCSR_ADDR
	LS_MAP_DCSR,
#endif
	LS_MAP_OCRAM,
	{0}
};
#endif
#ifdef IMAGE_BL32
const mmap_region_t plat_ls_mmap[] = {
	LS_MAP_CCSR,
	LS_MAP_BL32_SEC_MEM,
	{0}
};
#endif
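
/*
 * Exactly one of the tables above is compiled into a given BL image. It is
 * returned by plat_ls_get_mmap() below and passed to mmap_add() from
 * ls_setup_page_tables().
 */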

/* Weak definitions may be overridden in the specific NXP SoC code */
#pragma weak plat_get_ns_image_entrypoint
#pragma weak plat_ls_get_mmap

#if defined(IMAGE_BL31) || !defined(CONFIG_DDR_FIP_IMAGE)
static void mmap_add_ddr_regions_statically(void)
{
	int i = 0;
	dram_regions_info_t *info_dram_regions = get_dram_regions_info();
	/* MMU map for Non-Secure DRAM Regions */
	VERBOSE("DRAM Region %d: %p - %p\n", i,
			(void *) info_dram_regions->region[i].addr,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				- 1));
	mmap_add_region(info_dram_regions->region[i].addr,
			info_dram_regions->region[i].addr,
			info_dram_regions->region[i].size,
			MT_MEMORY | MT_RW | MT_NS);

	/* MMU map for Secure DDR Region on DRAM-0 */
	if (info_dram_regions->region[i].size >
		(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)) {
		VERBOSE("Secure DRAM Region %d: %p - %p\n", i,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				+ NXP_SECURE_DRAM_SIZE
				+ NXP_SP_SHRD_DRAM_SIZE
				- 1));
		mmap_add_region((info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
				(info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
				(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE),
				MT_MEMORY | MT_RW | MT_SECURE);
	}

#ifdef IMAGE_BL31
	for (i = 1; i < info_dram_regions->num_dram_regions; i++) {
		if (info_dram_regions->region[i].size == 0) {
			break;
		}
		VERBOSE("DRAM Region %d: %p - %p\n", i,
			(void *) info_dram_regions->region[i].addr,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				- 1));
		mmap_add_region(info_dram_regions->region[i].addr,
				info_dram_regions->region[i].addr,
				info_dram_regions->region[i].size,
				MT_MEMORY | MT_RW | MT_NS);
	}
#endif
}
#endif
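
/*
 * The static mappings above have to be requested before init_xlat_tables()
 * runs. When PLAT_XLAT_TABLES_DYNAMIC is enabled, the variant below can add
 * the same DDR regions at run time instead, after the translation tables
 * (and the MMU) have already been set up.
 */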

#if defined(PLAT_XLAT_TABLES_DYNAMIC)
void mmap_add_ddr_region_dynamically(void)
{
	int i = 0;
	dram_regions_info_t *info_dram_regions = get_dram_regions_info();
	/* MMU map for Non-Secure DRAM Regions */
	VERBOSE("DRAM Region %d: %p - %p\n", i,
			(void *) info_dram_regions->region[i].addr,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				- 1));
	mmap_add_dynamic_region(info_dram_regions->region[i].addr,
			info_dram_regions->region[i].addr,
			info_dram_regions->region[i].size,
			MT_MEMORY | MT_RW | MT_NS);

	/* MMU map for Secure DDR Region on DRAM-0 */
	if (info_dram_regions->region[i].size >
		(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)) {
		VERBOSE("Secure DRAM Region %d: %p - %p\n", i,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				+ NXP_SECURE_DRAM_SIZE
				+ NXP_SP_SHRD_DRAM_SIZE
				- 1));
		mmap_add_dynamic_region((info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
				(info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
				(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE),
				MT_MEMORY | MT_RW | MT_SECURE);
	}

#ifdef IMAGE_BL31
	for (i = 1; i < info_dram_regions->num_dram_regions; i++) {
		if (info_dram_regions->region[i].size == 0) {
			break;
		}
		VERBOSE("DRAM Region %d: %p - %p\n", i,
			(void *) info_dram_regions->region[i].addr,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				- 1));
		mmap_add_dynamic_region(info_dram_regions->region[i].addr,
				info_dram_regions->region[i].addr,
				info_dram_regions->region[i].size,
				MT_MEMORY | MT_RW | MT_NS);
	}
#endif
}
#endif
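
/*
 * Illustrative ordering (a sketch; the actual call site lives in the
 * BL-specific setup code): because the regions above are added dynamically,
 * this function may run after the translation tables have been created and
 * the MMU enabled, e.g. once the DDR driver has populated the structure
 * returned by get_dram_regions_info():
 *
 *	ls_setup_page_tables(...);
 *	enable_mmu_el3(0);
 *	...
 *	mmap_add_ddr_region_dynamically();
 */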

/*
 * Set up the page tables for the generic and platform-specific memory regions.
 * The extents of the generic memory regions are specified by the function
 * arguments and consist of:
 * - Trusted SRAM seen by the BL image;
 * - Code section;
 * - Read-only data section;
 * - Coherent memory region, if applicable.
 */
void ls_setup_page_tables(uintptr_t total_base,
			   size_t total_size,
			   uintptr_t code_start,
			   uintptr_t code_limit,
			   uintptr_t rodata_start,
			   uintptr_t rodata_limit
#if USE_COHERENT_MEM
			   ,
			   uintptr_t coh_start,
			   uintptr_t coh_limit
#endif
			   )
{
	/*
	 * Map the Trusted SRAM with appropriate memory attributes.
	 * Subsequent mappings will adjust the attributes for specific regions.
	 */
	VERBOSE("Memory seen by this BL image: %p - %p\n",
		(void *) total_base, (void *) (total_base + total_size));
	mmap_add_region(total_base, total_base,
			total_size,
			MT_MEMORY | MT_RW | MT_SECURE);

	/* Re-map the code section */
	VERBOSE("Code region: %p - %p\n",
		(void *) code_start, (void *) code_limit);
	mmap_add_region(code_start, code_start,
			code_limit - code_start,
			MT_CODE | MT_SECURE);

	/* Re-map the read-only data section */
	VERBOSE("Read-only data region: %p - %p\n",
		(void *) rodata_start, (void *) rodata_limit);
	mmap_add_region(rodata_start, rodata_start,
			rodata_limit - rodata_start,
			MT_RO_DATA | MT_SECURE);

#if USE_COHERENT_MEM
	/* Re-map the coherent memory region */
	VERBOSE("Coherent region: %p - %p\n",
		(void *) coh_start, (void *) coh_limit);
	mmap_add_region(coh_start, coh_start,
			coh_limit - coh_start,
			MT_DEVICE | MT_RW | MT_SECURE);
#endif

	/* Now (re-)map the platform-specific memory regions */
	mmap_add(plat_ls_get_mmap());

#if defined(IMAGE_BL31) || !defined(CONFIG_DDR_FIP_IMAGE)
	mmap_add_ddr_regions_statically();
#endif

	/* Create the page tables to reflect the above mappings */
	init_xlat_tables();
}

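/*
 * Illustrative caller (a sketch only; the real call sites are in the per-BL
 * setup files and may differ). A BL31 arch-setup hook would typically pass
 * its own image and section extents, then enable the MMU:
 *
 *	ls_setup_page_tables(BL31_BASE, BL31_LIMIT - BL31_BASE,
 *			     BL_CODE_BASE, BL_CODE_END,
 *			     BL_RO_DATA_BASE, BL_RO_DATA_END
 *	#if USE_COHERENT_MEM
 *			     , BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_END
 *	#endif
 *			     );
 *	enable_mmu_el3(0);
 */
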
/*******************************************************************************
 * Returns the NXP platform-specific memory map regions.
 ******************************************************************************/
const mmap_region_t *plat_ls_get_mmap(void)
{
	return plat_ls_mmap;
}

/*
 * This function gets the number of clusters and the core count per cluster
 * present in the SoC.
 */
void get_cluster_info(const struct soc_type *soc_list, uint8_t ps_count,
		uint8_t *num_clusters, uint8_t *cores_per_cluster)
{
	const soc_info_t *soc_info = get_soc_info();
	unsigned int i;

	*num_clusters = NUMBER_OF_CLUSTERS;
	*cores_per_cluster = CORES_PER_CLUSTER;

	for (i = 0U; i < ps_count; i++) {
		if (soc_list[i].version == soc_info->svr_reg.bf_ver.version) {
			*num_clusters = soc_list[i].num_clusters;
			*cores_per_cluster = soc_list[i].cores_per_cluster;
			break;
		}
	}

	VERBOSE("Number of clusters = 0x%x, cores per cluster = 0x%x\n",
			*num_clusters, *cores_per_cluster);
}
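
/*
 * Example usage (a sketch; the real soc_list table and its count come from
 * SoC-specific code, and only the fields referenced above are shown):
 *
 *	static const struct soc_type soc_list[] = {
 *		{ .version = <svr version>, .num_clusters = <n>,
 *		  .cores_per_cluster = <m> },
 *	};
 *	uint8_t num_clusters, cores_per_cluster;
 *
 *	get_cluster_info(soc_list, ARRAY_SIZE(soc_list),
 *			 &num_clusters, &cores_per_cluster);
 */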