/*
 * Copyright 2018-2022 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <assert.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/mmio.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <mmu_def.h>
#include <plat/common/platform.h>

#include "plat_common.h"
#include "platform_def.h"

const mmap_region_t *plat_ls_get_mmap(void);

/*
 * Table of memory regions for various BL stages to map using the MMU.
 * This doesn't include Trusted SRAM as ls_setup_page_tables() already
 * takes care of mapping it.
 *
 * The flash needs to be mapped as writable in order to erase the FIP's Table of
 * Contents in case of unrecoverable error (see plat_error_handler()).
 */
#ifdef IMAGE_BL2
const mmap_region_t plat_ls_mmap[] = {
	LS_MAP_CCSR,
	{0}
};
#endif

#ifdef IMAGE_BL31
const mmap_region_t plat_ls_mmap[] = {
	LS_MAP_CCSR,
#ifdef NXP_DCSR_ADDR
	LS_MAP_DCSR,
#endif
	LS_MAP_OCRAM,
	{0}
};
#endif
#ifdef IMAGE_BL32
const mmap_region_t plat_ls_mmap[] = {
	LS_MAP_CCSR,
	LS_MAP_BL32_SEC_MEM,
	{0}
};
#endif

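/*
 * Note: LS_MAP_CCSR, LS_MAP_DCSR, LS_MAP_OCRAM and LS_MAP_BL32_SEC_MEM are
 * provided per SoC by mmu_def.h/platform_def.h. Purely as an illustration
 * (the actual bases, sizes and memory attributes are SoC-specific and may
 * differ), such an entry is normally composed with the xlat_tables_v2
 * helper macro, e.g.:
 *
 *	#define LS_MAP_CCSR	MAP_REGION_FLAT(NXP_CCSR_ADDR,		\
 *					NXP_CCSR_SIZE,			\
 *					MT_DEVICE | MT_RW | MT_SECURE)
 */
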
/* Weak definitions may be overridden by a specific NXP SoC */
#pragma weak plat_get_ns_image_entrypoint
#pragma weak plat_ls_get_mmap
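
/*
 * Illustrative sketch only: an SoC port that needs a different region list
 * can provide a strong definition of plat_ls_get_mmap(). The extra region
 * below is a hypothetical placeholder, not an address taken from any real
 * SoC:
 *
 *	static const mmap_region_t soc_mmap[] = {
 *		LS_MAP_CCSR,
 *		MAP_REGION_FLAT(0x20000000UL, 0x10000000UL,
 *				MT_MEMORY | MT_RW | MT_NS),
 *		{0}
 *	};
 *
 *	const mmap_region_t *plat_ls_get_mmap(void)
 *	{
 *		return soc_mmap;
 *	}
 */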

#if defined(IMAGE_BL31) || !defined(CONFIG_DDR_FIP_IMAGE)
static void mmap_add_ddr_regions_statically(void)
{
	int i = 0;
	dram_regions_info_t *info_dram_regions = get_dram_regions_info();

	/* MMU map for Non-Secure DRAM Regions */
	VERBOSE("DRAM Region %d: %p - %p\n", i,
		(void *) info_dram_regions->region[i].addr,
		(void *) (info_dram_regions->region[i].addr
			+ info_dram_regions->region[i].size
			- 1));
	mmap_add_region(info_dram_regions->region[i].addr,
			info_dram_regions->region[i].addr,
			info_dram_regions->region[i].size,
			MT_MEMORY | MT_RW | MT_NS);

	/* MMU map for Secure DDR Region on DRAM-0 */
	if (info_dram_regions->region[i].size >
		(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)) {
		VERBOSE("Secure DRAM Region %d: %p - %p\n", i,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				+ NXP_SECURE_DRAM_SIZE
				+ NXP_SP_SHRD_DRAM_SIZE
				- 1));
		mmap_add_region((info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
				(info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
				(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE),
				MT_MEMORY | MT_RW | MT_SECURE);
	}

#ifdef IMAGE_BL31
	for (i = 1; i < info_dram_regions->num_dram_regions; i++) {
		if (info_dram_regions->region[i].size == 0) {
			break;
		}
		VERBOSE("DRAM Region %d: %p - %p\n", i,
			(void *) info_dram_regions->region[i].addr,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				- 1));
		mmap_add_region(info_dram_regions->region[i].addr,
				info_dram_regions->region[i].addr,
				info_dram_regions->region[i].size,
				MT_MEMORY | MT_RW | MT_NS);
	}
#endif
}
#endif

#if defined(PLAT_XLAT_TABLES_DYNAMIC)
void mmap_add_ddr_region_dynamically(void)
{
	int ret, i = 0;

	dram_regions_info_t *info_dram_regions = get_dram_regions_info();

	/* MMU map for Non-Secure DRAM Regions */
	VERBOSE("DRAM Region %d: %p - %p\n", i,
		(void *) info_dram_regions->region[i].addr,
		(void *) (info_dram_regions->region[i].addr
			+ info_dram_regions->region[i].size
			- 1));
	ret = mmap_add_dynamic_region(info_dram_regions->region[i].addr,
			info_dram_regions->region[i].addr,
			info_dram_regions->region[i].size,
			MT_MEMORY | MT_RW | MT_NS);
	if (ret != 0) {
		ERROR("Failed to add dynamic memory region\n");
		panic();
	}

	/* MMU map for Secure DDR Region on DRAM-0 */
	if (info_dram_regions->region[i].size >
		(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)) {
		VERBOSE("Secure DRAM Region %d: %p - %p\n", i,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				+ NXP_SECURE_DRAM_SIZE
				+ NXP_SP_SHRD_DRAM_SIZE
				- 1));
		ret = mmap_add_dynamic_region((info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
				(info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
				(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE),
				MT_MEMORY | MT_RW | MT_SECURE);
		if (ret != 0) {
			ERROR("Failed to add dynamic memory region\n");
			panic();
		}
	}

#ifdef IMAGE_BL31
	for (i = 1; i < info_dram_regions->num_dram_regions; i++) {
		if (info_dram_regions->region[i].size == 0) {
			break;
		}
		VERBOSE("DRAM Region %d: %p - %p\n", i,
			(void *) info_dram_regions->region[i].addr,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				- 1));
		ret = mmap_add_dynamic_region(info_dram_regions->region[i].addr,
				info_dram_regions->region[i].addr,
				info_dram_regions->region[i].size,
				MT_MEMORY | MT_RW | MT_NS);
		if (ret != 0) {
			ERROR("Failed to add dynamic memory region\n");
			panic();
		}
	}
#endif
}
#endif
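
/*
 * Usage sketch (an assumption about a typical caller, not code copied from
 * a specific BL stage in this tree): with PLAT_XLAT_TABLES_DYNAMIC, DRAM is
 * mapped only once the DDR controller has been initialised and
 * get_dram_regions_info() already describes the resulting regions, e.g.:
 *
 *	// platform DDR driver has run and populated dram_regions_info
 *	mmap_add_ddr_region_dynamically();
 *	// accesses to the listed DRAM regions are now translated
 */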

/*
 * Set up the page tables for the generic and platform-specific memory regions.
 * The extents of the generic memory regions are specified by the function
 * arguments and consist of:
 * - Trusted SRAM seen by the BL image;
 * - Code section;
 * - Read-only data section;
 * - Coherent memory region, if applicable.
 */
void ls_setup_page_tables(uintptr_t total_base,
			  size_t total_size,
			  uintptr_t code_start,
			  uintptr_t code_limit,
			  uintptr_t rodata_start,
			  uintptr_t rodata_limit
#if USE_COHERENT_MEM
			  ,
			  uintptr_t coh_start,
			  uintptr_t coh_limit
#endif
			  )
{
	/*
	 * Map the Trusted SRAM with appropriate memory attributes.
	 * Subsequent mappings will adjust the attributes for specific regions.
	 */
	VERBOSE("Memory seen by this BL image: %p - %p\n",
		(void *) total_base, (void *) (total_base + total_size));
	mmap_add_region(total_base, total_base,
			total_size,
			MT_MEMORY | MT_RW | MT_SECURE);

	/* Re-map the code section */
	VERBOSE("Code region: %p - %p\n",
		(void *) code_start, (void *) code_limit);
	mmap_add_region(code_start, code_start,
			code_limit - code_start,
			MT_CODE | MT_SECURE);

	/* Re-map the read-only data section */
	VERBOSE("Read-only data region: %p - %p\n",
		(void *) rodata_start, (void *) rodata_limit);
	mmap_add_region(rodata_start, rodata_start,
			rodata_limit - rodata_start,
			MT_RO_DATA | MT_SECURE);

#if USE_COHERENT_MEM
	/* Re-map the coherent memory region */
	VERBOSE("Coherent region: %p - %p\n",
		(void *) coh_start, (void *) coh_limit);
	mmap_add_region(coh_start, coh_start,
			coh_limit - coh_start,
			MT_DEVICE | MT_RW | MT_SECURE);
#endif

	/* Now (re-)map the platform-specific memory regions */
	mmap_add(plat_ls_get_mmap());

#if defined(IMAGE_BL31) || !defined(CONFIG_DDR_FIP_IMAGE)
	mmap_add_ddr_regions_statically();
#endif

	/* Create the page tables to reflect the above mappings */
	init_xlat_tables();
}
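
/*
 * Illustrative call site (a sketch of the usual TF-A pattern, not taken
 * verbatim from a particular BL image in this tree). BL_CODE_BASE,
 * BL_CODE_END, BL_RO_DATA_BASE, BL_RO_DATA_END, BL_COHERENT_RAM_BASE and
 * BL_COHERENT_RAM_END are the standard linker-defined section markers:
 *
 *	ls_setup_page_tables(BL31_BASE, BL31_END - BL31_BASE,
 *			     BL_CODE_BASE, BL_CODE_END,
 *			     BL_RO_DATA_BASE, BL_RO_DATA_END
 * #if USE_COHERENT_MEM
 *			     , BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_END
 * #endif
 *			     );
 *	enable_mmu_el3(0);
 */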

/*******************************************************************************
 * Returns NXP platform-specific memory map regions.
 ******************************************************************************/
const mmap_region_t *plat_ls_get_mmap(void)
{
	return plat_ls_mmap;
}

/*
 * This function gets the number of clusters and the core count per cluster
 * in the SoC.
 */
void get_cluster_info(const struct soc_type *soc_list, uint8_t ps_count,
		      uint8_t *num_clusters, uint8_t *cores_per_cluster)
{
	const soc_info_t *soc_info = get_soc_info();
	unsigned int i;

	*num_clusters = NUMBER_OF_CLUSTERS;
	*cores_per_cluster = CORES_PER_CLUSTER;

	for (i = 0U; i < ps_count; i++) {
		if (soc_list[i].version == soc_info->svr_reg.bf_ver.version) {
			*num_clusters = soc_list[i].num_clusters;
			*cores_per_cluster = soc_list[i].cores_per_cluster;
			break;
		}
	}

	VERBOSE("Number of clusters = 0x%x, cores per cluster = 0x%x\n",
		*num_clusters, *cores_per_cluster);
}
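
/*
 * Usage sketch (an assumption about the caller; soc_list and its length are
 * defined by the SoC-specific file that invokes this helper):
 *
 *	uint8_t num_clusters, cores_per_cluster;
 *
 *	get_cluster_info(soc_list, ARRAY_SIZE(soc_list),
 *			 &num_clusters, &cores_per_cluster);
 */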