/*
 * Copyright 2018-2022 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <assert.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/mmio.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <mmu_def.h>
#include <plat/common/platform.h>

#include "plat_common.h"
#include "platform_def.h"

const mmap_region_t *plat_ls_get_mmap(void);

/*
 * Table of memory regions for various BL stages to map using the MMU.
 * This doesn't include Trusted SRAM, as ls_setup_page_tables() already
 * takes care of mapping it.
 *
 * The flash needs to be mapped as writable in order to erase the FIP's Table of
 * Contents in case of unrecoverable error (see plat_error_handler()).
 */
#ifdef IMAGE_BL2
const mmap_region_t plat_ls_mmap[] = {
	LS_MAP_CCSR,
	{0}
};
#endif

#ifdef IMAGE_BL31
const mmap_region_t plat_ls_mmap[] = {
	LS_MAP_CCSR,
#ifdef NXP_DCSR_ADDR
	LS_MAP_DCSR,
#endif
	LS_MAP_OCRAM,
	{0}
};
#endif

#ifdef IMAGE_BL32
const mmap_region_t plat_ls_mmap[] = {
	LS_MAP_CCSR,
	LS_MAP_BL32_SEC_MEM,
	{0}
};
#endif

/* Weak definitions may be overridden by the specific NXP SoC */
#pragma weak plat_get_ns_image_entrypoint
#pragma weak plat_ls_get_mmap

#if defined(IMAGE_BL31) || !defined(CONFIG_DDR_FIP_IMAGE)
static void mmap_add_ddr_regions_statically(void)
{
	int i = 0;
	dram_regions_info_t *info_dram_regions = get_dram_regions_info();

	/* MMU map for Non-Secure DRAM Regions */
	VERBOSE("DRAM Region %d: %p - %p\n", i,
		(void *) info_dram_regions->region[i].addr,
		(void *) (info_dram_regions->region[i].addr
			+ info_dram_regions->region[i].size
			- 1));
	mmap_add_region(info_dram_regions->region[i].addr,
			info_dram_regions->region[i].addr,
			info_dram_regions->region[i].size,
			MT_MEMORY | MT_RW | MT_NS);

	/* MMU map for Secure DDR Region on DRAM-0 */
	if (info_dram_regions->region[i].size >
		(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)) {
		VERBOSE("Secure DRAM Region %d: %p - %p\n", i,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				+ NXP_SECURE_DRAM_SIZE
				+ NXP_SP_SHRD_DRAM_SIZE
				- 1));
		mmap_add_region((info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
				(info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
				(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE),
				MT_MEMORY | MT_RW | MT_SECURE);
	}

#ifdef IMAGE_BL31
	for (i = 1; i < info_dram_regions->num_dram_regions; i++) {
		if (info_dram_regions->region[i].size == 0) {
			break;
		}
		VERBOSE("DRAM Region %d: %p - %p\n", i,
			(void *) info_dram_regions->region[i].addr,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				- 1));
		mmap_add_region(info_dram_regions->region[i].addr,
				info_dram_regions->region[i].addr,
				info_dram_regions->region[i].size,
				MT_MEMORY | MT_RW | MT_NS);
	}
#endif
}
#endif
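
/*
 * Note on the LS_MAP_* entries used above: the authoritative definitions live
 * in the SoC's mmu_def.h, but each one is expected to be a MAP_REGION_FLAT()
 * descriptor from xlat_tables_v2.h, roughly along the lines of the sketch
 * below (NXP_CCSR_ADDR/NXP_CCSR_SIZE are illustrative names only, not
 * guaranteed by this file):
 *
 *	#define LS_MAP_CCSR	MAP_REGION_FLAT(NXP_CCSR_ADDR,		\
 *						NXP_CCSR_SIZE,		\
 *						MT_DEVICE | MT_RW | MT_SECURE)
 */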

#if defined(PLAT_XLAT_TABLES_DYNAMIC)
void mmap_add_ddr_region_dynamically(void)
{
	int ret, i = 0;
	dram_regions_info_t *info_dram_regions = get_dram_regions_info();

	/* MMU map for Non-Secure DRAM Regions */
	VERBOSE("DRAM Region %d: %p - %p\n", i,
		(void *) info_dram_regions->region[i].addr,
		(void *) (info_dram_regions->region[i].addr
			+ info_dram_regions->region[i].size
			- 1));
	ret = mmap_add_dynamic_region(info_dram_regions->region[i].addr,
			info_dram_regions->region[i].addr,
			info_dram_regions->region[i].size,
			MT_MEMORY | MT_RW | MT_NS);
	if (ret != 0) {
		ERROR("Failed to add dynamic memory region\n");
		panic();
	}

	/* MMU map for Secure DDR Region on DRAM-0 */
	if (info_dram_regions->region[i].size >
		(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)) {
		VERBOSE("Secure DRAM Region %d: %p - %p\n", i,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				+ NXP_SECURE_DRAM_SIZE
				+ NXP_SP_SHRD_DRAM_SIZE
				- 1));
		ret = mmap_add_dynamic_region((info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
				(info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
				(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE),
				MT_MEMORY | MT_RW | MT_SECURE);
		if (ret != 0) {
			ERROR("Failed to add dynamic memory region\n");
			panic();
		}
	}

#ifdef IMAGE_BL31
	for (i = 1; i < info_dram_regions->num_dram_regions; i++) {
		if (info_dram_regions->region[i].size == 0) {
			break;
		}
		VERBOSE("DRAM Region %d: %p - %p\n", i,
			(void *) info_dram_regions->region[i].addr,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				- 1));
		ret = mmap_add_dynamic_region(info_dram_regions->region[i].addr,
				info_dram_regions->region[i].addr,
				info_dram_regions->region[i].size,
				MT_MEMORY | MT_RW | MT_NS);
		if (ret != 0) {
			ERROR("Failed to add dynamic memory region\n");
			panic();
		}
	}
#endif
}
#endif
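
/*
 * Illustrative usage (an assumption, not mandated by this file): a BL stage
 * built with PLAT_XLAT_TABLES_DYNAMIC that sizes DDR at run time is expected
 * to populate the dram_regions_info structure from its DDR driver first and
 * only then call:
 *
 *	mmap_add_ddr_region_dynamically();
 *
 * Unlike mmap_add_ddr_regions_statically(), this may happen after
 * init_xlat_tables() and MMU enablement, which is precisely what the
 * mmap_add_dynamic_region() API allows.
 */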

/*
 * Set up the page tables for the generic and platform-specific memory regions.
 * The extents of the generic memory regions are specified by the function
 * arguments and consist of:
 * - Trusted SRAM seen by the BL image;
 * - Code section;
 * - Read-only data section;
 * - Coherent memory region, if applicable.
 */
void ls_setup_page_tables(uintptr_t total_base,
			  size_t total_size,
			  uintptr_t code_start,
			  uintptr_t code_limit,
			  uintptr_t rodata_start,
			  uintptr_t rodata_limit
#if USE_COHERENT_MEM
			  ,
			  uintptr_t coh_start,
			  uintptr_t coh_limit
#endif
			  )
{
	/*
	 * Map the Trusted SRAM with appropriate memory attributes.
	 * Subsequent mappings will adjust the attributes for specific regions.
	 */
	VERBOSE("Memory seen by this BL image: %p - %p\n",
		(void *) total_base, (void *) (total_base + total_size));
	mmap_add_region(total_base, total_base,
			total_size,
			MT_MEMORY | MT_RW | MT_SECURE);

	/* Re-map the code section */
	VERBOSE("Code region: %p - %p\n",
		(void *) code_start, (void *) code_limit);
	mmap_add_region(code_start, code_start,
			code_limit - code_start,
			MT_CODE | MT_SECURE);

	/* Re-map the read-only data section */
	VERBOSE("Read-only data region: %p - %p\n",
		(void *) rodata_start, (void *) rodata_limit);
	mmap_add_region(rodata_start, rodata_start,
			rodata_limit - rodata_start,
			MT_RO_DATA | MT_SECURE);

#if USE_COHERENT_MEM
	/* Re-map the coherent memory region */
	VERBOSE("Coherent region: %p - %p\n",
		(void *) coh_start, (void *) coh_limit);
	mmap_add_region(coh_start, coh_start,
			coh_limit - coh_start,
			MT_DEVICE | MT_RW | MT_SECURE);
#endif

	/* Now (re-)map the platform-specific memory regions */
	mmap_add(plat_ls_get_mmap());

#if defined(IMAGE_BL31) || !defined(CONFIG_DDR_FIP_IMAGE)
	mmap_add_ddr_regions_statically();
#endif

	/* Create the page tables to reflect the above mappings */
	init_xlat_tables();
}

/*******************************************************************************
 * Returns NXP platform-specific memory map regions.
 ******************************************************************************/
const mmap_region_t *plat_ls_get_mmap(void)
{
	return plat_ls_mmap;
}

/*
 * This function gets the number of clusters and the number of cores per
 * cluster in the SoC.
 */
void get_cluster_info(const struct soc_type *soc_list, uint8_t ps_count,
		      uint8_t *num_clusters, uint8_t *cores_per_cluster)
{
	const soc_info_t *soc_info = get_soc_info();
	unsigned int i;

	*num_clusters = NUMBER_OF_CLUSTERS;
	*cores_per_cluster = CORES_PER_CLUSTER;

	for (i = 0U; i < ps_count; i++) {
		if (soc_list[i].version == soc_info->svr_reg.bf_ver.version) {
			*num_clusters = soc_list[i].num_clusters;
			*cores_per_cluster = soc_list[i].cores_per_cluster;
			break;
		}
	}

	VERBOSE("Number of clusters = 0x%x, cores per cluster = 0x%x\n",
		*num_clusters, *cores_per_cluster);
}
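
/*
 * Illustrative call sequence (an assumption based on common TF-A usage, not
 * something this file enforces): a BL31 port would typically invoke
 * ls_setup_page_tables() from its architecture setup hook with the image
 * extents and then turn on the MMU, e.g.:
 *
 *	ls_setup_page_tables(BL31_BASE, BL31_END - BL31_BASE,
 *			     BL_CODE_BASE, BL_CODE_END,
 *			     BL_RO_DATA_BASE, BL_RO_DATA_END
 * #if USE_COHERENT_MEM
 *			     , BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_END
 * #endif
 *			     );
 *	enable_mmu_el3(0);
 *
 * BL31_BASE/BL31_END and the BL_* extents are assumed to come from the
 * platform's platform_def.h and the common TF-A linker-symbol macros.
 */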