// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2017-2024, STMicroelectronics
 * Copyright (c) 2016-2018, Linaro Limited
 */

#include <boot_api.h>
#include <config.h>
#include <console.h>
#include <drivers/firewall_device.h>
#include <drivers/gic.h>
#include <drivers/pinctrl.h>
#include <drivers/stm32_bsec.h>
#include <drivers/stm32_gpio.h>
#include <drivers/stm32_iwdg.h>
#include <drivers/stm32_uart.h>
#include <drivers/stm32mp_dt_bindings.h>
#ifdef CFG_STM32MP15
#include <drivers/stm32mp1_rcc.h>
#endif
#include <io.h>
#include <kernel/boot.h>
#include <kernel/dt.h>
#include <kernel/dt_driver.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <libfdt.h>
#include <mm/core_memprot.h>
#include <platform_config.h>
#include <sm/psci.h>
#include <stm32_util.h>
#include <string.h>
#include <trace.h>

register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB1_BASE, APB1_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB2_BASE, APB2_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB3_BASE, APB3_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB4_BASE, APB4_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB5_BASE, APB5_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, AHB4_BASE, AHB4_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, AHB5_BASE, AHB5_SIZE);

register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB1_BASE, APB1_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB3_BASE, APB3_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB4_BASE, APB4_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB5_BASE, APB5_SIZE);
#ifdef CFG_STM32MP13
register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB6_BASE, APB6_SIZE);
#endif
register_phys_mem_pgdir(MEM_AREA_IO_SEC, AHB4_BASE, AHB4_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, AHB5_BASE, AHB5_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, GIC_BASE, GIC_SIZE);

register_ddr(DDR_BASE, CFG_DRAM_SIZE);

#define _ID2STR(id)     (#id)
#define ID2STR(id)      _ID2STR(id)

static TEE_Result platform_banner(void)
{
        IMSG("Platform stm32mp1: flavor %s - DT %s",
             ID2STR(PLATFORM_FLAVOR),
             ID2STR(CFG_EMBED_DTB_SOURCE_FILE));

        return TEE_SUCCESS;
}
service_init(platform_banner);

/*
 * Console
 *
 * CFG_STM32_EARLY_CONSOLE_UART specifies the ID of the UART used for the
 * trace console. Value 0 disables the early console.
 *
 * We cannot use the generic serial_console support since probing the
 * console requires the platform clock driver to be up and ready, which
 * happens only once the service_init steps have completed.
 */
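
/*
 * For example (illustrative, board-dependent assumption): building with
 * CFG_STM32_EARLY_CONSOLE_UART=4 selects UART4, a common console UART on
 * STM32MP1 boards, as the early trace console until the DT-based console
 * takes over in init_console_from_dt().
 */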
static struct stm32_uart_pdata console_data;

void plat_console_init(void)
{
        /* Early console initialization before MMU setup */
        struct uart {
                paddr_t pa;
        } uarts[] = {
                [0] = { .pa = 0 },
                [1] = { .pa = USART1_BASE },
                [2] = { .pa = USART2_BASE },
                [3] = { .pa = USART3_BASE },
                [4] = { .pa = UART4_BASE },
                [5] = { .pa = UART5_BASE },
                [6] = { .pa = USART6_BASE },
                [7] = { .pa = UART7_BASE },
                [8] = { .pa = UART8_BASE },
        };

        COMPILE_TIME_ASSERT(ARRAY_SIZE(uarts) > CFG_STM32_EARLY_CONSOLE_UART);

        if (!uarts[CFG_STM32_EARLY_CONSOLE_UART].pa)
                return;

        /* No clock yet bound to the UART console */
        console_data.clock = NULL;

        stm32_uart_init(&console_data, uarts[CFG_STM32_EARLY_CONSOLE_UART].pa);

        register_serial_console(&console_data.chip);

        IMSG("Early console on UART#%u", CFG_STM32_EARLY_CONSOLE_UART);
}

static TEE_Result init_console_from_dt(void)
{
        struct stm32_uart_pdata *pd = NULL;
        void *fdt = NULL;
        int node = 0;
        TEE_Result res = TEE_ERROR_GENERIC;

        fdt = get_embedded_dt();
        res = get_console_node_from_dt(fdt, &node, NULL, NULL);
        if (res == TEE_ERROR_ITEM_NOT_FOUND) {
                fdt = get_external_dt();
                res = get_console_node_from_dt(fdt, &node, NULL, NULL);
                if (res == TEE_ERROR_ITEM_NOT_FOUND)
                        return TEE_SUCCESS;
                if (res != TEE_SUCCESS)
                        return res;
        }

        pd = stm32_uart_init_from_dt_node(fdt, node);
        if (!pd) {
                IMSG("DTB disables console");
                register_serial_console(NULL);
                return TEE_SUCCESS;
        }

        /* Replace early console with the new one */
        console_flush();
        console_data = *pd;
        register_serial_console(&console_data.chip);
        IMSG("DTB enables console");
        free(pd);

        return TEE_SUCCESS;
}

/* Probe console from DT once clock inits (service init level) are completed */
service_init_late(init_console_from_dt);
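
/*
 * Note: get_console_node_from_dt() is expected to resolve the console node
 * from the stdout-path property of the /secure-chosen or /chosen DT node.
 * Illustrative fragment (an assumption, not taken from the platform DT):
 *
 *      chosen {
 *              stdout-path = "serial0:115200n8";
 *      };
 */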

/*
 * GIC init, used also for primary/secondary boot core wake completion
 */
void boot_primary_init_intc(void)
{
        gic_init(GIC_BASE + GICC_OFFSET, GIC_BASE + GICD_OFFSET);

        stm32mp_register_online_cpu();
}

void boot_secondary_init_intc(void)
{
        gic_init_per_cpu();

        stm32mp_register_online_cpu();
}

#ifdef CFG_STM32MP15
/*
 * This concerns the OP-TEE pager for STM32MP1 using secure internal RAMs
 * for execution. TZSRAM refers to the TZSRAM_BASE/TZSRAM_SIZE range used
 * in boot.c to locate secure unpaged memory.
 *
 * STM32MP15 variants embed 640kB of contiguous securable SRAMs:
 *
 * +--------------+ <-- SYSRAM_BASE
 * |              | lower part can be assigned to the secure world
 * | SYSRAM 256kB | 4kB granule boundary
 * |              | upper part can be assigned to the non-secure world
 * +--------------+ <-- SRAM1_BASE (= SYSRAM_BASE + SYSRAM_SIZE)
 * |              | full range assigned to the non-secure world, or
 * | SRAM1 128kB  | to the secure world, or to Cortex-M4 exclusive access
 * +--------------+ <-- SRAM2_BASE (= SRAM1_BASE + SRAM1_SIZE)
 * |              | full range assigned to the non-secure world, or
 * | SRAM2 128kB  | to the secure world, or to Cortex-M4 exclusive access
 * +--------------+ <-- SRAM3_BASE (= SRAM2_BASE + SRAM2_SIZE)
 * |              | full range assigned to the non-secure world, or
 * | SRAM3 64kB   | to the secure world, or to Cortex-M4 exclusive access
 * +--------------+ <-- SRAM4_BASE (= SRAM3_BASE + SRAM3_SIZE)
 * |              | full range assigned to the non-secure world, or
 * | SRAM4 64kB   | to the secure world, or to Cortex-M4 exclusive access
 * +--------------+ <-- SRAM4_BASE + SRAM4_SIZE
 *
 * If the SRAMx memories are not used by the companion Cortex-M4 processor,
 * OP-TEE can use them.
 *
 * SYSRAM configuration for secure/non-secure boundaries requires the secure
 * SYSRAM memory to start at the SYSRAM physical base address and grow from
 * there, while the non-secure SYSRAM range lies at the SYSRAM end addresses,
 * with a 4KB page granule.
 *
 * SRAM1, SRAM2, SRAM3 and SRAM4 are independently assigned to the secure
 * world, to the non-secure world or possibly to Cortex-M4 exclusive access.
 * Each assignment covers the full related SRAMx memory range.
 *
 * Using non-secure SYSRAM or one of the SRAMx for SCMI message communication
 * can be done using CFG_STM32MP1_SCMI_SHM_BASE/CFG_STM32MP1_SCMI_SHM_SIZE.
 * This requires the related memory area to be assigned to the non-secure
 * world.
 *
 * Using secure internal memories (SYSRAM and/or some SRAMx) with STM32MP15
 * shall meet the constraints below, since the TZSRAM physical memory range
 * shall be contiguous.
 */

#define SYSRAM_END              (SYSRAM_BASE + SYSRAM_SIZE)
#define SYSRAM_SEC_END          (SYSRAM_BASE + SYSRAM_SEC_SIZE)
#define SRAMS_END               (SRAM4_BASE + SRAM4_SIZE)
#define SRAMS_START             SRAM1_BASE
#define TZSRAM_END              (CFG_TZSRAM_START + CFG_TZSRAM_SIZE)

#define TZSRAM_FITS_IN_SYSRAM_SEC ((CFG_TZSRAM_START >= SYSRAM_BASE) && \
                                   (TZSRAM_END <= SYSRAM_SEC_END))

#define TZSRAM_FITS_IN_SYSRAM_AND_SRAMS ((CFG_TZSRAM_START >= SYSRAM_BASE) && \
                                         (CFG_TZSRAM_START < SYSRAM_END) && \
                                         (TZSRAM_END > SYSRAM_END) && \
                                         (TZSRAM_END <= SRAMS_END) && \
                                         (SYSRAM_SIZE == SYSRAM_SEC_SIZE))

#define TZSRAM_FITS_IN_SRAMS    ((CFG_TZSRAM_START >= SRAMS_START) && \
                                 (CFG_TZSRAM_START < SRAMS_END) && \
                                 (TZSRAM_END <= SRAMS_END))

#define TZSRAM_IS_IN_DRAM       (CFG_TZSRAM_START >= CFG_DRAM_BASE)
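
/*
 * Illustrative example (numeric values are assumptions based on a typical
 * STM32MP15 memory map, not definitions from this file): with
 * SYSRAM_BASE = 0x2ffc0000, SYSRAM_SIZE = SYSRAM_SEC_SIZE = 256kB and
 * SRAM1_BASE = 0x30000000, a build with CFG_TZSRAM_START = 0x2ffc0000 and
 * CFG_TZSRAM_SIZE = 384kB makes TZSRAM cover the whole SYSRAM plus SRAM1
 * and satisfies TZSRAM_FITS_IN_SYSRAM_AND_SRAMS.
 */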

#ifdef CFG_WITH_PAGER
/*
 * At build time, we enforce that, when the pager is used, either TZSRAM
 * fully fits inside the secure SYSRAM address range, or TZSRAM fits inside
 * the full SYSRAM and spreads contiguously into the SRAMx memories, or
 * TZSRAM fully fits inside the SRAMx address range, or TZSRAM is in DDR
 * for debug and test purposes.
 */
static_assert(TZSRAM_FITS_IN_SYSRAM_SEC || TZSRAM_FITS_IN_SYSRAM_AND_SRAMS ||
              TZSRAM_FITS_IN_SRAMS || TZSRAM_IS_IN_DRAM);
#endif /* CFG_WITH_PAGER */
#endif /* CFG_STM32MP15 */

static TEE_Result secure_pager_ram(struct dt_driver_provider *fw_provider,
                                   unsigned int decprot_id,
                                   paddr_t base, size_t secure_size)
{
        /* Lock firewall configuration for secure internal RAMs used by pager */
        uint32_t query_arg = DECPROT(decprot_id, DECPROT_S_RW, DECPROT_LOCK);
        struct firewall_query fw_query = {
                .ctrl = dt_driver_provider_priv_data(fw_provider),
                .args = &query_arg,
                .arg_count = 1,
        };
        TEE_Result res = TEE_ERROR_GENERIC;
        bool is_pager_ram = false;

#if defined(CFG_WITH_PAGER)
        is_pager_ram = core_is_buffer_intersect(CFG_TZSRAM_START,
                                                CFG_TZSRAM_SIZE,
                                                base, secure_size);
#endif
        if (!is_pager_ram)
                return TEE_SUCCESS;

        res = firewall_set_memory_configuration(&fw_query, base, secure_size);
        if (res)
                EMSG("Failed to configure secure SRAM %#"PRIxPA"..%#"PRIxPA,
                     base, base + secure_size);

        return res;
}

static TEE_Result non_secure_scmi_ram(struct dt_driver_provider *fw_provider,
                                      unsigned int decprot_id,
                                      paddr_t base, size_t size)
{
        /* Do not lock firewall configuration for non-secure internal RAMs */
        uint32_t query_arg = DECPROT(decprot_id, DECPROT_NS_RW, DECPROT_UNLOCK);
        struct firewall_query fw_query = {
                .ctrl = dt_driver_provider_priv_data(fw_provider),
                .args = &query_arg,
                .arg_count = 1,
        };
        TEE_Result res = TEE_ERROR_GENERIC;

        if (!core_is_buffer_intersect(CFG_STM32MP1_SCMI_SHM_BASE,
                                      CFG_STM32MP1_SCMI_SHM_SIZE,
                                      base, size))
                return TEE_SUCCESS;

        res = firewall_set_memory_configuration(&fw_query, base, size);
        if (res)
                EMSG("Failed to configure non-secure SRAM %#"PRIxPA"..%#"PRIxPA,
                     base, base + size);

        return res;
}

/* At run time we enforce that SRAM1 to SRAM4 are properly assigned if used */
static void configure_srams(struct dt_driver_provider *fw_provider)
{
        bool error = false;

        if (IS_ENABLED(CFG_WITH_PAGER)) {
                if (secure_pager_ram(fw_provider, STM32MP1_ETZPC_SRAM1_ID,
                                     SRAM1_BASE, SRAM1_SIZE))
                        error = true;

                if (secure_pager_ram(fw_provider, STM32MP1_ETZPC_SRAM2_ID,
                                     SRAM2_BASE, SRAM2_SIZE))
                        error = true;

                if (secure_pager_ram(fw_provider, STM32MP1_ETZPC_SRAM3_ID,
                                     SRAM3_BASE, SRAM3_SIZE))
                        error = true;

#if defined(CFG_STM32MP15)
                if (secure_pager_ram(fw_provider, STM32MP1_ETZPC_SRAM4_ID,
                                     SRAM4_BASE, SRAM4_SIZE))
                        error = true;
#endif
        }

        if (CFG_STM32MP1_SCMI_SHM_BASE) {
                if (non_secure_scmi_ram(fw_provider, STM32MP1_ETZPC_SRAM1_ID,
                                        SRAM1_BASE, SRAM1_SIZE))
                        error = true;

                if (non_secure_scmi_ram(fw_provider, STM32MP1_ETZPC_SRAM2_ID,
                                        SRAM2_BASE, SRAM2_SIZE))
                        error = true;

                if (non_secure_scmi_ram(fw_provider, STM32MP1_ETZPC_SRAM3_ID,
                                        SRAM3_BASE, SRAM3_SIZE))
                        error = true;

#if defined(CFG_STM32MP15)
                if (non_secure_scmi_ram(fw_provider, STM32MP1_ETZPC_SRAM4_ID,
                                        SRAM4_BASE, SRAM4_SIZE))
                        error = true;
#endif
        }

        if (error)
                panic();
}
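
/*
 * Note: each SRAMx is assigned as a whole through its ETZPC DECPROT ID
 * (see the helpers above) while the SYSRAM secure/non-secure split relies
 * on the ETZPC TZMA1 region: the secure size passed to
 * firewall_set_memory_configuration() below sets the boundary.
 */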

static void configure_sysram(struct dt_driver_provider *fw_provider)
{
        uint32_t query_arg = DECPROT(ETZPC_TZMA1_ID, DECPROT_S_RW,
                                     DECPROT_UNLOCK);
        struct firewall_query firewall = {
                .ctrl = dt_driver_provider_priv_data(fw_provider),
                .args = &query_arg,
                .arg_count = 1,
        };
        TEE_Result res = TEE_ERROR_GENERIC;

        res = firewall_set_memory_configuration(&firewall, SYSRAM_BASE,
                                                SYSRAM_SEC_SIZE);
        if (res)
                panic("Unable to secure SYSRAM");

        if (SYSRAM_SIZE > SYSRAM_SEC_SIZE) {
                size_t nsec_size = SYSRAM_SIZE - SYSRAM_SEC_SIZE;
                paddr_t nsec_start = SYSRAM_BASE + SYSRAM_SEC_SIZE;
                uint8_t *va = phys_to_virt(nsec_start, MEM_AREA_IO_NSEC,
                                           nsec_size);

                IMSG("Non-secure SYSRAM [%p %p]", va, va + nsec_size - 1);

                /* Clear content from the non-secure part */
                memset(va, 0, nsec_size);
        }
}

static TEE_Result init_late_stm32mp1_drivers(void)
{
        uint32_t __maybe_unused state = 0;

        /* Configure SYSRAM and SRAMx secure hardening */
        if (IS_ENABLED(CFG_STM32_ETZPC)) {
                struct dt_driver_provider *prov = NULL;
                int node = 0;

                node = fdt_node_offset_by_compatible(get_embedded_dt(), -1,
                                                     "st,stm32-etzpc");
                if (node < 0)
                        panic("Could not get ETZPC node");

                prov = dt_driver_get_provider_by_node(node, DT_DRIVER_FIREWALL);
                assert(prov);

                configure_sysram(prov);
                configure_srams(prov);
        }

#ifdef CFG_STM32MP15
        /* Devices in Secure Closed state require RCC secure hardening */
        if (stm32_bsec_get_state(&state))
                panic();
        if (state == BSEC_STATE_SEC_CLOSED && !stm32_rcc_is_secure())
                panic("Closed device mandates secure RCC");
#endif

        return TEE_SUCCESS;
}

driver_init_late(init_late_stm32mp1_drivers);

vaddr_t stm32_rcc_base(void)
{
        static struct io_pa_va base = { .pa = RCC_BASE };

        return io_pa_or_va_secure(&base, 1);
}

vaddr_t get_gicd_base(void)
{
        struct io_pa_va base = { .pa = GIC_BASE + GICD_OFFSET };

        return io_pa_or_va_secure(&base, 1);
}

void stm32mp_get_bsec_static_cfg(struct stm32_bsec_static_cfg *cfg)
{
        cfg->base = BSEC_BASE;
        cfg->upper_start = STM32MP1_UPPER_OTP_START;
        cfg->max_id = STM32MP1_OTP_MAX_ID;
}

bool __weak stm32mp_with_pmic(void)
{
        return false;
}

/* Spinlock helpers that skip locking while the MMU is not yet enabled */
uint32_t may_spin_lock(unsigned int *lock)
{
        if (!lock || !cpu_mmu_enabled())
                return 0;

        return cpu_spin_lock_xsave(lock);
}

void may_spin_unlock(unsigned int *lock, uint32_t exceptions)
{
        if (!lock || !cpu_mmu_enabled())
                return;

        cpu_spin_unlock_xrestore(lock, exceptions);
}

static vaddr_t stm32_tamp_base(void)
{
        static struct io_pa_va base = { .pa = TAMP_BASE };

        return io_pa_or_va_secure(&base, 1);
}

static vaddr_t bkpreg_base(void)
{
        return stm32_tamp_base() + TAMP_BKP_REGISTER_OFF;
}

vaddr_t stm32mp_bkpreg(unsigned int idx)
{
        return bkpreg_base() + (idx * sizeof(uint32_t));
}

/* Tell whether a GPIO bank ID is valid on the SoC variant */
static bool __maybe_unused bank_is_valid(unsigned int bank)
{
        if (IS_ENABLED(CFG_STM32MP15))
                return bank == GPIO_BANK_Z || bank <= GPIO_BANK_K;

        if (IS_ENABLED(CFG_STM32MP13))
                return bank <= GPIO_BANK_I;

        panic();
}
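
/*
 * IWDG instances derive part of their configuration from the 32-bit
 * "hw2_otp" fuse word read below: for instance index idx (0 for IWDG1,
 * 1 for IWDG2), bit (HW2_OTP_IWDG_HW_ENABLE_SHIFT + idx) gives the
 * hardware-enabled state while the FZ_STOP and FZ_STANDBY bits give the
 * disable_on_stop and disable_on_standby states.
 */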

#ifdef CFG_STM32_IWDG
TEE_Result stm32_get_iwdg_otp_config(paddr_t pbase,
                                     struct stm32_iwdg_otp_data *otp_data)
{
        unsigned int idx = 0;
        uint32_t otp_id = 0;
        size_t bit_len = 0;
        uint8_t bit_offset = 0;
        uint32_t otp_value = 0;

        switch (pbase) {
        case IWDG1_BASE:
                idx = 0;
                break;
        case IWDG2_BASE:
                idx = 1;
                break;
        default:
                panic();
        }

        if (stm32_bsec_find_otp_in_nvmem_layout("hw2_otp", &otp_id,
                                                &bit_offset, &bit_len) ||
            bit_len != 32 || bit_offset != 0)
                panic();

        if (stm32_bsec_read_otp(&otp_value, otp_id))
                panic();

        otp_data->hw_enabled = otp_value &
                               BIT(idx + HW2_OTP_IWDG_HW_ENABLE_SHIFT);
        otp_data->disable_on_stop = otp_value &
                                    BIT(idx + HW2_OTP_IWDG_FZ_STOP_SHIFT);
        otp_data->disable_on_standby = otp_value &
                                       BIT(idx + HW2_OTP_IWDG_FZ_STANDBY_SHIFT);

        return TEE_SUCCESS;
}
#endif /*CFG_STM32_IWDG*/

#ifdef CFG_STM32_DEBUG_ACCESS
static TEE_Result init_debug(void)
{
        TEE_Result res = TEE_SUCCESS;
        uint32_t conf = stm32_bsec_read_debug_conf();
        struct clk *dbg_clk = stm32mp_rcc_clock_id_to_clk(CK_DBG);
        uint32_t state = 0;

        res = stm32_bsec_get_state(&state);
        if (res)
                return res;

        if (state != BSEC_STATE_SEC_CLOSED && conf) {
                if (IS_ENABLED(CFG_INSECURE))
                        IMSG("WARNING: All debug accesses are allowed");

                res = stm32_bsec_write_debug_conf(conf | BSEC_DEBUG_ALL);
                if (res)
                        return res;

                /*
                 * Enable the DBG clock as it is used to access coprocessor
                 * debug registers
                 */
                clk_enable(dbg_clk);
        }

        return TEE_SUCCESS;
}
early_init_late(init_debug);
#endif /* CFG_STM32_DEBUG_ACCESS */

/* Some generic resources need to be unpaged */
DECLARE_KEEP_PAGER(pinctrl_apply_state);

bool stm32mp_allow_probe_shared_device(const void *fdt, int node)
{
        static int uart_console_node = -1;
        const char *compat = NULL;
        static bool once;

        if (IS_ENABLED(CFG_STM32_ALLOW_UNSAFE_PROBE))
                return true;

        if (!once) {
                get_console_node_from_dt((void *)fdt, &uart_console_node,
                                         NULL, NULL);
                once = true;
        }

        compat = fdt_stringlist_get(fdt, node, "compatible", 0, NULL);

        /*
         * Allow the OP-TEE console and the MP15 I2C and RNG to be shared
         * with the non-secure world.
         */
        if (node == uart_console_node ||
            !strcmp(compat, "st,stm32mp15-i2c-non-secure") ||
            (!strcmp(compat, "st,stm32-rng") &&
             IS_ENABLED(CFG_WITH_SOFTWARE_PRNG)))
                return true;

        return false;
}

#if defined(CFG_STM32MP15) && defined(CFG_WITH_PAGER)
paddr_t stm32mp1_pa_or_sram_alias_pa(paddr_t pa)
{
        /*
         * OP-TEE uses the alias physical addresses of SRAM1/2/3/4, not
         * the standard physical addresses. This choice was initially
         * driven by the pager, which needs a physically contiguous range
         * for the internal secure memories.
         */
        if (core_is_buffer_inside(pa, 1, SRAM1_ALT_BASE, SRAM1_SIZE))
                pa += SRAM1_BASE - SRAM1_ALT_BASE;
        else if (core_is_buffer_inside(pa, 1, SRAM2_ALT_BASE, SRAM2_SIZE))
                pa += SRAM2_BASE - SRAM2_ALT_BASE;
        else if (core_is_buffer_inside(pa, 1, SRAM3_ALT_BASE, SRAM3_SIZE))
                pa += SRAM3_BASE - SRAM3_ALT_BASE;
        else if (core_is_buffer_inside(pa, 1, SRAM4_ALT_BASE, SRAM4_SIZE))
                pa += SRAM4_BASE - SRAM4_ALT_BASE;

        return pa;
}

bool stm32mp1_ram_intersect_pager_ram(paddr_t base, size_t size)
{
        base = stm32mp1_pa_or_sram_alias_pa(base);

        return core_is_buffer_intersect(base, size, CFG_TZSRAM_START,
                                        CFG_TZSRAM_SIZE);
}
#endif