// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2017-2024, STMicroelectronics
 * Copyright (c) 2016-2018, Linaro Limited
 */

#include <boot_api.h>
#include <config.h>
#include <console.h>
#include <drivers/firewall_device.h>
#include <drivers/gic.h>
#include <drivers/pinctrl.h>
#include <drivers/stm32_bsec.h>
#include <drivers/stm32_etzpc.h>
#include <drivers/stm32_gpio.h>
#include <drivers/stm32_iwdg.h>
#include <drivers/stm32_uart.h>
#include <drivers/stm32mp_dt_bindings.h>
#ifdef CFG_STM32MP15
#include <drivers/stm32mp1_rcc.h>
#endif
#include <io.h>
#include <kernel/boot.h>
#include <kernel/dt.h>
#include <kernel/dt_driver.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <libfdt.h>
#include <mm/core_memprot.h>
#include <platform_config.h>
#include <sm/psci.h>
#include <stm32_util.h>
#include <string.h>
#include <trace.h>

register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB1_BASE, APB1_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB2_BASE, APB2_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB3_BASE, APB3_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB4_BASE, APB4_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB5_BASE, APB5_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, AHB4_BASE, AHB4_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, AHB5_BASE, AHB5_SIZE);

register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB1_BASE, APB1_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB3_BASE, APB3_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB4_BASE, APB4_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB5_BASE, APB5_SIZE);
#ifdef CFG_STM32MP13
register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB6_BASE, APB6_SIZE);
#endif
register_phys_mem_pgdir(MEM_AREA_IO_SEC, AHB4_BASE, AHB4_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, AHB5_BASE, AHB5_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, GIC_BASE, GIC_SIZE);

register_ddr(DDR_BASE, CFG_DRAM_SIZE);

#define _ID2STR(id)	(#id)
#define ID2STR(id)	_ID2STR(id)

static TEE_Result platform_banner(void)
{
	IMSG("Platform stm32mp1: flavor %s - DT %s",
	     ID2STR(PLATFORM_FLAVOR),
	     ID2STR(CFG_EMBED_DTB_SOURCE_FILE));

	return TEE_SUCCESS;
}
service_init(platform_banner);

/*
 * Console
 *
 * CFG_STM32_EARLY_CONSOLE_UART specifies the ID of the UART used for the
 * trace console. Value 0 disables the early console.
 *
 * We cannot use the generic serial_console support since probing
 * the console requires the platform clock driver to be up and ready,
 * which is only the case once the service_init steps have completed.
 */
static struct stm32_uart_pdata console_data;

void plat_console_init(void)
{
	/* Early console initialization before MMU setup */
	struct uart {
		paddr_t pa;
		bool secure;
	} uarts[] = {
		[0] = { .pa = 0 },
		[1] = { .pa = USART1_BASE, .secure = true, },
		[2] = { .pa = USART2_BASE, .secure = false, },
		[3] = { .pa = USART3_BASE, .secure = false, },
		[4] = { .pa = UART4_BASE, .secure = false, },
		[5] = { .pa = UART5_BASE, .secure = false, },
		[6] = { .pa = USART6_BASE, .secure = false, },
		[7] = { .pa = UART7_BASE, .secure = false, },
		[8] = { .pa = UART8_BASE, .secure = false, },
	};

	COMPILE_TIME_ASSERT(ARRAY_SIZE(uarts) > CFG_STM32_EARLY_CONSOLE_UART);

	if (!uarts[CFG_STM32_EARLY_CONSOLE_UART].pa)
		return;

	/* No clock yet bound to the UART console */
	console_data.clock = NULL;

	console_data.secure = uarts[CFG_STM32_EARLY_CONSOLE_UART].secure;
	stm32_uart_init(&console_data, uarts[CFG_STM32_EARLY_CONSOLE_UART].pa);

	register_serial_console(&console_data.chip);

	IMSG("Early console on UART#%u", CFG_STM32_EARLY_CONSOLE_UART);
}
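
/*
 * For example, a build setting CFG_STM32_EARLY_CONSOLE_UART=4 (illustrative
 * value) selects entry [4] of the table above, that is UART4 at UART4_BASE
 * registered as a non-secure early console, while
 * CFG_STM32_EARLY_CONSOLE_UART=0 keeps the early console disabled.
 */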

static TEE_Result init_console_from_dt(void)
{
	struct stm32_uart_pdata *pd = NULL;
	void *fdt = NULL;
	int node = 0;
	TEE_Result res = TEE_ERROR_GENERIC;

	fdt = get_embedded_dt();
	res = get_console_node_from_dt(fdt, &node, NULL, NULL);
	if (res == TEE_ERROR_ITEM_NOT_FOUND) {
		fdt = get_external_dt();
		res = get_console_node_from_dt(fdt, &node, NULL, NULL);
		if (res == TEE_ERROR_ITEM_NOT_FOUND)
			return TEE_SUCCESS;
		if (res != TEE_SUCCESS)
			return res;
	}

	pd = stm32_uart_init_from_dt_node(fdt, node);
	if (!pd) {
		IMSG("DTB disables console");
		register_serial_console(NULL);
		return TEE_SUCCESS;
	}

	/* Replace early console with the new one */
	console_flush();
	console_data = *pd;
	register_serial_console(&console_data.chip);
	IMSG("DTB enables console (%ssecure)", pd->secure ? "" : "non-");
	free(pd);

	return TEE_SUCCESS;
}

/* Probe console from DT once clock inits (service init level) are completed */
service_init_late(init_console_from_dt);

/*
 * GIC init, also used for primary/secondary boot core wake completion
 */
void boot_primary_init_intc(void)
{
	gic_init(GIC_BASE + GICC_OFFSET, GIC_BASE + GICD_OFFSET);

	stm32mp_register_online_cpu();
}

void boot_secondary_init_intc(void)
{
	gic_init_per_cpu();

	stm32mp_register_online_cpu();
}

#ifdef CFG_STM32MP15
/*
 * This concerns the OP-TEE pager for STM32MP1 when it uses secure internal
 * RAMs to execute. TZSRAM refers to the TZSRAM_BASE/TZSRAM_SIZE used in
 * boot.c to locate secure unpaged memory.
 *
 * STM32MP15 variants embed 640kB of contiguous securable SRAMs:
 *
 * +--------------+ <-- SYSRAM_BASE
 * |              | lower part can be assigned to secure world
 * | SYSRAM 256kB | 4kB granule boundary
 * |              | upper part can be assigned to non-secure world
 * +--------------+ <-- SRAM1_BASE (= SYSRAM_BASE + SYSRAM_SIZE)
 * |              | full range assigned to non-secure world or
 * | SRAM1 128kB  | to secure world, or to Cortex-M4 exclusive access
 * +--------------+ <-- SRAM2_BASE (= SRAM1_BASE + SRAM1_SIZE)
 * |              | full range assigned to non-secure world or
 * | SRAM2 128kB  | to secure world, or to Cortex-M4 exclusive access
 * +--------------+ <-- SRAM3_BASE (= SRAM2_BASE + SRAM2_SIZE)
 * |              | full range assigned to non-secure world or
 * | SRAM3 64kB   | to secure world, or to Cortex-M4 exclusive access
 * +--------------+ <-- SRAM4_BASE (= SRAM3_BASE + SRAM3_SIZE)
 * |              | full range assigned to non-secure world or
 * | SRAM4 64kB   | to secure world, or to Cortex-M4 exclusive access
 * +--------------+ <-- SRAM4_BASE + SRAM4_SIZE
 *
 * If the SRAMx memories are not used by the companion Cortex-M4
 * processor, OP-TEE can use them.
 *
 * SYSRAM configuration for secure/non-secure boundaries requires the
 * secure SYSRAM memory to start at the SYSRAM physical base address and grow
 * from there, while the non-secure SYSRAM range lies at the end of SYSRAM,
 * with a 4kB page granule.
 *
 * SRAM1, SRAM2, SRAM3 and SRAM4 are independently assigned to secure world,
 * to non-secure world or possibly to Cortex-M4 exclusive access. Each
 * assignment covers the full related SRAMx memory range.
 *
 * Using non-secure SYSRAM or one of the SRAMx for SCMI message communication
 * can be done using CFG_STM32MP1_SCMI_SHM_BASE/CFG_STM32MP1_SCMI_SHM_SIZE.
 * This imposes that the related memory area is assigned to non-secure world.
 *
 * Using secure internal memories (SYSRAM and/or some SRAMx) with STM32MP15
 * shall meet these constraints, knowing that the TZSRAM physical memory
 * range shall be contiguous.
 */

#define SYSRAM_END		(SYSRAM_BASE + SYSRAM_SIZE)
#define SYSRAM_SEC_END		(SYSRAM_BASE + SYSRAM_SEC_SIZE)
#define SRAMS_END		(SRAM4_BASE + SRAM4_SIZE)
#define SRAMS_START		SRAM1_BASE
#define TZSRAM_END		(CFG_TZSRAM_START + CFG_TZSRAM_SIZE)

#define TZSRAM_FITS_IN_SYSRAM_SEC ((CFG_TZSRAM_START >= SYSRAM_BASE) && \
				   (TZSRAM_END <= SYSRAM_SEC_END))

#define TZSRAM_FITS_IN_SYSRAM_AND_SRAMS ((CFG_TZSRAM_START >= SYSRAM_BASE) && \
					 (CFG_TZSRAM_START < SYSRAM_END) && \
					 (TZSRAM_END > SYSRAM_END) && \
					 (TZSRAM_END <= SRAMS_END) && \
					 (SYSRAM_SIZE == SYSRAM_SEC_SIZE))

#define TZSRAM_FITS_IN_SRAMS ((CFG_TZSRAM_START >= SRAMS_START) && \
			      (CFG_TZSRAM_START < SRAMS_END) && \
			      (TZSRAM_END <= SRAMS_END))

#define TZSRAM_IS_IN_DRAM (CFG_TZSRAM_START >= CFG_DRAM_BASE)

#ifdef CFG_WITH_PAGER
/*
 * At build time, we enforce that, when the pager is used, either TZSRAM
 * fully fits inside the SYSRAM secure address range, or TZSRAM fits inside
 * the full SYSRAM and spreads orderly into the SRAMx, or TZSRAM fully fits
 * inside the SRAMx address range, or TZSRAM is in DDR for debug and test
 * purposes.
 */
static_assert(TZSRAM_FITS_IN_SYSRAM_SEC || TZSRAM_FITS_IN_SYSRAM_AND_SRAMS ||
	      TZSRAM_FITS_IN_SRAMS || TZSRAM_IS_IN_DRAM);
#endif /* CFG_WITH_PAGER */
#endif /* CFG_STM32MP15 */
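
/*
 * Illustrative example: defining CFG_TZSRAM_START=SYSRAM_BASE and
 * CFG_TZSRAM_SIZE=SYSRAM_SEC_SIZE makes TZSRAM span exactly the secure part
 * of SYSRAM and satisfies TZSRAM_FITS_IN_SYSRAM_SEC above. A TZSRAM range
 * spilling past SYSRAM_END into the SRAMx must instead satisfy
 * TZSRAM_FITS_IN_SYSRAM_AND_SRAMS, which additionally requires the whole
 * SYSRAM to be secure (SYSRAM_SIZE == SYSRAM_SEC_SIZE).
 */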

static TEE_Result secure_pager_ram(struct dt_driver_provider *fw_provider,
				   unsigned int decprot_id,
				   paddr_t base, size_t secure_size)
{
	/* Lock firewall configuration for secure internal RAMs used by pager */
	uint32_t query_arg = DECPROT(decprot_id, DECPROT_S_RW, DECPROT_LOCK);
	struct firewall_query fw_query = {
		.ctrl = dt_driver_provider_priv_data(fw_provider),
		.args = &query_arg,
		.arg_count = 1,
	};
	TEE_Result res = TEE_ERROR_GENERIC;
	bool is_pager_ram = false;

#if defined(CFG_WITH_PAGER)
	is_pager_ram = core_is_buffer_intersect(CFG_TZSRAM_START,
						CFG_TZSRAM_SIZE,
						base, secure_size);
#endif
	if (!is_pager_ram)
		return TEE_SUCCESS;

	res = firewall_set_memory_configuration(&fw_query, base, secure_size);
	if (res)
		EMSG("Failed to configure secure SRAM %#"PRIxPA"..%#"PRIxPA,
		     base, base + secure_size);

	return res;
}

static TEE_Result non_secure_scmi_ram(struct dt_driver_provider *fw_provider,
				      unsigned int decprot_id,
				      paddr_t base, size_t size)
{
	/* Do not lock firewall configuration for non-secure internal RAMs */
	uint32_t query_arg = DECPROT(decprot_id, DECPROT_NS_RW, DECPROT_UNLOCK);
	struct firewall_query fw_query = {
		.ctrl = dt_driver_provider_priv_data(fw_provider),
		.args = &query_arg,
		.arg_count = 1,
	};
	TEE_Result res = TEE_ERROR_GENERIC;

	if (!core_is_buffer_intersect(CFG_STM32MP1_SCMI_SHM_BASE,
				      CFG_STM32MP1_SCMI_SHM_SIZE,
				      base, size))
		return TEE_SUCCESS;

	res = firewall_set_memory_configuration(&fw_query, base, size);
	if (res)
		EMSG("Failed to configure non-secure SRAM %#"PRIxPA"..%#"PRIxPA,
		     base, base + size);

	return res;
}

/* At run time we enforce that SRAM1 to SRAM4 are properly assigned if used */
static void configure_srams(struct dt_driver_provider *fw_provider)
{
	bool error = false;

	if (IS_ENABLED(CFG_WITH_PAGER)) {
		if (secure_pager_ram(fw_provider, STM32MP1_ETZPC_SRAM1_ID,
				     SRAM1_BASE, SRAM1_SIZE))
			error = true;

		if (secure_pager_ram(fw_provider, STM32MP1_ETZPC_SRAM2_ID,
				     SRAM2_BASE, SRAM2_SIZE))
			error = true;

		if (secure_pager_ram(fw_provider, STM32MP1_ETZPC_SRAM3_ID,
				     SRAM3_BASE, SRAM3_SIZE))
			error = true;

#if defined(CFG_STM32MP15)
		if (secure_pager_ram(fw_provider, STM32MP1_ETZPC_SRAM4_ID,
				     SRAM4_BASE, SRAM4_SIZE))
			error = true;
#endif
	}

	if (CFG_STM32MP1_SCMI_SHM_BASE) {
		if (non_secure_scmi_ram(fw_provider, STM32MP1_ETZPC_SRAM1_ID,
					SRAM1_BASE, SRAM1_SIZE))
			error = true;

		if (non_secure_scmi_ram(fw_provider, STM32MP1_ETZPC_SRAM2_ID,
					SRAM2_BASE, SRAM2_SIZE))
			error = true;

		if (non_secure_scmi_ram(fw_provider, STM32MP1_ETZPC_SRAM3_ID,
					SRAM3_BASE, SRAM3_SIZE))
			error = true;

#if defined(CFG_STM32MP15)
		if (non_secure_scmi_ram(fw_provider, STM32MP1_ETZPC_SRAM4_ID,
					SRAM4_BASE, SRAM4_SIZE))
			error = true;
#endif
	}

	if (error)
		panic();
}
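
/*
 * Assign the lower SYSRAM_SEC_SIZE bytes of SYSRAM to the secure world
 * (TZMA1 region of the ETZPC firewall) and clear the remaining upper part,
 * if any, so that no stale data is left in the SYSRAM range released to the
 * non-secure world.
 */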
static void configure_sysram(struct dt_driver_provider *fw_provider)
{
	uint32_t query_arg = DECPROT(ETZPC_TZMA1_ID, DECPROT_S_RW,
				     DECPROT_UNLOCK);
	struct firewall_query firewall = {
		.ctrl = dt_driver_provider_priv_data(fw_provider),
		.args = &query_arg,
		.arg_count = 1,
	};
	TEE_Result res = TEE_ERROR_GENERIC;

	res = firewall_set_memory_configuration(&firewall, SYSRAM_BASE,
						SYSRAM_SEC_SIZE);
	if (res)
		panic("Unable to secure SYSRAM");

	if (SYSRAM_SIZE > SYSRAM_SEC_SIZE) {
		size_t nsec_size = SYSRAM_SIZE - SYSRAM_SEC_SIZE;
		paddr_t nsec_start = SYSRAM_BASE + SYSRAM_SEC_SIZE;
		uint8_t *va = phys_to_virt(nsec_start, MEM_AREA_IO_NSEC,
					   nsec_size);

		IMSG("Non-secure SYSRAM [%p %p]", va, va + nsec_size - 1);

		/* Clear content from the non-secure part */
		memset(va, 0, nsec_size);
	}
}

static TEE_Result init_late_stm32mp1_drivers(void)
{
	uint32_t __maybe_unused state = 0;

	/* Configure SYSRAM and SRAMx secure hardening */
	if (IS_ENABLED(CFG_STM32_ETZPC)) {
		struct dt_driver_provider *prov = NULL;
		int node = 0;

		node = fdt_node_offset_by_compatible(get_embedded_dt(), -1,
						     "st,stm32-etzpc");
		if (node < 0)
			panic("Could not get ETZPC node");

		prov = dt_driver_get_provider_by_node(node, DT_DRIVER_FIREWALL);
		assert(prov);

		configure_sysram(prov);
		configure_srams(prov);
	}

#ifdef CFG_STM32MP15
	/* Devices in Secure Closed state require RCC secure hardening */
	if (stm32_bsec_get_state(&state))
		panic();
	if (state == BSEC_STATE_SEC_CLOSED && !stm32_rcc_is_secure())
		panic("Closed device mandates secure RCC");
#endif

	return TEE_SUCCESS;
}

driver_init_late(init_late_stm32mp1_drivers);

vaddr_t stm32_rcc_base(void)
{
	static struct io_pa_va base = { .pa = RCC_BASE };

	return io_pa_or_va_secure(&base, 1);
}

vaddr_t get_gicd_base(void)
{
	struct io_pa_va base = { .pa = GIC_BASE + GICD_OFFSET };

	return io_pa_or_va_secure(&base, 1);
}

void stm32mp_get_bsec_static_cfg(struct stm32_bsec_static_cfg *cfg)
{
	cfg->base = BSEC_BASE;
	cfg->upper_start = STM32MP1_UPPER_OTP_START;
	cfg->max_id = STM32MP1_OTP_MAX_ID;
}

bool __weak stm32mp_with_pmic(void)
{
	return false;
}
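
/*
 * Wrappers usable before the MMU (and hence the spinlocks) are available:
 * may_spin_lock()/may_spin_unlock() are no-ops when no lock is provided or
 * when the MMU is not yet enabled.
 */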
uint32_t may_spin_lock(unsigned int *lock)
{
	if (!lock || !cpu_mmu_enabled())
		return 0;

	return cpu_spin_lock_xsave(lock);
}

void may_spin_unlock(unsigned int *lock, uint32_t exceptions)
{
	if (!lock || !cpu_mmu_enabled())
		return;

	cpu_spin_unlock_xrestore(lock, exceptions);
}

static vaddr_t stm32_tamp_base(void)
{
	static struct io_pa_va base = { .pa = TAMP_BASE };

	return io_pa_or_va_secure(&base, 1);
}

static vaddr_t bkpreg_base(void)
{
	return stm32_tamp_base() + TAMP_BKP_REGISTER_OFF;
}

vaddr_t stm32mp_bkpreg(unsigned int idx)
{
	return bkpreg_base() + (idx * sizeof(uint32_t));
}

static bool __maybe_unused bank_is_valid(unsigned int bank)
{
	if (IS_ENABLED(CFG_STM32MP15))
		return bank == GPIO_BANK_Z || bank <= GPIO_BANK_K;

	if (IS_ENABLED(CFG_STM32MP13))
		return bank <= GPIO_BANK_I;

	panic();
}

#ifdef CFG_STM32_IWDG
TEE_Result stm32_get_iwdg_otp_config(paddr_t pbase,
				     struct stm32_iwdg_otp_data *otp_data)
{
	unsigned int idx = 0;
	uint32_t otp_id = 0;
	size_t bit_len = 0;
	uint8_t bit_offset = 0;
	uint32_t otp_value = 0;

	switch (pbase) {
	case IWDG1_BASE:
		idx = 0;
		break;
	case IWDG2_BASE:
		idx = 1;
		break;
	default:
		panic();
	}

	if (stm32_bsec_find_otp_in_nvmem_layout("hw2_otp", &otp_id, &bit_offset,
						&bit_len) ||
	    bit_len != 32 || bit_offset != 0)
		panic();

	if (stm32_bsec_read_otp(&otp_value, otp_id))
		panic();

	otp_data->hw_enabled = otp_value &
			       BIT(idx + HW2_OTP_IWDG_HW_ENABLE_SHIFT);
	otp_data->disable_on_stop = otp_value &
				    BIT(idx + HW2_OTP_IWDG_FZ_STOP_SHIFT);
	otp_data->disable_on_standby = otp_value &
				       BIT(idx + HW2_OTP_IWDG_FZ_STANDBY_SHIFT);

	return TEE_SUCCESS;
}
#endif /*CFG_STM32_IWDG*/

#ifdef CFG_STM32_DEBUG_ACCESS
static TEE_Result init_debug(void)
{
	TEE_Result res = TEE_SUCCESS;
	uint32_t conf = stm32_bsec_read_debug_conf();
	struct clk *dbg_clk = stm32mp_rcc_clock_id_to_clk(CK_DBG);
	uint32_t state = 0;

	res = stm32_bsec_get_state(&state);
	if (res)
		return res;

	if (state != BSEC_STATE_SEC_CLOSED && conf) {
		if (IS_ENABLED(CFG_INSECURE))
			IMSG("WARNING: All debug accesses are allowed");

		res = stm32_bsec_write_debug_conf(conf | BSEC_DEBUG_ALL);
		if (res)
			return res;

		/*
		 * Enable DBG clock as used to access coprocessor
		 * debug registers
		 */
		clk_enable(dbg_clk);
	}

	return TEE_SUCCESS;
}
early_init_late(init_debug);
#endif /* CFG_STM32_DEBUG_ACCESS */

/* Some generic resources need to be unpaged */
DECLARE_KEEP_PAGER(pinctrl_apply_state);

bool stm32mp_allow_probe_shared_device(const void *fdt, int node)
{
	static int uart_console_node = -1;
	const char *compat = NULL;
	static bool once;

	if (IS_ENABLED(CFG_STM32_ALLOW_UNSAFE_PROBE))
		return true;

	if (!once) {
		get_console_node_from_dt((void *)fdt, &uart_console_node,
					 NULL, NULL);
		once = true;
	}

	compat = fdt_stringlist_get(fdt, node, "compatible", 0, NULL);

	/*
	 * Allow OP-TEE console and MP15 I2C and RNG to be shared
	 * with non-secure world.
	 */
	if (node == uart_console_node ||
	    !strcmp(compat, "st,stm32mp15-i2c-non-secure") ||
	    (!strcmp(compat, "st,stm32-rng") &&
	     IS_ENABLED(CFG_WITH_SOFTWARE_PRNG)))
		return true;

	return false;
}

#if defined(CFG_STM32MP15) && defined(CFG_WITH_PAGER)
paddr_t stm32mp1_pa_or_sram_alias_pa(paddr_t pa)
{
	/*
	 * OP-TEE uses the alias physical addresses of SRAM1/2/3/4,
	 * not the standard physical addresses. This choice was initially
	 * driven by pager that needs physically contiguous memories
	 * for internal secure memories.
	 */
	if (core_is_buffer_inside(pa, 1, SRAM1_ALT_BASE, SRAM1_SIZE))
		pa += SRAM1_BASE - SRAM1_ALT_BASE;
	else if (core_is_buffer_inside(pa, 1, SRAM2_ALT_BASE, SRAM2_SIZE))
		pa += SRAM2_BASE - SRAM2_ALT_BASE;
	else if (core_is_buffer_inside(pa, 1, SRAM3_ALT_BASE, SRAM3_SIZE))
		pa += SRAM3_BASE - SRAM3_ALT_BASE;
	else if (core_is_buffer_inside(pa, 1, SRAM4_ALT_BASE, SRAM4_SIZE))
		pa += SRAM4_BASE - SRAM4_ALT_BASE;

	return pa;
}

bool stm32mp1_ram_intersect_pager_ram(paddr_t base, size_t size)
{
	base = stm32mp1_pa_or_sram_alias_pa(base);

	return core_is_buffer_intersect(base, size, CFG_TZSRAM_START,
					CFG_TZSRAM_SIZE);
}
#endif