// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2017-2024, STMicroelectronics
 * Copyright (c) 2016-2018, Linaro Limited
 */

#include <boot_api.h>
#include <config.h>
#include <console.h>
#include <drivers/firewall_device.h>
#include <drivers/gic.h>
#include <drivers/pinctrl.h>
#include <drivers/stm32_bsec.h>
#include <drivers/stm32_gpio.h>
#include <drivers/stm32_iwdg.h>
#include <drivers/stm32_uart.h>
#include <drivers/stm32mp_dt_bindings.h>
#ifdef CFG_STM32MP15
#include <drivers/stm32mp1_rcc.h>
#endif
#include <io.h>
#include <kernel/boot.h>
#include <kernel/dt.h>
#include <kernel/dt_driver.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <libfdt.h>
#include <mm/core_memprot.h>
#include <platform_config.h>
#include <sm/psci.h>
#include <stm32_util.h>
#include <string.h>
#include <trace.h>

register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB1_BASE, APB1_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB2_BASE, APB2_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB3_BASE, APB3_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB4_BASE, APB4_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB5_BASE, APB5_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, AHB4_BASE, AHB4_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, AHB5_BASE, AHB5_SIZE);

register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB1_BASE, APB1_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB3_BASE, APB3_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB4_BASE, APB4_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB5_BASE, APB5_SIZE);
#ifdef CFG_STM32MP13
register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB6_BASE, APB6_SIZE);
#endif
register_phys_mem_pgdir(MEM_AREA_IO_SEC, AHB4_BASE, AHB4_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, AHB5_BASE, AHB5_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, GIC_BASE, GIC_SIZE);

register_ddr(DDR_BASE, CFG_DRAM_SIZE);

#define _ID2STR(id)     (#id)
#define ID2STR(id)      _ID2STR(id)

static TEE_Result platform_banner(void)
{
        IMSG("Platform stm32mp1: flavor %s - DT %s",
             ID2STR(PLATFORM_FLAVOR),
             ID2STR(CFG_EMBED_DTB_SOURCE_FILE));

        return TEE_SUCCESS;
}
service_init(platform_banner);

/*
 * Console
 *
 * CFG_STM32_EARLY_CONSOLE_UART specifies the ID of the UART used for
 * the trace console. Value 0 disables the early console.
 *
 * We cannot use the generic serial_console support since probing the
 * console requires the platform clock driver to be up and ready, which
 * is only the case once the service_init steps have completed.
 */
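/*
 * Illustrative example only (the value below is an assumption, not a
 * statement of the build default): CFG_STM32_EARLY_CONSOLE_UART=4 selects
 * UART4 in the uarts[] table of plat_console_init() below. No clock is
 * bound to the early console at this point, so the UART is assumed to be
 * left configured by the preceding boot stage.
 */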
static struct stm32_uart_pdata console_data;

void plat_console_init(void)
{
        /* Early console initialization before MMU setup */
        struct uart {
                paddr_t pa;
        } uarts[] = {
                [0] = { .pa = 0 },
                [1] = { .pa = USART1_BASE },
                [2] = { .pa = USART2_BASE },
                [3] = { .pa = USART3_BASE },
                [4] = { .pa = UART4_BASE },
                [5] = { .pa = UART5_BASE },
                [6] = { .pa = USART6_BASE },
                [7] = { .pa = UART7_BASE },
                [8] = { .pa = UART8_BASE },
        };

        COMPILE_TIME_ASSERT(ARRAY_SIZE(uarts) > CFG_STM32_EARLY_CONSOLE_UART);

        if (!uarts[CFG_STM32_EARLY_CONSOLE_UART].pa)
                return;

        /* No clock yet bound to the UART console */
        console_data.clock = NULL;

        stm32_uart_init(&console_data, uarts[CFG_STM32_EARLY_CONSOLE_UART].pa);

        register_serial_console(&console_data.chip);

        IMSG("Early console on UART#%u", CFG_STM32_EARLY_CONSOLE_UART);
}

static TEE_Result init_console_from_dt(void)
{
        struct stm32_uart_pdata *pd = NULL;
        void *fdt = NULL;
        int node = 0;
        TEE_Result res = TEE_ERROR_GENERIC;

        fdt = get_embedded_dt();
        res = get_console_node_from_dt(fdt, &node, NULL, NULL);
        if (res == TEE_ERROR_ITEM_NOT_FOUND) {
                fdt = get_external_dt();
                res = get_console_node_from_dt(fdt, &node, NULL, NULL);
                if (res == TEE_ERROR_ITEM_NOT_FOUND)
                        return TEE_SUCCESS;
                if (res != TEE_SUCCESS)
                        return res;
        }

        pd = stm32_uart_init_from_dt_node(fdt, node);
        if (!pd) {
                IMSG("DTB disables console");
                register_serial_console(NULL);
                return TEE_SUCCESS;
        }

        /* Replace early console with the new one */
        console_flush();
        console_data = *pd;
        register_serial_console(&console_data.chip);
        IMSG("DTB enables console");
        free(pd);

        return TEE_SUCCESS;
}

/* Probe console from DT once clock inits (service init level) are completed */
service_init_late(init_console_from_dt);

static uintptr_t stm32_dbgmcu_base(void)
{
        static void *va;

        if (!cpu_mmu_enabled())
                return DBGMCU_BASE;

        if (!va)
                va = phys_to_virt(DBGMCU_BASE, MEM_AREA_IO_SEC, 1);

        return (uintptr_t)va;
}

/* SoC device ID helper, returns the default ID if the DBGMCU is not accessible */
TEE_Result stm32mp1_dbgmcu_get_chip_dev_id(uint32_t *chip_dev_id)
{
        uint32_t id = STM32MP1_CHIP_ID;

        assert(chip_dev_id);

        if (stm32_bsec_read_debug_conf() & BSEC_DBGSWGEN)
                id = io_read32(stm32_dbgmcu_base() + DBGMCU_IDC) &
                     DBGMCU_IDC_DEV_ID_MASK;

        *chip_dev_id = id;

        return TEE_SUCCESS;
}

/*
 * GIC init, used also for primary/secondary boot core wake completion
 */
void boot_primary_init_intc(void)
{
        gic_init(GIC_BASE + GICC_OFFSET, GIC_BASE + GICD_OFFSET);

        stm32mp_register_online_cpu();
}

void boot_secondary_init_intc(void)
{
        gic_init_per_cpu();

        stm32mp_register_online_cpu();
}

#ifdef CFG_STM32MP15
/*
 * This concerns the OP-TEE pager for STM32MP1, which uses the secure
 * internal RAMs to execute. TZSRAM refers to the TZSRAM_BASE/TZSRAM_SIZE
 * used in boot.c to locate secure unpaged memory.
 *
 * STM32MP15 variants embed 640kB of contiguous securable SRAMs:
 *
 * +--------------+ <-- SYSRAM_BASE
 * |              | lower part can be assigned to secure world
 * | SYSRAM 256kB | 4kB granule boundary
 * |              | upper part can be assigned to non-secure world
 * +--------------+ <-- SRAM1_BASE (= SYSRAM_BASE + SYSRAM_SIZE)
 * |              | full range assigned to non-secure world or
 * | SRAM1 128kB  | to secure world, or to Cortex-M4 exclusive access
 * +--------------+ <-- SRAM2_BASE (= SRAM1_BASE + SRAM1_SIZE)
 * |              | full range assigned to non-secure world or
 * | SRAM2 128kB  | to secure world, or to Cortex-M4 exclusive access
 * +--------------+ <-- SRAM3_BASE (= SRAM2_BASE + SRAM2_SIZE)
 * |              | full range assigned to non-secure world or
 * | SRAM3 64kB   | to secure world, or to Cortex-M4 exclusive access
 * +--------------+ <-- SRAM4_BASE (= SRAM3_BASE + SRAM3_SIZE)
 * |              | full range assigned to non-secure world or
 * | SRAM4 64kB   | to secure world, or to Cortex-M4 exclusive access
 * +--------------+ <-- SRAM4_BASE + SRAM4_SIZE
 *
 * If the SRAMx memories are not used by the companion Cortex-M4
 * processor, OP-TEE can use them.
 *
 * The SYSRAM secure/non-secure boundary configuration requires the secure
 * SYSRAM range to start at the SYSRAM physical base address and grow from
 * there, while the non-secure SYSRAM range lies at the end of SYSRAM,
 * with a 4kB page granule.
 *
 * SRAM1, SRAM2, SRAM3 and SRAM4 are independently assigned to secure world,
 * to non-secure world or possibly to Cortex-M4 exclusive access. Each
 * assignment covers the full related SRAMx memory range.
 *
 * Using non-secure SYSRAM or one of the SRAMx for SCMI message communication
 * can be done using CFG_STM32MP1_SCMI_SHM_BASE/CFG_STM32MP1_SCMI_SHM_SIZE.
 * This imposes that the related memory area is assigned to non-secure world.
 *
 * Using secure internal memories (SYSRAM and/or some SRAMx) with STM32MP15
 * shall meet these constraints, knowing that the TZSRAM physical memory
 * range shall be contiguous.
 */

#define SYSRAM_END              (SYSRAM_BASE + SYSRAM_SIZE)
#define SYSRAM_SEC_END          (SYSRAM_BASE + SYSRAM_SEC_SIZE)
#define SRAMS_END               (SRAM4_BASE + SRAM4_SIZE)
#define SRAMS_START             SRAM1_BASE
#define TZSRAM_END              (CFG_TZSRAM_START + CFG_TZSRAM_SIZE)

#define TZSRAM_FITS_IN_SYSRAM_SEC ((CFG_TZSRAM_START >= SYSRAM_BASE) && \
                                   (TZSRAM_END <= SYSRAM_SEC_END))

#define TZSRAM_FITS_IN_SYSRAM_AND_SRAMS ((CFG_TZSRAM_START >= SYSRAM_BASE) && \
                                         (CFG_TZSRAM_START < SYSRAM_END) && \
                                         (TZSRAM_END > SYSRAM_END) && \
                                         (TZSRAM_END <= SRAMS_END) && \
                                         (SYSRAM_SIZE == SYSRAM_SEC_SIZE))

#define TZSRAM_FITS_IN_SRAMS ((CFG_TZSRAM_START >= SRAMS_START) && \
                              (CFG_TZSRAM_START < SRAMS_END) && \
                              (TZSRAM_END <= SRAMS_END))

#define TZSRAM_IS_IN_DRAM (CFG_TZSRAM_START >= CFG_DRAM_BASE)

#ifdef CFG_WITH_PAGER
/*
 * At build time, we enforce that, when the pager is used, either TZSRAM
 * fully fits inside the SYSRAM secure address range, or TZSRAM fully fits
 * inside the full SYSRAM and spreads into the SRAMx in order, or TZSRAM
 * fully fits inside the SRAMx address range, or TZSRAM is in DDR for debug
 * and test purposes.
 */
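/*
 * Illustrative example only (the values below are assumptions, not the
 * build defaults): with the whole SYSRAM secured (SYSRAM_SEC_SIZE ==
 * SYSRAM_SIZE), setting CFG_TZSRAM_START = SYSRAM_BASE and
 * CFG_TZSRAM_SIZE = SYSRAM_SIZE + SRAM1_SIZE satisfies
 * TZSRAM_FITS_IN_SYSRAM_AND_SRAMS, since SRAM1 is mapped contiguously
 * right after SYSRAM as shown in the layout above.
 */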
static_assert(TZSRAM_FITS_IN_SYSRAM_SEC || TZSRAM_FITS_IN_SYSRAM_AND_SRAMS ||
              TZSRAM_FITS_IN_SRAMS || TZSRAM_IS_IN_DRAM);
#endif /* CFG_WITH_PAGER */
#endif /* CFG_STM32MP15 */

static TEE_Result secure_pager_ram(struct dt_driver_provider *fw_provider,
                                   unsigned int decprot_id,
                                   paddr_t base, size_t secure_size)
{
        /* Lock firewall configuration for secure internal RAMs used by pager */
        uint32_t query_arg = DECPROT(decprot_id, DECPROT_S_RW, DECPROT_LOCK);
        struct firewall_query fw_query = {
                .ctrl = dt_driver_provider_priv_data(fw_provider),
                .args = &query_arg,
                .arg_count = 1,
        };
        TEE_Result res = TEE_ERROR_GENERIC;
        bool is_pager_ram = false;

#if defined(CFG_WITH_PAGER)
        is_pager_ram = core_is_buffer_intersect(CFG_TZSRAM_START,
                                                CFG_TZSRAM_SIZE,
                                                base, secure_size);
#endif
        if (!is_pager_ram)
                return TEE_SUCCESS;

        res = firewall_set_memory_configuration(&fw_query, base, secure_size);
        if (res)
                EMSG("Failed to configure secure SRAM %#"PRIxPA"..%#"PRIxPA,
                     base, base + secure_size);

        return res;
}

static TEE_Result non_secure_scmi_ram(struct dt_driver_provider *fw_provider,
                                      unsigned int decprot_id,
                                      paddr_t base, size_t size)
{
        /* Do not lock firewall configuration for non-secure internal RAMs */
        uint32_t query_arg = DECPROT(decprot_id, DECPROT_NS_RW, DECPROT_UNLOCK);
        struct firewall_query fw_query = {
                .ctrl = dt_driver_provider_priv_data(fw_provider),
                .args = &query_arg,
                .arg_count = 1,
        };
        TEE_Result res = TEE_ERROR_GENERIC;

        if (!core_is_buffer_intersect(CFG_STM32MP1_SCMI_SHM_BASE,
                                      CFG_STM32MP1_SCMI_SHM_SIZE,
                                      base, size))
                return TEE_SUCCESS;

        res = firewall_set_memory_configuration(&fw_query, base, size);
        if (res)
                EMSG("Failed to configure non-secure SRAM %#"PRIxPA"..%#"PRIxPA,
                     base, base + size);

        return res;
}

/* At run time we enforce that SRAM1 to SRAM4 are properly assigned if used */
static void configure_srams(struct dt_driver_provider *fw_provider)
{
        bool error = false;

        if (IS_ENABLED(CFG_WITH_PAGER)) {
                if (secure_pager_ram(fw_provider, STM32MP1_ETZPC_SRAM1_ID,
                                     SRAM1_BASE, SRAM1_SIZE))
                        error = true;

                if (secure_pager_ram(fw_provider, STM32MP1_ETZPC_SRAM2_ID,
                                     SRAM2_BASE, SRAM2_SIZE))
                        error = true;

                if (secure_pager_ram(fw_provider, STM32MP1_ETZPC_SRAM3_ID,
                                     SRAM3_BASE, SRAM3_SIZE))
                        error = true;

#if defined(CFG_STM32MP15)
                if (secure_pager_ram(fw_provider, STM32MP1_ETZPC_SRAM4_ID,
                                     SRAM4_BASE, SRAM4_SIZE))
                        error = true;
#endif
        }
        if (CFG_STM32MP1_SCMI_SHM_BASE) {
                if (non_secure_scmi_ram(fw_provider, STM32MP1_ETZPC_SRAM1_ID,
                                        SRAM1_BASE, SRAM1_SIZE))
                        error = true;

                if (non_secure_scmi_ram(fw_provider, STM32MP1_ETZPC_SRAM2_ID,
                                        SRAM2_BASE, SRAM2_SIZE))
                        error = true;

                if (non_secure_scmi_ram(fw_provider, STM32MP1_ETZPC_SRAM3_ID,
                                        SRAM3_BASE, SRAM3_SIZE))
                        error = true;

#if defined(CFG_STM32MP15)
                if (non_secure_scmi_ram(fw_provider, STM32MP1_ETZPC_SRAM4_ID,
                                        SRAM4_BASE, SRAM4_SIZE))
                        error = true;
#endif
        }

        if (error)
                panic();
}

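/*
 * Assign the lower SYSRAM_SEC_SIZE bytes of SYSRAM to the secure world
 * through the ETZPC TZMA1 region and wipe the remaining upper part that
 * is left to the non-secure world.
 */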
static void configure_sysram(struct dt_driver_provider *fw_provider)
{
        uint32_t query_arg = DECPROT(ETZPC_TZMA1_ID, DECPROT_S_RW,
                                     DECPROT_UNLOCK);
        struct firewall_query firewall = {
                .ctrl = dt_driver_provider_priv_data(fw_provider),
                .args = &query_arg,
                .arg_count = 1,
        };
        TEE_Result res = TEE_ERROR_GENERIC;

        res = firewall_set_memory_configuration(&firewall, SYSRAM_BASE,
                                                SYSRAM_SEC_SIZE);
        if (res)
                panic("Unable to secure SYSRAM");

        if (SYSRAM_SIZE > SYSRAM_SEC_SIZE) {
                size_t nsec_size = SYSRAM_SIZE - SYSRAM_SEC_SIZE;
                paddr_t nsec_start = SYSRAM_BASE + SYSRAM_SEC_SIZE;
                uint8_t *va = phys_to_virt(nsec_start, MEM_AREA_IO_NSEC,
                                           nsec_size);

                IMSG("Non-secure SYSRAM [%p %p]", va, va + nsec_size - 1);

                /* Clear content from the non-secure part */
                memset(va, 0, nsec_size);
        }
}

static TEE_Result init_late_stm32mp1_drivers(void)
{
        uint32_t __maybe_unused state = 0;

        /* Configure SYSRAM and SRAMx secure hardening */
        if (IS_ENABLED(CFG_STM32_ETZPC)) {
                struct dt_driver_provider *prov = NULL;
                int node = 0;

                node = fdt_node_offset_by_compatible(get_embedded_dt(), -1,
                                                     "st,stm32-etzpc");
                if (node < 0)
                        panic("Could not get ETZPC node");

                prov = dt_driver_get_provider_by_node(node, DT_DRIVER_FIREWALL);
                assert(prov);

                configure_sysram(prov);
                configure_srams(prov);
        }

#ifdef CFG_STM32MP15
        /* Devices in Secure Closed state require RCC secure hardening */
        if (stm32_bsec_get_state(&state))
                panic();
        if (state == BSEC_STATE_SEC_CLOSED && !stm32_rcc_is_secure())
                panic("Closed device mandates secure RCC");
#endif

        return TEE_SUCCESS;
}

driver_init_late(init_late_stm32mp1_drivers);

vaddr_t stm32_rcc_base(void)
{
        static struct io_pa_va base = { .pa = RCC_BASE };

        return io_pa_or_va_secure(&base, 1);
}

vaddr_t get_gicd_base(void)
{
        struct io_pa_va base = { .pa = GIC_BASE + GICD_OFFSET };

        return io_pa_or_va_secure(&base, 1);
}

void stm32mp_get_bsec_static_cfg(struct stm32_bsec_static_cfg *cfg)
{
        cfg->base = BSEC_BASE;
        cfg->upper_start = STM32MP1_UPPER_OTP_START;
        cfg->max_id = STM32MP1_OTP_MAX_ID;
}

bool __weak stm32mp_with_pmic(void)
{
        return false;
}

uint32_t may_spin_lock(unsigned int *lock)
{
        if (!lock || !cpu_mmu_enabled())
                return 0;

        return cpu_spin_lock_xsave(lock);
}

void may_spin_unlock(unsigned int *lock, uint32_t exceptions)
{
        if (!lock || !cpu_mmu_enabled())
                return;

        cpu_spin_unlock_xrestore(lock, exceptions);
}

static vaddr_t stm32_tamp_base(void)
{
        static struct io_pa_va base = { .pa = TAMP_BASE };

        return io_pa_or_va_secure(&base, 1);
}

static vaddr_t bkpreg_base(void)
{
        return stm32_tamp_base() + TAMP_BKP_REGISTER_OFF;
}

vaddr_t stm32mp_bkpreg(unsigned int idx)
{
        return bkpreg_base() + (idx * sizeof(uint32_t));
}

static bool __maybe_unused bank_is_valid(unsigned int bank)
{
        if (IS_ENABLED(CFG_STM32MP15))
                return bank == GPIO_BANK_Z || bank <= GPIO_BANK_K;

        if (IS_ENABLED(CFG_STM32MP13))
                return bank <= GPIO_BANK_I;

        panic();
}

#ifdef CFG_STM32_IWDG
TEE_Result stm32_get_iwdg_otp_config(paddr_t pbase,
                                     struct stm32_iwdg_otp_data *otp_data)
{
        unsigned int idx = 0;
        uint32_t otp_id = 0;
        size_t bit_len = 0;
        uint8_t bit_offset = 0;
        uint32_t otp_value = 0;

        switch (pbase) {
        case IWDG1_BASE:
                idx = 0;
                break;
        case IWDG2_BASE:
                idx = 1;
                break;
        default:
                panic();
        }

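        /*
         * The "hw2_otp" fuse word holds one hardware-enable bit, one
         * freeze-on-stop bit and one freeze-on-standby bit per IWDG
         * instance; idx selects the bits of the probed instance through
         * the HW2_OTP_IWDG_*_SHIFT offsets used below.
         */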
        if (stm32_bsec_find_otp_in_nvmem_layout("hw2_otp", &otp_id, &bit_offset,
                                                &bit_len) ||
            bit_len != 32 || bit_offset != 0)
                panic();

        if (stm32_bsec_read_otp(&otp_value, otp_id))
                panic();

        otp_data->hw_enabled = otp_value &
                               BIT(idx + HW2_OTP_IWDG_HW_ENABLE_SHIFT);
        otp_data->disable_on_stop = otp_value &
                                    BIT(idx + HW2_OTP_IWDG_FZ_STOP_SHIFT);
        otp_data->disable_on_standby = otp_value &
                                       BIT(idx + HW2_OTP_IWDG_FZ_STANDBY_SHIFT);

        return TEE_SUCCESS;
}
#endif /* CFG_STM32_IWDG */

#ifdef CFG_STM32_DEBUG_ACCESS
static TEE_Result init_debug(void)
{
        TEE_Result res = TEE_SUCCESS;
        uint32_t conf = stm32_bsec_read_debug_conf();
        struct clk *dbg_clk = stm32mp_rcc_clock_id_to_clk(CK_DBG);
        uint32_t state = 0;

        res = stm32_bsec_get_state(&state);
        if (res)
                return res;

        if (state != BSEC_STATE_SEC_CLOSED && conf) {
                if (IS_ENABLED(CFG_INSECURE))
                        IMSG("WARNING: All debug accesses are allowed");

                res = stm32_bsec_write_debug_conf(conf | BSEC_DEBUG_ALL);
                if (res)
                        return res;

                /*
                 * Enable the DBG clock as it is used to access the
                 * coprocessor debug registers
                 */
                clk_enable(dbg_clk);
        }

        return TEE_SUCCESS;
}
early_init_late(init_debug);
#endif /* CFG_STM32_DEBUG_ACCESS */

/* Some generic resources need to be unpaged */
DECLARE_KEEP_PAGER(pinctrl_apply_state);

bool stm32mp_allow_probe_shared_device(const void *fdt, int node)
{
        static int uart_console_node = -1;
        const char *compat = NULL;
        static bool once;

        if (IS_ENABLED(CFG_STM32_ALLOW_UNSAFE_PROBE))
                return true;

        if (!once) {
                get_console_node_from_dt((void *)fdt, &uart_console_node,
                                         NULL, NULL);
                once = true;
        }

        compat = fdt_stringlist_get(fdt, node, "compatible", 0, NULL);

        /*
         * Allow the OP-TEE console, the MP15 non-secure I2C and the RNG
         * to be shared with the non-secure world.
         */
        if (node == uart_console_node ||
            !strcmp(compat, "st,stm32mp15-i2c-non-secure") ||
            (!strcmp(compat, "st,stm32-rng") &&
             IS_ENABLED(CFG_WITH_SOFTWARE_PRNG)))
                return true;

        return false;
}

#if defined(CFG_STM32MP15) && defined(CFG_WITH_PAGER)
paddr_t stm32mp1_pa_or_sram_alias_pa(paddr_t pa)
{
        /*
         * OP-TEE uses the alias physical addresses of SRAM1/2/3/4, not
         * their standard physical addresses. This choice was initially
         * driven by the pager, which needs its internal secure memories
         * to be physically contiguous.
         */
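        /*
         * For instance (alias/base values are assumptions here, refer to
         * platform_config.h): with SRAM1_ALT_BASE = 0x10000000 and
         * SRAM1_BASE = 0x30000000, a physical address 0x10001000 is
         * translated to 0x30001000 by the offsets applied below.
         */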
        if (core_is_buffer_inside(pa, 1, SRAM1_ALT_BASE, SRAM1_SIZE))
                pa += SRAM1_BASE - SRAM1_ALT_BASE;
        else if (core_is_buffer_inside(pa, 1, SRAM2_ALT_BASE, SRAM2_SIZE))
                pa += SRAM2_BASE - SRAM2_ALT_BASE;
        else if (core_is_buffer_inside(pa, 1, SRAM3_ALT_BASE, SRAM3_SIZE))
                pa += SRAM3_BASE - SRAM3_ALT_BASE;
        else if (core_is_buffer_inside(pa, 1, SRAM4_ALT_BASE, SRAM4_SIZE))
                pa += SRAM4_BASE - SRAM4_ALT_BASE;

        return pa;
}

bool stm32mp1_ram_intersect_pager_ram(paddr_t base, size_t size)
{
        base = stm32mp1_pa_or_sram_alias_pa(base);

        return core_is_buffer_intersect(base, size, CFG_TZSRAM_START,
                                        CFG_TZSRAM_SIZE);
}
#endif

static TEE_Result get_chip_dev_id(uint32_t *dev_id)
{
#ifdef CFG_STM32MP13
        *dev_id = stm32mp_syscfg_get_chip_dev_id();
        return TEE_SUCCESS;
#else /* assume CFG_STM32MP15 */
        return stm32mp1_dbgmcu_get_chip_dev_id(dev_id);
#endif
}

static TEE_Result get_part_number(uint32_t *part_nb)
{
        static uint32_t part_number;
        uint32_t dev_id = 0;
        uint32_t otp = 0;
        size_t bit_len = 0;
        TEE_Result res = TEE_ERROR_GENERIC;

        assert(part_nb);

        if (part_number) {
                *part_nb = part_number;
                return TEE_SUCCESS;
        }

        res = get_chip_dev_id(&dev_id);
        if (res)
                return res;

        res = stm32_bsec_find_otp_in_nvmem_layout("part_number_otp",
                                                  &otp, NULL, &bit_len);
        if (res)
                return res;

        res = stm32_bsec_read_otp(&part_number, otp);
        if (res)
                return res;

        assert(bit_len < 16);
        part_number = (part_number & GENMASK_32(bit_len, 0)) |
                      SHIFT_U32(dev_id, 16);

        *part_nb = part_number;

        return TEE_SUCCESS;
}

bool stm32mp_supports_cpu_opp(uint32_t opp_id)
{
        uint32_t part_number = 0;
        uint32_t id = 0;

        if (get_part_number(&part_number)) {
                DMSG("Cannot get part number");
                panic();
        }

        switch (part_number) {
        case STM32MP135F_PART_NB:
        case STM32MP135D_PART_NB:
        case STM32MP133F_PART_NB:
        case STM32MP133D_PART_NB:
        case STM32MP131F_PART_NB:
        case STM32MP131D_PART_NB:
        case STM32MP157F_PART_NB:
        case STM32MP157D_PART_NB:
        case STM32MP153F_PART_NB:
        case STM32MP153D_PART_NB:
        case STM32MP151F_PART_NB:
        case STM32MP151D_PART_NB:
                id = BIT(1);
                break;
        default:
                id = BIT(0);
                break;
        }

        return opp_id & id;
}

bool stm32mp_supports_hw_cryp(void)
{
        uint32_t part_number = 0;

        if (!IS_ENABLED(CFG_STM32_CRYP))
                return false;

        if (get_part_number(&part_number)) {
                DMSG("Cannot get part number");
                panic();
        }

        switch (part_number) {
        case STM32MP135F_PART_NB:
        case STM32MP135C_PART_NB:
        case STM32MP133F_PART_NB:
        case STM32MP133C_PART_NB:
        case STM32MP131F_PART_NB:
        case STM32MP131C_PART_NB:
                return true;
        case STM32MP157F_PART_NB:
        case STM32MP157C_PART_NB:
        case STM32MP153F_PART_NB:
        case STM32MP153C_PART_NB:
        case STM32MP151F_PART_NB:
        case STM32MP151C_PART_NB:
                return true;
        default:
                return false;
        }
}

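/*
 * STM32MP151 variants embed a single Cortex-A7 core; on the other parts
 * the second core is supported when OP-TEE is built with
 * CFG_TEE_CORE_NB_CORE > 1.
 */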
bool stm32mp_supports_second_core(void)
{
        uint32_t part_number = 0;

        if (CFG_TEE_CORE_NB_CORE == 1)
                return false;

        if (get_part_number(&part_number)) {
                DMSG("Cannot get part number");
                panic();
        }

        switch (part_number) {
        case STM32MP151F_PART_NB:
        case STM32MP151D_PART_NB:
        case STM32MP151C_PART_NB:
        case STM32MP151A_PART_NB:
                return false;
        default:
                return true;
        }
}