// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2017-2025, STMicroelectronics
 * Copyright (c) 2016-2018, Linaro Limited
 */

#include <boot_api.h>
#include <config.h>
#include <console.h>
#include <drivers/firewall_device.h>
#include <drivers/gic.h>
#include <drivers/rstctrl.h>
#include <drivers/pinctrl.h>
#include <drivers/stm32_bsec.h>
#include <drivers/stm32_gpio.h>
#include <drivers/stm32_uart.h>
#include <drivers/stm32mp_dt_bindings.h>
#ifdef CFG_STM32MP15
#include <drivers/stm32mp1_rcc.h>
#endif
#include <io.h>
#include <kernel/boot.h>
#include <kernel/dt.h>
#include <kernel/dt_driver.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
#include <libfdt.h>
#include <mm/core_memprot.h>
#include <platform_config.h>
#include <sm/psci.h>
#include <stm32_util.h>
#include <string.h>
#include <trace.h>

register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB1_BASE, APB1_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB2_BASE, APB2_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB3_BASE, APB3_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB4_BASE, APB4_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, APB5_BASE, APB5_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, AHB4_BASE, AHB4_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_NSEC, AHB5_BASE, AHB5_SIZE);

register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB1_BASE, APB1_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB3_BASE, APB3_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB4_BASE, APB4_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB5_BASE, APB5_SIZE);
#ifdef CFG_STM32MP13
register_phys_mem_pgdir(MEM_AREA_IO_SEC, APB6_BASE, APB6_SIZE);
#endif
register_phys_mem_pgdir(MEM_AREA_IO_SEC, AHB4_BASE, AHB4_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, AHB5_BASE, AHB5_SIZE);
register_phys_mem_pgdir(MEM_AREA_IO_SEC, GIC_BASE, GIC_SIZE);

register_ddr(DDR_BASE, CFG_DRAM_SIZE);

#define _ID2STR(id)	(#id)
#define ID2STR(id)	_ID2STR(id)

static TEE_Result platform_banner(void)
{
        IMSG("Platform stm32mp1: flavor %s - DT %s",
             ID2STR(PLATFORM_FLAVOR),
             ID2STR(CFG_EMBED_DTB_SOURCE_FILE));

        return TEE_SUCCESS;
}
service_init(platform_banner);

/*
 * Console
 *
 * CFG_STM32_EARLY_CONSOLE_UART specifies the ID of the UART used for
 * the trace console. Value 0 disables the early console.
 *
 * We cannot use the generic serial_console support since probing
 * the console requires the platform clock driver to be already up
 * and ready, which is only the case once the service_init level has
 * completed.
 */
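/*
 * Console device state shared by the early console registered from
 * plat_console_init() and the DT-probed console installed later by
 * init_console_from_dt(). As an illustration, building with
 * CFG_STM32_EARLY_CONSOLE_UART=4 selects UART4 as the early console,
 * per the uarts[] table below.
 */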
static struct stm32_uart_pdata console_data;

void plat_console_init(void)
{
        /* Early console initialization before MMU setup */
        struct uart {
                paddr_t pa;
        } uarts[] = {
                [0] = { .pa = 0 },
                [1] = { .pa = USART1_BASE },
                [2] = { .pa = USART2_BASE },
                [3] = { .pa = USART3_BASE },
                [4] = { .pa = UART4_BASE },
                [5] = { .pa = UART5_BASE },
                [6] = { .pa = USART6_BASE },
                [7] = { .pa = UART7_BASE },
                [8] = { .pa = UART8_BASE },
        };

        COMPILE_TIME_ASSERT(ARRAY_SIZE(uarts) > CFG_STM32_EARLY_CONSOLE_UART);

        if (!uarts[CFG_STM32_EARLY_CONSOLE_UART].pa)
                return;

        /* No clock yet bound to the UART console */
        console_data.clock = NULL;

        stm32_uart_init(&console_data, uarts[CFG_STM32_EARLY_CONSOLE_UART].pa);

        register_serial_console(&console_data.chip);

        IMSG("Early console on UART#%u", CFG_STM32_EARLY_CONSOLE_UART);
}

static TEE_Result init_console_from_dt(void)
{
        struct stm32_uart_pdata *pd = NULL;
        void *fdt = NULL;
        int node = 0;
        TEE_Result res = TEE_ERROR_GENERIC;

        fdt = get_embedded_dt();
        res = get_console_node_from_dt(fdt, &node, NULL, NULL);
        if (res == TEE_ERROR_ITEM_NOT_FOUND) {
                fdt = get_external_dt();
                res = get_console_node_from_dt(fdt, &node, NULL, NULL);
                if (res == TEE_ERROR_ITEM_NOT_FOUND)
                        return TEE_SUCCESS;
                if (res != TEE_SUCCESS)
                        return res;
        }

        pd = stm32_uart_init_from_dt_node(fdt, node);
        if (!pd) {
                IMSG("DTB disables console");
                register_serial_console(NULL);
                return TEE_SUCCESS;
        }

        /* Replace early console with the new one */
        console_flush();
        console_data = *pd;
        register_serial_console(&console_data.chip);
        IMSG("DTB enables console");
        free(pd);

        return TEE_SUCCESS;
}

/* Probe console from DT once clock inits (service init level) are completed */
service_init_late(init_console_from_dt);

static uintptr_t stm32_dbgmcu_base(void)
{
        static void *va;

        if (!cpu_mmu_enabled())
                return DBGMCU_BASE;

        if (!va)
                va = phys_to_virt(DBGMCU_BASE, MEM_AREA_IO_SEC, 1);

        return (uintptr_t)va;
}

/* SoC device ID util, returns default ID if can't access DBGMCU */
TEE_Result stm32mp1_dbgmcu_get_chip_dev_id(uint32_t *chip_dev_id)
{
        uint32_t id = STM32MP1_CHIP_ID;

        assert(chip_dev_id);

        if (stm32_bsec_read_debug_conf() & BSEC_DBGSWGEN)
                id = io_read32(stm32_dbgmcu_base() + DBGMCU_IDC) &
                     DBGMCU_IDC_DEV_ID_MASK;

        *chip_dev_id = id;

        return TEE_SUCCESS;
}

/*
 * GIC init, used also for primary/secondary boot core wake completion
 */
void boot_primary_init_intc(void)
{
        gic_init(GIC_BASE + GICC_OFFSET, GIC_BASE + GICD_OFFSET);

        stm32mp_register_online_cpu();
}

void boot_secondary_init_intc(void)
{
        gic_init_per_cpu();

        stm32mp_register_online_cpu();
}
#ifdef CFG_STM32MP15
/*
 * This concerns the OP-TEE pager for STM32MP1 using secure internal
 * RAMs to execute. TZSRAM refers to the TZSRAM_BASE/TZSRAM_SIZE
 * used in boot.c to locate secure unpaged memory.
 *
 * STM32MP15 variants embed 640kB of contiguous securable SRAMs:
 *
 * +--------------+ <-- SYSRAM_BASE
 * |              | lower part can be assigned to secure world
 * | SYSRAM 256kB | 4kB granule boundary
 * |              | upper part can be assigned to non-secure world
 * +--------------+ <-- SRAM1_BASE (= SYSRAM_BASE + SYSRAM_SIZE)
 * |              | full range assigned to non-secure world, or
 * | SRAM1 128kB  | to secure world, or to Cortex-M4 exclusive access
 * +--------------+ <-- SRAM2_BASE (= SRAM1_BASE + SRAM1_SIZE)
 * |              | full range assigned to non-secure world, or
 * | SRAM2 128kB  | to secure world, or to Cortex-M4 exclusive access
 * +--------------+ <-- SRAM3_BASE (= SRAM2_BASE + SRAM2_SIZE)
 * |              | full range assigned to non-secure world, or
 * | SRAM3 64kB   | to secure world, or to Cortex-M4 exclusive access
 * +--------------+ <-- SRAM4_BASE (= SRAM3_BASE + SRAM3_SIZE)
 * |              | full range assigned to non-secure world, or
 * | SRAM4 64kB   | to secure world, or to Cortex-M4 exclusive access
 * +--------------+ <-- SRAM4_BASE + SRAM4_SIZE
 *
 * If the SRAMx memories are not used by the companion Cortex-M4
 * processor, OP-TEE can use them.
 *
 * SYSRAM configuration for secure/non-secure boundaries requires the
 * secure SYSRAM memory to start at the SYSRAM physical base address and
 * grow from there, while the non-secure SYSRAM range lies at the SYSRAM
 * end addresses, with a 4kB page granule.
 *
 * SRAM1, SRAM2, SRAM3 and SRAM4 are independently assigned to secure
 * world, to non-secure world or possibly to Cortex-M4 exclusive access.
 * Each assignment covers the full related SRAMx memory range.
 *
 * Using non-secure SYSRAM or one of the SRAMx for SCMI message
 * communication can be done using CFG_STM32MP1_SCMI_SHM_BASE and
 * CFG_STM32MP1_SCMI_SHM_SIZE. This requires the related memory area to
 * be assigned to non-secure world.
 *
 * Using secure internal memories (SYSRAM and/or some SRAMx) with
 * STM32MP15 shall meet these constraints, given that the TZSRAM physical
 * memory range shall be contiguous.
 */
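/*
 * Illustrative layout only (not a reference configuration): assuming
 * SYSRAM_SEC_SIZE == SYSRAM_SIZE, setting CFG_TZSRAM_START to SYSRAM_BASE
 * and CFG_TZSRAM_SIZE to 384kB makes TZSRAM cover the whole SYSRAM plus
 * SRAM1, which satisfies the TZSRAM_FITS_IN_SYSRAM_AND_SRAMS constraint
 * checked below when the pager is enabled.
 */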
#define SYSRAM_END		(SYSRAM_BASE + SYSRAM_SIZE)
#define SYSRAM_SEC_END		(SYSRAM_BASE + SYSRAM_SEC_SIZE)
#define SRAMS_END		(SRAM4_BASE + SRAM4_SIZE)
#define SRAMS_START		SRAM1_BASE
#define TZSRAM_END		(CFG_TZSRAM_START + CFG_TZSRAM_SIZE)

#define TZSRAM_FITS_IN_SYSRAM_SEC ((CFG_TZSRAM_START >= SYSRAM_BASE) && \
				   (TZSRAM_END <= SYSRAM_SEC_END))

#define TZSRAM_FITS_IN_SYSRAM_AND_SRAMS ((CFG_TZSRAM_START >= SYSRAM_BASE) && \
					 (CFG_TZSRAM_START < SYSRAM_END) && \
					 (TZSRAM_END > SYSRAM_END) && \
					 (TZSRAM_END <= SRAMS_END) && \
					 (SYSRAM_SIZE == SYSRAM_SEC_SIZE))

#define TZSRAM_FITS_IN_SRAMS ((CFG_TZSRAM_START >= SRAMS_START) && \
			      (CFG_TZSRAM_START < SRAMS_END) && \
			      (TZSRAM_END <= SRAMS_END))

#define TZSRAM_IS_IN_DRAM (CFG_TZSRAM_START >= CFG_DRAM_BASE)

#ifdef CFG_WITH_PAGER
/*
 * At build time, we enforce that, when the pager is used, either TZSRAM
 * fully fits inside the secure SYSRAM address range, or TZSRAM fully fits
 * inside the full SYSRAM and spreads orderly into the SRAMx, or TZSRAM
 * fully fits inside the SRAMx address range, or TZSRAM is in DDR for
 * debug and test purposes.
 */
static_assert(TZSRAM_FITS_IN_SYSRAM_SEC || TZSRAM_FITS_IN_SYSRAM_AND_SRAMS ||
	      TZSRAM_FITS_IN_SRAMS || TZSRAM_IS_IN_DRAM);
#endif /* CFG_WITH_PAGER */
#endif /* CFG_STM32MP15 */

static TEE_Result secure_pager_ram(struct dt_driver_provider *fw_provider,
                                   unsigned int decprot_id,
                                   paddr_t base, size_t secure_size)
{
        /* Lock firewall configuration for secure internal RAMs used by pager */
        uint32_t query_arg = DECPROT(decprot_id, DECPROT_S_RW, DECPROT_LOCK);
        struct firewall_query fw_query = {
                .ctrl = dt_driver_provider_priv_data(fw_provider),
                .args = &query_arg,
                .arg_count = 1,
        };
        TEE_Result res = TEE_ERROR_GENERIC;
        bool is_pager_ram = false;

#if defined(CFG_WITH_PAGER)
        is_pager_ram = core_is_buffer_intersect(CFG_TZSRAM_START,
                                                CFG_TZSRAM_SIZE,
                                                base, secure_size);
#endif
        if (!is_pager_ram)
                return TEE_SUCCESS;

        res = firewall_set_memory_configuration(&fw_query, base, secure_size);
        if (res)
                EMSG("Failed to configure secure SRAM %#"PRIxPA"..%#"PRIxPA,
                     base, base + secure_size);

        return res;
}

static TEE_Result non_secure_scmi_ram(struct dt_driver_provider *fw_provider,
                                      unsigned int decprot_id,
                                      paddr_t base, size_t size)
{
        /* Do not lock firewall configuration for non-secure internal RAMs */
        uint32_t query_arg = DECPROT(decprot_id, DECPROT_NS_RW, DECPROT_UNLOCK);
        struct firewall_query fw_query = {
                .ctrl = dt_driver_provider_priv_data(fw_provider),
                .args = &query_arg,
                .arg_count = 1,
        };
        TEE_Result res = TEE_ERROR_GENERIC;

        if (!core_is_buffer_intersect(CFG_STM32MP1_SCMI_SHM_BASE,
                                      CFG_STM32MP1_SCMI_SHM_SIZE,
                                      base, size))
                return TEE_SUCCESS;

        res = firewall_set_memory_configuration(&fw_query, base, size);
        if (res)
                EMSG("Failed to configure non-secure SRAM %#"PRIxPA"..%#"PRIxPA,
                     base, base + size);

        return res;
}

/* At run time we enforce that SRAM1 to SRAM4 are properly assigned if used */
static void configure_srams(struct dt_driver_provider *fw_provider)
{
        bool error = false;

        if (IS_ENABLED(CFG_WITH_PAGER)) {
                if (secure_pager_ram(fw_provider, STM32MP1_ETZPC_SRAM1_ID,
                                     SRAM1_BASE, SRAM1_SIZE))
                        error = true;

                if (secure_pager_ram(fw_provider, STM32MP1_ETZPC_SRAM2_ID,
                                     SRAM2_BASE, SRAM2_SIZE))
                        error = true;

                if (secure_pager_ram(fw_provider, STM32MP1_ETZPC_SRAM3_ID,
                                     SRAM3_BASE, SRAM3_SIZE))
                        error = true;

#if defined(CFG_STM32MP15)
                if (secure_pager_ram(fw_provider, STM32MP1_ETZPC_SRAM4_ID,
                                     SRAM4_BASE, SRAM4_SIZE))
                        error = true;
#endif
        }

        if (CFG_STM32MP1_SCMI_SHM_BASE) {
                if (non_secure_scmi_ram(fw_provider, STM32MP1_ETZPC_SRAM1_ID,
                                        SRAM1_BASE, SRAM1_SIZE))
                        error = true;

                if (non_secure_scmi_ram(fw_provider, STM32MP1_ETZPC_SRAM2_ID,
                                        SRAM2_BASE, SRAM2_SIZE))
                        error = true;

                if (non_secure_scmi_ram(fw_provider, STM32MP1_ETZPC_SRAM3_ID,
                                        SRAM3_BASE, SRAM3_SIZE))
                        error = true;

#if defined(CFG_STM32MP15)
                if (non_secure_scmi_ram(fw_provider, STM32MP1_ETZPC_SRAM4_ID,
                                        SRAM4_BASE, SRAM4_SIZE))
                        error = true;
#endif
        }

        if (error)
                panic();
}
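/*
 * Assign the lower SYSRAM_SEC_SIZE bytes of SYSRAM to the secure world
 * through the ETZPC TZMA1 firewall, then wipe the remaining upper part,
 * if any, that is left accessible to the non-secure world.
 */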
static void configure_sysram(struct dt_driver_provider *fw_provider)
{
        uint32_t query_arg = DECPROT(ETZPC_TZMA1_ID, DECPROT_S_RW,
                                     DECPROT_UNLOCK);
        struct firewall_query firewall = {
                .ctrl = dt_driver_provider_priv_data(fw_provider),
                .args = &query_arg,
                .arg_count = 1,
        };
        TEE_Result res = TEE_ERROR_GENERIC;

        res = firewall_set_memory_configuration(&firewall, SYSRAM_BASE,
                                                SYSRAM_SEC_SIZE);
        if (res)
                panic("Unable to secure SYSRAM");

        if (SYSRAM_SIZE > SYSRAM_SEC_SIZE) {
                size_t nsec_size = SYSRAM_SIZE - SYSRAM_SEC_SIZE;
                paddr_t nsec_start = SYSRAM_BASE + SYSRAM_SEC_SIZE;
                uint8_t *va = phys_to_virt(nsec_start, MEM_AREA_IO_NSEC,
                                           nsec_size);

                IMSG("Non-secure SYSRAM [%p %p]", va, va + nsec_size - 1);

                /* Clear content from the non-secure part */
                memset(va, 0, nsec_size);
        }
}

static TEE_Result init_late_stm32mp1_drivers(void)
{
        uint32_t __maybe_unused state = 0;

        /* Configure SYSRAM and SRAMx secure hardening */
        if (IS_ENABLED(CFG_STM32_ETZPC)) {
                struct dt_driver_provider *prov = NULL;
                int node = 0;

                node = fdt_node_offset_by_compatible(get_embedded_dt(), -1,
                                                     "st,stm32-etzpc");
                if (node < 0)
                        panic("Could not get ETZPC node");

                prov = dt_driver_get_provider_by_node(node, DT_DRIVER_FIREWALL);
                assert(prov);

                configure_sysram(prov);
                configure_srams(prov);
        }

#ifdef CFG_STM32MP15
        /* Devices in secure closed state require RCC secure hardening */
        if (stm32_bsec_get_state(&state))
                panic();
        if (state == BSEC_STATE_SEC_CLOSED && !stm32_rcc_is_secure())
                panic("Closed device mandates secure RCC");
#endif

        return TEE_SUCCESS;
}

driver_init_late(init_late_stm32mp1_drivers);

vaddr_t stm32_rcc_base(void)
{
        static struct io_pa_va base = { .pa = RCC_BASE };

        return io_pa_or_va_secure(&base, 1);
}

vaddr_t get_gicd_base(void)
{
        struct io_pa_va base = { .pa = GIC_BASE + GICD_OFFSET };

        return io_pa_or_va_secure(&base, 1);
}

void stm32mp_get_bsec_static_cfg(struct stm32_bsec_static_cfg *cfg)
{
        cfg->base = BSEC_BASE;
        cfg->upper_start = STM32MP1_UPPER_OTP_START;
        cfg->max_id = STM32MP1_OTP_MAX_ID;
}

bool __weak stm32mp_with_pmic(void)
{
        return false;
}

uint32_t may_spin_lock(unsigned int *lock)
{
        if (!lock || !cpu_mmu_enabled())
                return 0;

        return cpu_spin_lock_xsave(lock);
}

void may_spin_unlock(unsigned int *lock, uint32_t exceptions)
{
        if (!lock || !cpu_mmu_enabled())
                return;

        cpu_spin_unlock_xrestore(lock, exceptions);
}

static vaddr_t stm32_tamp_base(void)
{
        static struct io_pa_va base = { .pa = TAMP_BASE };

        return io_pa_or_va_secure(&base, 1);
}

static vaddr_t bkpreg_base(void)
{
        return stm32_tamp_base() + TAMP_BKP_REGISTER_OFF;
}

vaddr_t stm32mp_bkpreg(unsigned int idx)
{
        return bkpreg_base() + (idx * sizeof(uint32_t));
}

static bool __maybe_unused bank_is_valid(unsigned int bank)
{
        if (IS_ENABLED(CFG_STM32MP15))
                return bank == GPIO_BANK_Z || bank <= GPIO_BANK_K;

        if (IS_ENABLED(CFG_STM32MP13))
                return bank <= GPIO_BANK_I;

        panic();
}
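/*
 * With CFG_STM32_DEBUG_ACCESS, when the device is not in the secure closed
 * lifecycle state and some debug access is already enabled in BSEC, extend
 * the configuration to all debug accesses and enable the debug clock used
 * to reach coprocessor debug registers.
 */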
#ifdef CFG_STM32_DEBUG_ACCESS
static TEE_Result init_debug(void)
{
        TEE_Result res = TEE_SUCCESS;
        uint32_t conf = stm32_bsec_read_debug_conf();
        struct clk *dbg_clk = stm32mp_rcc_clock_id_to_clk(CK_DBG);
        uint32_t state = 0;

        res = stm32_bsec_get_state(&state);
        if (res)
                return res;

        if (state != BSEC_STATE_SEC_CLOSED && conf) {
                if (IS_ENABLED(CFG_INSECURE))
                        IMSG("WARNING: All debug accesses are allowed");

                res = stm32_bsec_write_debug_conf(conf | BSEC_DEBUG_ALL);
                if (res)
                        return res;

                /*
                 * Enable DBG clock as used to access coprocessor
                 * debug registers
                 */
                clk_enable(dbg_clk);
        }

        return TEE_SUCCESS;
}
early_init_late(init_debug);
#endif /* CFG_STM32_DEBUG_ACCESS */

/* Some generic resources need to be unpaged */
DECLARE_KEEP_PAGER(pinctrl_apply_state);

bool stm32mp_allow_probe_shared_device(const void *fdt, int node)
{
        static int uart_console_node = -1;
        const char *compat = NULL;
        static bool once;

        if (IS_ENABLED(CFG_STM32_ALLOW_UNSAFE_PROBE))
                return true;

        if (!once) {
                get_console_node_from_dt((void *)fdt, &uart_console_node,
                                         NULL, NULL);
                once = true;
        }

        compat = fdt_stringlist_get(fdt, node, "compatible", 0, NULL);

        /*
         * Allow OP-TEE console and MP15 I2C and RNG to be shared
         * with non-secure world.
         */
        if (node == uart_console_node ||
            !strcmp(compat, "st,stm32mp15-i2c-non-secure") ||
            (!strcmp(compat, "st,stm32-rng") &&
             IS_ENABLED(CFG_WITH_SOFTWARE_PRNG)))
                return true;

        return false;
}

#if defined(CFG_STM32MP15) && defined(CFG_WITH_PAGER)
paddr_t stm32mp1_pa_or_sram_alias_pa(paddr_t pa)
{
        /*
         * OP-TEE uses the alias physical addresses of SRAM1/2/3/4,
         * not the standard physical addresses. This choice was initially
         * driven by pager that needs physically contiguous memories
         * for internal secure memories.
         */
        if (core_is_buffer_inside(pa, 1, SRAM1_ALT_BASE, SRAM1_SIZE))
                pa += SRAM1_BASE - SRAM1_ALT_BASE;
        else if (core_is_buffer_inside(pa, 1, SRAM2_ALT_BASE, SRAM2_SIZE))
                pa += SRAM2_BASE - SRAM2_ALT_BASE;
        else if (core_is_buffer_inside(pa, 1, SRAM3_ALT_BASE, SRAM3_SIZE))
                pa += SRAM3_BASE - SRAM3_ALT_BASE;
        else if (core_is_buffer_inside(pa, 1, SRAM4_ALT_BASE, SRAM4_SIZE))
                pa += SRAM4_BASE - SRAM4_ALT_BASE;

        return pa;
}

bool stm32mp1_ram_intersect_pager_ram(paddr_t base, size_t size)
{
        base = stm32mp1_pa_or_sram_alias_pa(base);

        return core_is_buffer_intersect(base, size, CFG_TZSRAM_START,
                                        CFG_TZSRAM_SIZE);
}
#endif

static TEE_Result get_chip_dev_id(uint32_t *dev_id)
{
#ifdef CFG_STM32MP13
        *dev_id = stm32mp_syscfg_get_chip_dev_id();
        return TEE_SUCCESS;
#else /* assume CFG_STM32MP15 */
        return stm32mp1_dbgmcu_get_chip_dev_id(dev_id);
#endif
}

static TEE_Result get_part_number(uint32_t *part_nb)
{
        static uint32_t part_number;
        uint32_t dev_id = 0;
        uint32_t otp = 0;
        size_t bit_len = 0;
        TEE_Result res = TEE_ERROR_GENERIC;

        assert(part_nb);

        if (part_number) {
                *part_nb = part_number;
                return TEE_SUCCESS;
        }

        res = get_chip_dev_id(&dev_id);
        if (res)
                return res;

        res = stm32_bsec_find_otp_in_nvmem_layout("part_number_otp",
                                                  &otp, NULL, &bit_len);
        if (res)
                return res;

        res = stm32_bsec_read_otp(&part_number, otp);
        if (res)
                return res;

        assert(bit_len < 16);
        part_number = (part_number & GENMASK_32(bit_len, 0)) |
                      SHIFT_U32(dev_id, 16);

        *part_nb = part_number;

        return TEE_SUCCESS;
}
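/*
 * Part-number-based capability check: the 'D' and 'F' part number variants
 * listed below report support for the CPU operating point identified by
 * BIT(1), while the other parts only report the one identified by BIT(0).
 */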
bool stm32mp_supports_cpu_opp(uint32_t opp_id)
{
        uint32_t part_number = 0;
        uint32_t id = 0;

        if (get_part_number(&part_number)) {
                DMSG("Cannot get part number");
                panic();
        }

        switch (part_number) {
        case STM32MP135F_PART_NB:
        case STM32MP135D_PART_NB:
        case STM32MP133F_PART_NB:
        case STM32MP133D_PART_NB:
        case STM32MP131F_PART_NB:
        case STM32MP131D_PART_NB:
        case STM32MP157F_PART_NB:
        case STM32MP157D_PART_NB:
        case STM32MP153F_PART_NB:
        case STM32MP153D_PART_NB:
        case STM32MP151F_PART_NB:
        case STM32MP151D_PART_NB:
                id = BIT(1);
                break;
        default:
                id = BIT(0);
                break;
        }

        return opp_id & id;
}

bool stm32mp_supports_hw_cryp(void)
{
        uint32_t part_number = 0;

        if (!IS_ENABLED(CFG_STM32_CRYP))
                return false;

        if (get_part_number(&part_number)) {
                DMSG("Cannot get part number");
                panic();
        }

        switch (part_number) {
        case STM32MP135F_PART_NB:
        case STM32MP135C_PART_NB:
        case STM32MP133F_PART_NB:
        case STM32MP133C_PART_NB:
        case STM32MP131F_PART_NB:
        case STM32MP131C_PART_NB:
                return true;
        case STM32MP157F_PART_NB:
        case STM32MP157C_PART_NB:
        case STM32MP153F_PART_NB:
        case STM32MP153C_PART_NB:
        case STM32MP151F_PART_NB:
        case STM32MP151C_PART_NB:
                return true;
        default:
                return false;
        }
}

bool stm32mp_supports_second_core(void)
{
        uint32_t part_number = 0;

        if (CFG_TEE_CORE_NB_CORE == 1)
                return false;

        if (get_part_number(&part_number)) {
                DMSG("Cannot get part number");
                panic();
        }

        switch (part_number) {
        case STM32MP151F_PART_NB:
        case STM32MP151D_PART_NB:
        case STM32MP151C_PART_NB:
        case STM32MP151A_PART_NB:
                return false;
        default:
                return true;
        }
}

void __noreturn do_reset(const char *str __maybe_unused)
{
        struct rstctrl *rstctrl = NULL;

        if (CFG_TEE_CORE_NB_CORE > 1) {
                /* Halt execution of other CPUs */
                interrupt_raise_sgi(interrupt_get_main_chip(),
                                    CFG_HALT_CORES_SGI,
                                    ITR_CPU_MASK_TO_OTHER_CPUS);
                mdelay(1);
        }

        IMSG("Forced system reset %s", str);
        console_flush();

        /* Request system reset to RCC driver */
        rstctrl = stm32mp_rcc_reset_id_to_rstctrl(MPSYST_R);
        rstctrl_assert(rstctrl);
        udelay(100);

        /* Cannot occur */
        panic();
}