1 /* 2 * Copyright (c) 2022-2025 Arm Limited. All rights reserved. 3 * 4 * SPDX-License-Identifier: BSD-3-Clause 5 * 6 * DRTM service 7 * 8 * Authors: 9 * Lucian Paul-Trifu <lucian.paultrifu@gmail.com> 10 * Brian Nezvadovitz <brinez@microsoft.com> 2021-02-01 11 */ 12 13 #include <stdint.h> 14 15 #include <arch.h> 16 #include <arch_helpers.h> 17 #include <common/bl_common.h> 18 #include <common/debug.h> 19 #include <common/runtime_svc.h> 20 #include <drivers/auth/crypto_mod.h> 21 #include "drtm_main.h" 22 #include "drtm_measurements.h" 23 #include "drtm_remediation.h" 24 #include <lib/el3_runtime/context_mgmt.h> 25 #include <lib/psci/psci_lib.h> 26 #include <lib/xlat_tables/xlat_tables_v2.h> 27 #include <plat/common/platform.h> 28 #include <services/drtm_svc.h> 29 #include <services/sdei.h> 30 #include <platform_def.h> 31 32 /* Structure to store DRTM features specific to the platform. */ 33 static drtm_features_t plat_drtm_features; 34 35 /* DRTM-formatted memory map. */ 36 static drtm_memory_region_descriptor_table_t *plat_drtm_mem_map; 37 38 /* DLME header */ 39 struct_dlme_data_header dlme_data_hdr_init; 40 41 /* Minimum data memory requirement */ 42 uint64_t dlme_data_min_size; 43 44 int drtm_setup(void) 45 { 46 bool rc; 47 const plat_drtm_tpm_features_t *plat_tpm_feat; 48 const plat_drtm_dma_prot_features_t *plat_dma_prot_feat; 49 50 INFO("DRTM service setup\n"); 51 52 /* Read boot PE ID from MPIDR */ 53 plat_drtm_features.boot_pe_id = read_mpidr_el1() & MPIDR_AFFINITY_MASK; 54 55 rc = drtm_dma_prot_init(); 56 if (rc) { 57 return INTERNAL_ERROR; 58 } 59 60 /* 61 * initialise the platform supported crypto module that will 62 * be used by the DRTM-service to calculate hash of DRTM- 63 * implementation specific components 64 */ 65 crypto_mod_init(); 66 67 /* Build DRTM-compatible address map. */ 68 plat_drtm_mem_map = drtm_build_address_map(); 69 if (plat_drtm_mem_map == NULL) { 70 return INTERNAL_ERROR; 71 } 72 73 /* Get DRTM features from platform hooks. */ 74 plat_tpm_feat = plat_drtm_get_tpm_features(); 75 if (plat_tpm_feat == NULL) { 76 return INTERNAL_ERROR; 77 } 78 79 plat_dma_prot_feat = plat_drtm_get_dma_prot_features(); 80 if (plat_dma_prot_feat == NULL) { 81 return INTERNAL_ERROR; 82 } 83 84 /* 85 * Add up minimum DLME data memory. 86 * 87 * For systems with complete DMA protection there is only one entry in 88 * the protected regions table. 89 */ 90 if (plat_dma_prot_feat->dma_protection_support == 91 ARM_DRTM_DMA_PROT_FEATURES_DMA_SUPPORT_COMPLETE) { 92 dlme_data_min_size = 93 sizeof(drtm_memory_region_descriptor_table_t) + 94 sizeof(drtm_mem_region_t); 95 dlme_data_hdr_init.dlme_prot_regions_size = dlme_data_min_size; 96 } else { 97 /* 98 * TODO set protected regions table size based on platform DMA 99 * protection configuration 100 */ 101 panic(); 102 } 103 104 dlme_data_hdr_init.dlme_addr_map_size = drtm_get_address_map_size(); 105 dlme_data_hdr_init.dlme_tcb_hashes_table_size = 106 plat_drtm_get_tcb_hash_table_size(); 107 dlme_data_hdr_init.dlme_impdef_region_size = 108 plat_drtm_get_imp_def_dlme_region_size(); 109 110 dlme_data_min_size += dlme_data_hdr_init.dlme_addr_map_size + 111 ARM_DRTM_MIN_EVENT_LOG_SIZE + 112 dlme_data_hdr_init.dlme_tcb_hashes_table_size + 113 dlme_data_hdr_init.dlme_impdef_region_size; 114 115 /* Fill out platform DRTM features structure */ 116 /* Only support default PCR schema (0x1) in this implementation. */ 117 ARM_DRTM_TPM_FEATURES_SET_PCR_SCHEMA(plat_drtm_features.tpm_features, 118 ARM_DRTM_TPM_FEATURES_PCR_SCHEMA_DEFAULT); 119 ARM_DRTM_TPM_FEATURES_SET_TPM_HASH(plat_drtm_features.tpm_features, 120 plat_tpm_feat->tpm_based_hash_support); 121 ARM_DRTM_TPM_FEATURES_SET_FW_HASH(plat_drtm_features.tpm_features, 122 plat_tpm_feat->firmware_hash_algorithm); 123 ARM_DRTM_MIN_MEM_REQ_SET_MIN_DLME_DATA_SIZE(plat_drtm_features.minimum_memory_requirement, 124 page_align(dlme_data_min_size, UP)/PAGE_SIZE); 125 ARM_DRTM_MIN_MEM_REQ_SET_DCE_SIZE(plat_drtm_features.minimum_memory_requirement, 126 plat_drtm_get_min_size_normal_world_dce()); 127 ARM_DRTM_DMA_PROT_FEATURES_SET_MAX_REGIONS(plat_drtm_features.dma_prot_features, 128 plat_dma_prot_feat->max_num_mem_prot_regions); 129 ARM_DRTM_DMA_PROT_FEATURES_SET_DMA_SUPPORT(plat_drtm_features.dma_prot_features, 130 plat_dma_prot_feat->dma_protection_support); 131 ARM_DRTM_TCB_HASH_FEATURES_SET_MAX_NUM_HASHES(plat_drtm_features.tcb_hash_features, 132 plat_drtm_get_tcb_hash_features()); 133 134 return 0; 135 } 136 137 static inline void invalidate_icache_all(void) 138 { 139 __asm__ volatile("ic ialluis"); 140 dsb(); 141 isb(); 142 } 143 144 static inline uint64_t drtm_features_tpm(void *ctx) 145 { 146 SMC_RET2(ctx, 1ULL, /* TPM feature is supported */ 147 plat_drtm_features.tpm_features); 148 } 149 150 static inline uint64_t drtm_features_mem_req(void *ctx) 151 { 152 SMC_RET2(ctx, 1ULL, /* memory req Feature is supported */ 153 plat_drtm_features.minimum_memory_requirement); 154 } 155 156 static inline uint64_t drtm_features_boot_pe_id(void *ctx) 157 { 158 SMC_RET2(ctx, 1ULL, /* Boot PE feature is supported */ 159 plat_drtm_features.boot_pe_id); 160 } 161 162 static inline uint64_t drtm_features_dma_prot(void *ctx) 163 { 164 SMC_RET2(ctx, 1ULL, /* DMA protection feature is supported */ 165 plat_drtm_features.dma_prot_features); 166 } 167 168 static inline uint64_t drtm_features_tcb_hashes(void *ctx) 169 { 170 SMC_RET2(ctx, 1ULL, /* TCB hash feature is supported */ 171 plat_drtm_features.tcb_hash_features); 172 } 173 174 static enum drtm_retc drtm_dl_check_caller_el(void *ctx) 175 { 176 uint64_t spsr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SPSR_EL3); 177 uint64_t dl_caller_el; 178 uint64_t dl_caller_aarch; 179 180 dl_caller_el = spsr_el3 >> MODE_EL_SHIFT & MODE_EL_MASK; 181 dl_caller_aarch = spsr_el3 >> MODE_RW_SHIFT & MODE_RW_MASK; 182 183 /* Caller's security state is checked from drtm_smc_handle function */ 184 185 /* Caller can be NS-EL2/EL1 */ 186 if (dl_caller_el == MODE_EL3) { 187 ERROR("DRTM: invalid launch from EL3\n"); 188 return DENIED; 189 } 190 191 if (dl_caller_aarch != MODE_RW_64) { 192 ERROR("DRTM: invalid launch from non-AArch64 execution state\n"); 193 return DENIED; 194 } 195 196 return SUCCESS; 197 } 198 199 static enum drtm_retc drtm_dl_check_cores(void) 200 { 201 bool running_on_single_core; 202 uint64_t this_pe_aff_value = read_mpidr_el1() & MPIDR_AFFINITY_MASK; 203 204 if (this_pe_aff_value != plat_drtm_features.boot_pe_id) { 205 ERROR("DRTM: invalid launch on a non-boot PE\n"); 206 return DENIED; 207 } 208 209 running_on_single_core = psci_is_last_on_cpu_safe(plat_my_core_pos()); 210 if (!running_on_single_core) { 211 ERROR("DRTM: invalid launch due to non-boot PE not being turned off\n"); 212 return SECONDARY_PE_NOT_OFF; 213 } 214 215 return SUCCESS; 216 } 217 218 static enum drtm_retc drtm_dl_prepare_dlme_data(const struct_drtm_dl_args *args) 219 { 220 int rc; 221 uint64_t dlme_data_paddr; 222 size_t dlme_data_max_size; 223 uintptr_t dlme_data_mapping; 224 struct_dlme_data_header *dlme_data_hdr; 225 uint8_t *dlme_data_cursor; 226 size_t dlme_data_mapping_bytes; 227 size_t serialised_bytes_actual; 228 229 dlme_data_paddr = args->dlme_paddr + args->dlme_data_off; 230 dlme_data_max_size = args->dlme_size - args->dlme_data_off; 231 232 /* 233 * The capacity of the given DLME data region is checked when 234 * the other dynamic launch arguments are. 235 */ 236 if (dlme_data_max_size < dlme_data_min_size) { 237 ERROR("%s: assertion failed:" 238 " dlme_data_max_size (%ld) < dlme_data_min_size (%ld)\n", 239 __func__, dlme_data_max_size, dlme_data_min_size); 240 panic(); 241 } 242 243 /* Map the DLME data region as NS memory. */ 244 dlme_data_mapping_bytes = ALIGNED_UP(dlme_data_max_size, DRTM_PAGE_SIZE); 245 rc = mmap_add_dynamic_region_alloc_va(dlme_data_paddr, 246 &dlme_data_mapping, 247 dlme_data_mapping_bytes, 248 MT_RW_DATA | MT_NS | 249 MT_SHAREABILITY_ISH); 250 if (rc != 0) { 251 WARN("DRTM: %s: mmap_add_dynamic_region() failed rc=%d\n", 252 __func__, rc); 253 return INTERNAL_ERROR; 254 } 255 dlme_data_hdr = (struct_dlme_data_header *)dlme_data_mapping; 256 dlme_data_cursor = (uint8_t *)dlme_data_hdr + sizeof(*dlme_data_hdr); 257 258 memcpy(dlme_data_hdr, (const void *)&dlme_data_hdr_init, 259 sizeof(*dlme_data_hdr)); 260 261 /* Set the header version and size. */ 262 dlme_data_hdr->version = 1; 263 dlme_data_hdr->this_hdr_size = sizeof(*dlme_data_hdr); 264 265 /* Prepare DLME protected regions. */ 266 drtm_dma_prot_serialise_table(dlme_data_cursor, 267 &serialised_bytes_actual); 268 assert(serialised_bytes_actual == 269 dlme_data_hdr->dlme_prot_regions_size); 270 dlme_data_cursor += serialised_bytes_actual; 271 272 /* Prepare DLME address map. */ 273 if (plat_drtm_mem_map != NULL) { 274 memcpy(dlme_data_cursor, plat_drtm_mem_map, 275 dlme_data_hdr->dlme_addr_map_size); 276 } else { 277 WARN("DRTM: DLME address map is not in the cache\n"); 278 } 279 dlme_data_cursor += dlme_data_hdr->dlme_addr_map_size; 280 281 /* Prepare DRTM event log for DLME. */ 282 drtm_serialise_event_log(dlme_data_cursor, &serialised_bytes_actual); 283 assert(serialised_bytes_actual <= ARM_DRTM_MIN_EVENT_LOG_SIZE); 284 dlme_data_hdr->dlme_tpm_log_size = ARM_DRTM_MIN_EVENT_LOG_SIZE; 285 dlme_data_cursor += dlme_data_hdr->dlme_tpm_log_size; 286 287 /* 288 * TODO: Prepare the TCB hashes for DLME, currently its size 289 * 0 290 */ 291 dlme_data_cursor += dlme_data_hdr->dlme_tcb_hashes_table_size; 292 293 /* Implementation-specific region size is unused. */ 294 dlme_data_cursor += dlme_data_hdr->dlme_impdef_region_size; 295 296 /* 297 * Prepare DLME data size, includes all data region referenced above 298 * alongwith the DLME data header 299 */ 300 dlme_data_hdr->dlme_data_size = dlme_data_cursor - (uint8_t *)dlme_data_hdr; 301 302 /* Unmap the DLME data region. */ 303 rc = mmap_remove_dynamic_region(dlme_data_mapping, dlme_data_mapping_bytes); 304 if (rc != 0) { 305 ERROR("%s(): mmap_remove_dynamic_region() failed" 306 " unexpectedly rc=%d\n", __func__, rc); 307 panic(); 308 } 309 310 return SUCCESS; 311 } 312 313 /* 314 * Note: accesses to the dynamic launch args, and to the DLME data are 315 * little-endian as required, thanks to TF-A BL31 init requirements. 316 */ 317 static enum drtm_retc drtm_dl_check_args(uint64_t x1, 318 struct_drtm_dl_args *a_out) 319 { 320 uint64_t dlme_start, dlme_end; 321 uint64_t dlme_img_start, dlme_img_ep, dlme_img_end; 322 uint64_t dlme_data_start, dlme_data_end; 323 uintptr_t va_mapping; 324 size_t va_mapping_size; 325 struct_drtm_dl_args *a; 326 struct_drtm_dl_args args_buf; 327 int rc; 328 329 if (x1 % DRTM_PAGE_SIZE != 0) { 330 ERROR("DRTM: parameters structure is not " 331 DRTM_PAGE_SIZE_STR "-aligned\n"); 332 return INVALID_PARAMETERS; 333 } 334 335 va_mapping_size = ALIGNED_UP(sizeof(struct_drtm_dl_args), DRTM_PAGE_SIZE); 336 337 /* check DRTM parameters are within NS address region */ 338 rc = plat_drtm_validate_ns_region(x1, va_mapping_size); 339 if (rc != 0) { 340 ERROR("DRTM: parameters lies within secure memory\n"); 341 return INVALID_PARAMETERS; 342 } 343 344 rc = mmap_add_dynamic_region_alloc_va(x1, &va_mapping, va_mapping_size, 345 MT_MEMORY | MT_NS | MT_RO | 346 MT_SHAREABILITY_ISH); 347 if (rc != 0) { 348 WARN("DRTM: %s: mmap_add_dynamic_region() failed rc=%d\n", 349 __func__, rc); 350 return INTERNAL_ERROR; 351 } 352 a = (struct_drtm_dl_args *)va_mapping; 353 354 /* Sanitize cache of data passed in args by the DCE Preamble. */ 355 flush_dcache_range(va_mapping, va_mapping_size); 356 357 args_buf = *a; 358 359 rc = mmap_remove_dynamic_region(va_mapping, va_mapping_size); 360 if (rc) { 361 ERROR("%s(): mmap_remove_dynamic_region() failed unexpectedly" 362 " rc=%d\n", __func__, rc); 363 panic(); 364 } 365 a = &args_buf; 366 367 if (!((a->version >= ARM_DRTM_PARAMS_MIN_VERSION) && 368 (a->version <= ARM_DRTM_PARAMS_MAX_VERSION))) { 369 ERROR("DRTM: parameters structure version %u is unsupported\n", 370 a->version); 371 return NOT_SUPPORTED; 372 } 373 374 if (!(a->dlme_img_off < a->dlme_size && 375 a->dlme_data_off < a->dlme_size)) { 376 ERROR("DRTM: argument offset is outside of the DLME region\n"); 377 return INVALID_PARAMETERS; 378 } 379 dlme_start = a->dlme_paddr; 380 dlme_end = a->dlme_paddr + a->dlme_size; 381 dlme_img_start = a->dlme_paddr + a->dlme_img_off; 382 dlme_img_ep = dlme_img_start + a->dlme_img_ep_off; 383 dlme_img_end = dlme_img_start + a->dlme_img_size; 384 dlme_data_start = a->dlme_paddr + a->dlme_data_off; 385 dlme_data_end = dlme_end; 386 387 /* Check the DLME regions arguments. */ 388 if ((dlme_start % DRTM_PAGE_SIZE) != 0) { 389 ERROR("DRTM: argument DLME region is not " 390 DRTM_PAGE_SIZE_STR "-aligned\n"); 391 return INVALID_PARAMETERS; 392 } 393 394 if (!(dlme_start < dlme_end && 395 dlme_start <= dlme_img_start && dlme_img_start < dlme_img_end && 396 dlme_start <= dlme_data_start && dlme_data_start < dlme_data_end)) { 397 ERROR("DRTM: argument DLME region is discontiguous\n"); 398 return INVALID_PARAMETERS; 399 } 400 401 if (dlme_img_start < dlme_data_end && dlme_data_start < dlme_img_end) { 402 ERROR("DRTM: argument DLME regions overlap\n"); 403 return INVALID_PARAMETERS; 404 } 405 406 /* Check the DLME image region arguments. */ 407 if ((dlme_img_start % DRTM_PAGE_SIZE) != 0) { 408 ERROR("DRTM: argument DLME image region is not " 409 DRTM_PAGE_SIZE_STR "-aligned\n"); 410 return INVALID_PARAMETERS; 411 } 412 413 if (!(dlme_img_start <= dlme_img_ep && dlme_img_ep < dlme_img_end)) { 414 ERROR("DRTM: DLME entry point is outside of the DLME image region\n"); 415 return INVALID_PARAMETERS; 416 } 417 418 if ((dlme_img_ep % 4) != 0) { 419 ERROR("DRTM: DLME image entry point is not 4-byte-aligned\n"); 420 return INVALID_PARAMETERS; 421 } 422 423 /* Check the DLME data region arguments. */ 424 if ((dlme_data_start % DRTM_PAGE_SIZE) != 0) { 425 ERROR("DRTM: argument DLME data region is not " 426 DRTM_PAGE_SIZE_STR "-aligned\n"); 427 return INVALID_PARAMETERS; 428 } 429 430 if (dlme_data_end - dlme_data_start < dlme_data_min_size) { 431 ERROR("DRTM: argument DLME data region is short of %lu bytes\n", 432 dlme_data_min_size - (size_t)(dlme_data_end - dlme_data_start)); 433 return INVALID_PARAMETERS; 434 } 435 436 /* check DLME region (paddr + size) is within a NS address region */ 437 rc = plat_drtm_validate_ns_region(dlme_start, (size_t)a->dlme_size); 438 if (rc != 0) { 439 ERROR("DRTM: DLME region lies within secure memory\n"); 440 return INVALID_PARAMETERS; 441 } 442 443 /* Check the Normal World DCE region arguments. */ 444 if (a->dce_nwd_paddr != 0) { 445 uint32_t dce_nwd_start = a->dce_nwd_paddr; 446 uint32_t dce_nwd_end = dce_nwd_start + a->dce_nwd_size; 447 448 if (!(dce_nwd_start < dce_nwd_end)) { 449 ERROR("DRTM: argument Normal World DCE region is dicontiguous\n"); 450 return INVALID_PARAMETERS; 451 } 452 453 if (dce_nwd_start < dlme_end && dlme_start < dce_nwd_end) { 454 ERROR("DRTM: argument Normal World DCE regions overlap\n"); 455 return INVALID_PARAMETERS; 456 } 457 } 458 459 /* 460 * Map and sanitize the cache of data range passed by DCE Preamble. This 461 * is required to avoid / defend against racing with cache evictions 462 */ 463 va_mapping_size = ALIGNED_UP((dlme_end - dlme_start), DRTM_PAGE_SIZE); 464 rc = mmap_add_dynamic_region_alloc_va(dlme_start, &va_mapping, va_mapping_size, 465 MT_MEMORY | MT_NS | MT_RO | 466 MT_SHAREABILITY_ISH); 467 if (rc != 0) { 468 ERROR("DRTM: %s: mmap_add_dynamic_region_alloc_va() failed rc=%d\n", 469 __func__, rc); 470 return INTERNAL_ERROR; 471 } 472 flush_dcache_range(va_mapping, va_mapping_size); 473 474 rc = mmap_remove_dynamic_region(va_mapping, va_mapping_size); 475 if (rc) { 476 ERROR("%s(): mmap_remove_dynamic_region() failed unexpectedly" 477 " rc=%d\n", __func__, rc); 478 panic(); 479 } 480 481 *a_out = *a; 482 return SUCCESS; 483 } 484 485 static void drtm_dl_reset_dlme_el_state(enum drtm_dlme_el dlme_el) 486 { 487 uint64_t sctlr; 488 489 /* 490 * TODO: Set PE state according to the PSCI's specification of the initial 491 * state after CPU_ON, or to reset values if unspecified, where they exist, 492 * or define sensible values otherwise. 493 */ 494 495 switch (dlme_el) { 496 case DLME_AT_EL1: 497 sctlr = read_sctlr_el1(); 498 break; 499 500 case DLME_AT_EL2: 501 sctlr = read_sctlr_el2(); 502 break; 503 504 default: /* Not reached */ 505 ERROR("%s(): dlme_el has the unexpected value %d\n", 506 __func__, dlme_el); 507 panic(); 508 } 509 510 sctlr &= ~(/* Disable DLME's EL MMU, since the existing page-tables are untrusted. */ 511 SCTLR_M_BIT 512 | SCTLR_EE_BIT /* Little-endian data accesses. */ 513 | SCTLR_C_BIT /* disable data caching */ 514 | SCTLR_I_BIT /* disable instruction caching */ 515 ); 516 517 switch (dlme_el) { 518 case DLME_AT_EL1: 519 write_sctlr_el1(sctlr); 520 break; 521 522 case DLME_AT_EL2: 523 write_sctlr_el2(sctlr); 524 break; 525 } 526 } 527 528 static void drtm_dl_reset_dlme_context(enum drtm_dlme_el dlme_el) 529 { 530 void *ns_ctx = cm_get_context(NON_SECURE); 531 gp_regs_t *gpregs = get_gpregs_ctx(ns_ctx); 532 uint64_t spsr_el3 = read_ctx_reg(get_el3state_ctx(ns_ctx), CTX_SPSR_EL3); 533 534 /* Reset all gpregs, including SP_EL0. */ 535 memset(gpregs, 0, sizeof(*gpregs)); 536 537 /* Reset SP_ELx. */ 538 switch (dlme_el) { 539 case DLME_AT_EL1: 540 write_sp_el1(0); 541 break; 542 543 case DLME_AT_EL2: 544 write_sp_el2(0); 545 break; 546 } 547 548 /* 549 * DLME's async exceptions are masked to avoid a NWd attacker's timed 550 * interference with any state we established trust in or measured. 551 */ 552 spsr_el3 |= SPSR_DAIF_MASK << SPSR_DAIF_SHIFT; 553 554 write_ctx_reg(get_el3state_ctx(ns_ctx), CTX_SPSR_EL3, spsr_el3); 555 } 556 557 static void drtm_dl_prepare_eret_to_dlme(const struct_drtm_dl_args *args, enum drtm_dlme_el dlme_el) 558 { 559 void *ctx = cm_get_context(NON_SECURE); 560 uint64_t dlme_ep = DL_ARGS_GET_DLME_ENTRY_POINT(args); 561 uint64_t spsr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SPSR_EL3); 562 563 /* Next ERET is to the DLME's EL. */ 564 spsr_el3 &= ~(MODE_EL_MASK << MODE_EL_SHIFT); 565 switch (dlme_el) { 566 case DLME_AT_EL1: 567 spsr_el3 |= MODE_EL1 << MODE_EL_SHIFT; 568 break; 569 570 case DLME_AT_EL2: 571 spsr_el3 |= MODE_EL2 << MODE_EL_SHIFT; 572 break; 573 } 574 575 /* Next ERET is to the DLME entry point. */ 576 cm_set_elr_spsr_el3(NON_SECURE, dlme_ep, spsr_el3); 577 } 578 579 static uint64_t drtm_dynamic_launch(uint64_t x1, void *handle) 580 { 581 enum drtm_retc ret = SUCCESS; 582 enum drtm_retc dma_prot_ret; 583 struct_drtm_dl_args args; 584 /* DLME should be highest NS exception level */ 585 enum drtm_dlme_el dlme_el = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1; 586 587 /* Ensure that only boot PE is powered on */ 588 ret = drtm_dl_check_cores(); 589 if (ret != SUCCESS) { 590 SMC_RET1(handle, ret); 591 } 592 593 /* 594 * Ensure that execution state is AArch64 and the caller 595 * is highest non-secure exception level 596 */ 597 ret = drtm_dl_check_caller_el(handle); 598 if (ret != SUCCESS) { 599 SMC_RET1(handle, ret); 600 } 601 602 ret = drtm_dl_check_args(x1, &args); 603 if (ret != SUCCESS) { 604 SMC_RET1(handle, ret); 605 } 606 607 /* Ensure that there are no SDEI event registered */ 608 #if SDEI_SUPPORT 609 if (sdei_get_registered_event_count() != 0) { 610 SMC_RET1(handle, DENIED); 611 } 612 #endif /* SDEI_SUPPORT */ 613 614 /* 615 * Engage the DMA protections. The launch cannot proceed without the DMA 616 * protections due to potential TOC/TOU vulnerabilities w.r.t. the DLME 617 * region (and to the NWd DCE region). 618 */ 619 ret = drtm_dma_prot_engage(&args.dma_prot_args, 620 DL_ARGS_GET_DMA_PROT_TYPE(&args)); 621 if (ret != SUCCESS) { 622 SMC_RET1(handle, ret); 623 } 624 625 /* 626 * The DMA protection is now engaged. Note that any failure mode that 627 * returns an error to the DRTM-launch caller must now disengage DMA 628 * protections before returning to the caller. 629 */ 630 631 ret = drtm_take_measurements(&args); 632 if (ret != SUCCESS) { 633 goto err_undo_dma_prot; 634 } 635 636 ret = drtm_dl_prepare_dlme_data(&args); 637 if (ret != SUCCESS) { 638 goto err_undo_dma_prot; 639 } 640 641 /* 642 * Note that, at the time of writing, the DRTM spec allows a successful 643 * launch from NS-EL1 to return to a DLME in NS-EL2. The practical risk 644 * of a privilege escalation, e.g. due to a compromised hypervisor, is 645 * considered small enough not to warrant the specification of additional 646 * DRTM conduits that would be necessary to maintain OSs' abstraction from 647 * the presence of EL2 were the dynamic launch only be allowed from the 648 * highest NS EL. 649 */ 650 651 dlme_el = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1; 652 653 drtm_dl_reset_dlme_el_state(dlme_el); 654 drtm_dl_reset_dlme_context(dlme_el); 655 656 /* 657 * Setting the Generic Timer frequency is required before launching 658 * DLME and is already done for running CPU during PSCI setup. 659 */ 660 drtm_dl_prepare_eret_to_dlme(&args, dlme_el); 661 662 /* 663 * As per DRTM 1.0 spec table #30 invalidate the instruction cache 664 * before jumping to the DLME. This is required to defend against 665 * potentially-malicious cache contents. 666 */ 667 invalidate_icache_all(); 668 669 /* Return the DLME region's address in x0, and the DLME data offset in x1.*/ 670 SMC_RET2(handle, args.dlme_paddr, args.dlme_data_off); 671 672 err_undo_dma_prot: 673 dma_prot_ret = drtm_dma_prot_disengage(); 674 if (dma_prot_ret != SUCCESS) { 675 ERROR("%s(): drtm_dma_prot_disengage() failed unexpectedly" 676 " rc=%d\n", __func__, ret); 677 panic(); 678 } 679 680 SMC_RET1(handle, ret); 681 } 682 683 uint64_t drtm_smc_handler(uint32_t smc_fid, 684 uint64_t x1, 685 uint64_t x2, 686 uint64_t x3, 687 uint64_t x4, 688 void *cookie, 689 void *handle, 690 uint64_t flags) 691 { 692 /* Check that the SMC call is from the Normal World. */ 693 if (!is_caller_non_secure(flags)) { 694 SMC_RET1(handle, NOT_SUPPORTED); 695 } 696 697 switch (smc_fid) { 698 case ARM_DRTM_SVC_VERSION: 699 INFO("DRTM service handler: version\n"); 700 /* Return the version of current implementation */ 701 SMC_RET1(handle, ARM_DRTM_VERSION); 702 break; /* not reached */ 703 704 case ARM_DRTM_SVC_FEATURES: 705 if (((x1 >> ARM_DRTM_FUNC_SHIFT) & ARM_DRTM_FUNC_MASK) == 706 ARM_DRTM_FUNC_ID) { 707 /* Dispatch function-based queries. */ 708 switch (x1 & FUNCID_MASK) { 709 case ARM_DRTM_SVC_VERSION: 710 SMC_RET1(handle, SUCCESS); 711 break; /* not reached */ 712 713 case ARM_DRTM_SVC_FEATURES: 714 SMC_RET1(handle, SUCCESS); 715 break; /* not reached */ 716 717 case ARM_DRTM_SVC_UNPROTECT_MEM: 718 SMC_RET1(handle, SUCCESS); 719 break; /* not reached */ 720 721 case ARM_DRTM_SVC_DYNAMIC_LAUNCH: 722 SMC_RET1(handle, SUCCESS); 723 break; /* not reached */ 724 725 case ARM_DRTM_SVC_CLOSE_LOCALITY: 726 WARN("ARM_DRTM_SVC_CLOSE_LOCALITY feature %s", 727 "is not supported\n"); 728 SMC_RET1(handle, NOT_SUPPORTED); 729 break; /* not reached */ 730 731 case ARM_DRTM_SVC_GET_ERROR: 732 SMC_RET1(handle, SUCCESS); 733 break; /* not reached */ 734 735 case ARM_DRTM_SVC_SET_ERROR: 736 SMC_RET1(handle, SUCCESS); 737 break; /* not reached */ 738 739 case ARM_DRTM_SVC_SET_TCB_HASH: 740 WARN("ARM_DRTM_SVC_TCB_HASH feature %s", 741 "is not supported\n"); 742 SMC_RET1(handle, NOT_SUPPORTED); 743 break; /* not reached */ 744 745 case ARM_DRTM_SVC_LOCK_TCB_HASH: 746 WARN("ARM_DRTM_SVC_LOCK_TCB_HASH feature %s", 747 "is not supported\n"); 748 SMC_RET1(handle, NOT_SUPPORTED); 749 break; /* not reached */ 750 751 default: 752 ERROR("Unknown DRTM service function\n"); 753 SMC_RET1(handle, NOT_SUPPORTED); 754 break; /* not reached */ 755 } 756 } else { 757 /* Dispatch feature-based queries. */ 758 switch (x1 & ARM_DRTM_FEAT_ID_MASK) { 759 case ARM_DRTM_FEATURES_TPM: 760 INFO("++ DRTM service handler: TPM features\n"); 761 return drtm_features_tpm(handle); 762 break; /* not reached */ 763 764 case ARM_DRTM_FEATURES_MEM_REQ: 765 INFO("++ DRTM service handler: Min. mem." 766 " requirement features\n"); 767 return drtm_features_mem_req(handle); 768 break; /* not reached */ 769 770 case ARM_DRTM_FEATURES_DMA_PROT: 771 INFO("++ DRTM service handler: " 772 "DMA protection features\n"); 773 return drtm_features_dma_prot(handle); 774 break; /* not reached */ 775 776 case ARM_DRTM_FEATURES_BOOT_PE_ID: 777 INFO("++ DRTM service handler: " 778 "Boot PE ID features\n"); 779 return drtm_features_boot_pe_id(handle); 780 break; /* not reached */ 781 782 case ARM_DRTM_FEATURES_TCB_HASHES: 783 INFO("++ DRTM service handler: " 784 "TCB-hashes features\n"); 785 return drtm_features_tcb_hashes(handle); 786 break; /* not reached */ 787 788 default: 789 ERROR("Unknown ARM DRTM service feature\n"); 790 SMC_RET1(handle, NOT_SUPPORTED); 791 break; /* not reached */ 792 } 793 } 794 795 case ARM_DRTM_SVC_UNPROTECT_MEM: 796 INFO("DRTM service handler: unprotect mem\n"); 797 return drtm_unprotect_mem(handle); 798 break; /* not reached */ 799 800 case ARM_DRTM_SVC_DYNAMIC_LAUNCH: 801 INFO("DRTM service handler: dynamic launch\n"); 802 return drtm_dynamic_launch(x1, handle); 803 break; /* not reached */ 804 805 case ARM_DRTM_SVC_CLOSE_LOCALITY: 806 WARN("DRTM service handler: close locality %s\n", 807 "is not supported"); 808 SMC_RET1(handle, NOT_SUPPORTED); 809 break; /* not reached */ 810 811 case ARM_DRTM_SVC_GET_ERROR: 812 INFO("DRTM service handler: get error\n"); 813 return drtm_get_error(handle); 814 break; /* not reached */ 815 816 case ARM_DRTM_SVC_SET_ERROR: 817 INFO("DRTM service handler: set error\n"); 818 return drtm_set_error(x1, handle); 819 break; /* not reached */ 820 821 case ARM_DRTM_SVC_SET_TCB_HASH: 822 WARN("DRTM service handler: set TCB hash %s\n", 823 "is not supported"); 824 SMC_RET1(handle, NOT_SUPPORTED); 825 break; /* not reached */ 826 827 case ARM_DRTM_SVC_LOCK_TCB_HASH: 828 WARN("DRTM service handler: lock TCB hash %s\n", 829 "is not supported"); 830 SMC_RET1(handle, NOT_SUPPORTED); 831 break; /* not reached */ 832 833 default: 834 ERROR("Unknown DRTM service function: 0x%x\n", smc_fid); 835 SMC_RET1(handle, SMC_UNK); 836 break; /* not reached */ 837 } 838 839 /* not reached */ 840 SMC_RET1(handle, SMC_UNK); 841 } 842