/*
 * Copyright (c) 2022-2026 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * DRTM service
 *
 * Authors:
 *	Lucian Paul-Trifu <lucian.paultrifu@gmail.com>
 *	Brian Nezvadovitz <brinez@microsoft.com> 2021-02-01
 */
12
13 #include <stdint.h>
14
15 #include <arch.h>
16 #include <arch_helpers.h>
17 #include <common/bl_common.h>
18 #include <common/debug.h>
19 #include <common/runtime_svc.h>
20 #include <drivers/auth/crypto_mod.h>
21 #include "drtm_main.h"
22 #include "drtm_measurements.h"
23 #include "drtm_remediation.h"
24 #include <lib/el3_runtime/context_mgmt.h>
25 #include <lib/psci/psci_lib.h>
26 #include <lib/utils_def.h>
27 #include <lib/xlat_tables/xlat_tables_v2.h>
28 #include <plat/common/platform.h>
29 #include <services/drtm_svc.h>
30 #include <services/sdei.h>
31 #include <platform_def.h>
32
/* Structure to store DRTM features specific to the platform. */
static drtm_features_t plat_drtm_features;
/* Set during setup when the platform reports DLME image-auth support. */
static bool dlme_img_auth_supported;

/* DRTM-formatted memory map. */
static drtm_memory_region_descriptor_table_t *plat_drtm_mem_map;
/* Platform DMA-protection and TPM feature descriptors, cached at setup. */
static const plat_drtm_dma_prot_features_t *plat_dma_prot_feat;
static const plat_drtm_tpm_features_t *plat_tpm_feat;

/* DLME header template, filled at setup and copied out at launch time. */
struct_dlme_data_header dlme_data_hdr_init;

/* Minimum data memory requirement (bytes), computed in drtm_setup(). */
uint64_t dlme_data_min_size;
47
/*
 * One-time initialisation of the DRTM service, called during BL31 setup.
 *
 * Records the boot PE ID, initialises DMA protection and the crypto
 * module, builds the DRTM-compatible address map, caches the platform's
 * TPM and DMA-protection feature descriptors, computes the minimum DLME
 * data region size, and fills out the feature values later advertised
 * through the DRTM_FEATURES SMC.
 *
 * Returns 0 on success, INTERNAL_ERROR if any platform hook fails;
 * panics if the platform's DMA protection model is not yet supported.
 */
int drtm_setup(void)
{
	bool rc;

	INFO("DRTM service setup\n");

	/* Read boot PE ID from MPIDR */
	plat_drtm_features.boot_pe_id = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	rc = drtm_dma_prot_init();
	if (rc) {
		return INTERNAL_ERROR;
	}

	/*
	 * initialise the platform supported crypto module that will
	 * be used by the DRTM-service to calculate hash of DRTM-
	 * implementation specific components
	 */
	crypto_mod_init();

	/* Build DRTM-compatible address map. */
	plat_drtm_mem_map = drtm_build_address_map();
	if (plat_drtm_mem_map == NULL) {
		return INTERNAL_ERROR;
	}

	/* Get DRTM features from platform hooks. */
	plat_tpm_feat = plat_drtm_get_tpm_features();
	if (plat_tpm_feat == NULL) {
		return INTERNAL_ERROR;
	}

	plat_dma_prot_feat = plat_drtm_get_dma_prot_features();
	if (plat_dma_prot_feat == NULL) {
		return INTERNAL_ERROR;
	}

	/*
	 * Add up minimum DLME data memory.
	 *
	 * For systems with complete DMA protection there is only one entry in
	 * the protected regions table.
	 */
	if (plat_dma_prot_feat->dma_protection_support ==
	    ARM_DRTM_DMA_PROT_FEATURES_DMA_SUPPORT_COMPLETE) {
		dlme_data_min_size =
			sizeof(drtm_memory_region_descriptor_table_t) +
			sizeof(drtm_mem_region_t);
		dlme_data_hdr_init.dlme_prot_regions_size = dlme_data_min_size;
	} else {
		/*
		 * TODO set protected regions table size based on platform DMA
		 * protection configuration
		 */
		panic();
	}

	dlme_data_hdr_init.dlme_addr_map_size = drtm_get_address_map_size();
	dlme_data_hdr_init.dlme_tcb_hashes_table_size =
				plat_drtm_get_tcb_hash_table_size();
	dlme_data_hdr_init.dlme_acpi_tables_region_size =
				plat_drtm_get_acpi_tables_region_size();
	dlme_data_hdr_init.dlme_impdef_region_size =
				plat_drtm_get_imp_def_dlme_region_size();

	/* Total minimum size: header + all sub-regions + the event log. */
	dlme_data_min_size += sizeof(struct_dlme_data_header) +
			      dlme_data_hdr_init.dlme_addr_map_size +
			      ARM_DRTM_MIN_EVENT_LOG_SIZE +
			      dlme_data_hdr_init.dlme_tcb_hashes_table_size +
			      dlme_data_hdr_init.dlme_acpi_tables_region_size +
			      dlme_data_hdr_init.dlme_impdef_region_size;

	/* Fill out platform DRTM features structure */
	/* Only support default PCR schema (0x1) in this implementation. */
	ARM_DRTM_TPM_FEATURES_SET_PCR_SCHEMA(plat_drtm_features.tpm_features,
		ARM_DRTM_TPM_FEATURES_PCR_SCHEMA_DEFAULT);
	ARM_DRTM_TPM_FEATURES_SET_TPM_HASH(plat_drtm_features.tpm_features,
		plat_tpm_feat->tpm_based_hash_support);
	ARM_DRTM_TPM_FEATURES_SET_FW_HASH(plat_drtm_features.tpm_features,
		plat_tpm_feat->firmware_hash_algorithm);
	/* Minimum DLME data size is advertised in pages, rounded up. */
	ARM_DRTM_MIN_MEM_REQ_SET_MIN_DLME_DATA_SIZE(plat_drtm_features.minimum_memory_requirement,
		page_align(dlme_data_min_size, UP)/PAGE_SIZE);
	ARM_DRTM_MIN_MEM_REQ_SET_DCE_SIZE(plat_drtm_features.minimum_memory_requirement,
		plat_drtm_get_min_size_normal_world_dce());
	ARM_DRTM_DMA_PROT_FEATURES_SET_MAX_REGIONS(plat_drtm_features.dma_prot_features,
		plat_dma_prot_feat->max_num_mem_prot_regions);
	ARM_DRTM_DMA_PROT_FEATURES_SET_DMA_SUPPORT(plat_drtm_features.dma_prot_features,
		plat_dma_prot_feat->dma_protection_support);
	ARM_DRTM_TCB_HASH_FEATURES_SET_MAX_NUM_HASHES(plat_drtm_features.tcb_hash_features,
		plat_drtm_get_tcb_hash_features());
	ARM_DRTM_DLME_IMG_AUTH_SUPPORT(plat_drtm_features.dlme_image_auth_features,
		plat_drtm_get_dlme_img_auth_features());
	/* Cache whether the image-auth support bit ended up set. */
	dlme_img_auth_supported =
		((plat_drtm_features.dlme_image_auth_features &
		  (ARM_DRTM_DLME_IMAGE_AUTH_SUPPORT_MASK <<
		   ARM_DRTM_DLME_IMAGE_AUTH_SUPPORT_SHIFT)) != 0ULL);

	return 0;
}
148
/*
 * Invalidate all instruction caches to the Point of Unification, Inner
 * Shareable ("ic ialluis"), with barriers to ensure completion before
 * execution continues.
 */
static inline void invalidate_icache_all(void)
{
	__asm__ volatile("ic ialluis");
	dsb();
	isb();
}
155
/* DRTM_FEATURES query: return the TPM feature flags computed at setup. */
static inline uint64_t drtm_features_tpm(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* TPM feature is supported */
		 plat_drtm_features.tpm_features);
}
161
/* DRTM_FEATURES query: return the minimum memory requirement values. */
static inline uint64_t drtm_features_mem_req(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* memory req Feature is supported */
		 plat_drtm_features.minimum_memory_requirement);
}
167
/* DRTM_FEATURES query: return the boot PE ID recorded in drtm_setup(). */
static inline uint64_t drtm_features_boot_pe_id(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* Boot PE feature is supported */
		 plat_drtm_features.boot_pe_id);
}
173
/* DRTM_FEATURES query: return the DMA protection feature flags. */
static inline uint64_t drtm_features_dma_prot(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* DMA protection feature is supported */
		 plat_drtm_features.dma_prot_features);
}
179
/* DRTM_FEATURES query: return the TCB-hash feature flags. */
static inline uint64_t drtm_features_tcb_hashes(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* TCB hash feature is supported */
		 plat_drtm_features.tcb_hash_features);
}
185
/* DRTM_FEATURES query: return the DLME image authentication flags. */
static inline uint64_t drtm_features_dlme_img_auth_features(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* DLME Image auth is supported */
		 plat_drtm_features.dlme_image_auth_features);
}
191
drtm_dl_check_caller_el(void * ctx)192 static enum drtm_retc drtm_dl_check_caller_el(void *ctx)
193 {
194 uint64_t spsr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SPSR_EL3);
195 uint64_t dl_caller_el;
196 uint64_t dl_caller_aarch;
197
198 dl_caller_el = spsr_el3 >> MODE_EL_SHIFT & MODE_EL_MASK;
199 dl_caller_aarch = spsr_el3 >> MODE_RW_SHIFT & MODE_RW_MASK;
200
201 /* Caller's security state is checked from drtm_smc_handle function */
202
203 /* Caller can be NS-EL2/EL1 */
204 if (dl_caller_el == MODE_EL3) {
205 ERROR("DRTM: invalid launch from EL3\n");
206 return DENIED;
207 }
208
209 if (dl_caller_aarch != MODE_RW_64) {
210 ERROR("DRTM: invalid launch from non-AArch64 execution state\n");
211 return DENIED;
212 }
213
214 return SUCCESS;
215 }
216
drtm_dl_check_cores(void)217 static enum drtm_retc drtm_dl_check_cores(void)
218 {
219 bool running_on_single_core;
220 uint64_t this_pe_aff_value = read_mpidr_el1() & MPIDR_AFFINITY_MASK;
221
222 if (this_pe_aff_value != plat_drtm_features.boot_pe_id) {
223 ERROR("DRTM: invalid launch on a non-boot PE\n");
224 return DENIED;
225 }
226
227 running_on_single_core = psci_is_last_on_cpu_safe(plat_my_core_pos());
228 if (!running_on_single_core) {
229 ERROR("DRTM: invalid launch due to non-boot PE not being turned off\n");
230 return SECONDARY_PE_NOT_OFF;
231 }
232
233 return SUCCESS;
234 }
235
/*
 * Serialise the data consumed by the DLME into the caller-supplied DLME
 * data region.
 *
 * Maps the region as NS memory, writes the data header followed by the
 * DMA protected-regions table, the address map, the DRTM event log, the
 * (currently empty) TCB hashes table and the implementation-defined
 * region, records the total serialised size in the header, then unmaps
 * the region.
 *
 * Returns SUCCESS, or INTERNAL_ERROR if the dynamic mapping fails.
 * Panics on internal invariant violations (region too small despite the
 * earlier argument checks, or unmap failure).
 */
static enum drtm_retc drtm_dl_prepare_dlme_data(const struct_drtm_dl_args *args)
{
	int rc;
	uint64_t dlme_data_paddr;
	size_t dlme_data_max_size;
	uintptr_t dlme_data_mapping;
	struct_dlme_data_header *dlme_data_hdr;
	uint8_t *dlme_data_cursor;
	size_t dlme_data_mapping_bytes;
	size_t serialised_bytes_actual;

	dlme_data_paddr = args->dlme_paddr + args->dlme_data_off;
	dlme_data_max_size = args->dlme_size - args->dlme_data_off;

	/*
	 * The capacity of the given DLME data region is checked when
	 * the other dynamic launch arguments are.
	 */
	if (dlme_data_max_size < dlme_data_min_size) {
		ERROR("%s: assertion failed:"
		      " dlme_data_max_size (%ld) < dlme_data_min_size (%ld)\n",
		      __func__, dlme_data_max_size, dlme_data_min_size);
		panic();
	}

	/* Map the DLME data region as NS memory. */
	dlme_data_mapping_bytes = ALIGNED_UP(dlme_data_max_size, DRTM_PAGE_SIZE);
	rc = mmap_add_dynamic_region_alloc_va(dlme_data_paddr,
					      &dlme_data_mapping,
					      dlme_data_mapping_bytes,
					      MT_RW_DATA | MT_NS |
					      MT_SHAREABILITY_ISH);
	if (rc != 0) {
		WARN("DRTM: %s: mmap_add_dynamic_region() failed rc=%d\n",
		     __func__, rc);
		return INTERNAL_ERROR;
	}
	dlme_data_hdr = (struct_dlme_data_header *)dlme_data_mapping;
	dlme_data_cursor = (uint8_t *)dlme_data_hdr + sizeof(*dlme_data_hdr);

	/* Seed the header from the template filled in at setup time. */
	memcpy(dlme_data_hdr, (const void *)&dlme_data_hdr_init,
	       sizeof(*dlme_data_hdr));

	/* Set the header version and size. */
	dlme_data_hdr->version = 1;
	dlme_data_hdr->this_hdr_size = sizeof(*dlme_data_hdr);

	/* Prepare DLME protected regions. */
	drtm_dma_prot_serialise_table(dlme_data_cursor,
				      &serialised_bytes_actual);
	assert(serialised_bytes_actual ==
	       dlme_data_hdr->dlme_prot_regions_size);
	dlme_data_cursor += serialised_bytes_actual;

	/* Prepare DLME address map. */
	if (plat_drtm_mem_map != NULL) {
		memcpy(dlme_data_cursor, plat_drtm_mem_map,
		       dlme_data_hdr->dlme_addr_map_size);
	} else {
		WARN("DRTM: DLME address map is not in the cache\n");
	}
	dlme_data_cursor += dlme_data_hdr->dlme_addr_map_size;

	/* Prepare DRTM event log for DLME. */
	drtm_serialise_event_log(dlme_data_cursor, &serialised_bytes_actual);
	assert(serialised_bytes_actual <= ARM_DRTM_MIN_EVENT_LOG_SIZE);
	dlme_data_hdr->dlme_tpm_log_size = serialised_bytes_actual;
	dlme_data_cursor += serialised_bytes_actual;

	/*
	 * TODO: Prepare the TCB hashes for DLME, currently its size
	 * 0
	 */
	dlme_data_cursor += dlme_data_hdr->dlme_tcb_hashes_table_size;

	/* Implementation-specific region size is unused. */
	dlme_data_cursor += dlme_data_hdr->dlme_impdef_region_size;

	/*
	 * NOTE(review): dlme_data_min_size (drtm_setup) accounts for
	 * dlme_acpi_tables_region_size, but the cursor here never advances
	 * past an ACPI-tables region, so it is excluded from dlme_data_size
	 * below — confirm this is intentional.
	 */

	/*
	 * Prepare DLME data size, includes all data region referenced above
	 * alongwith the DLME data header
	 */
	dlme_data_hdr->dlme_data_size = dlme_data_cursor - (uint8_t *)dlme_data_hdr;

	/* Unmap the DLME data region. */
	rc = mmap_remove_dynamic_region(dlme_data_mapping, dlme_data_mapping_bytes);
	if (rc != 0) {
		ERROR("%s(): mmap_remove_dynamic_region() failed"
		      " unexpectedly rc=%d\n", __func__, rc);
		panic();
	}

	return SUCCESS;
}
330
331 /* Function to check if the value is valid for each bit field */
drtm_dl_check_features_sanity(uint32_t val)332 static int drtm_dl_check_features_sanity(uint32_t val)
333 {
334 /**
335 * Ensure that if DLME Authorities Schema (Bits [2:1]) is set, then
336 * DLME image authentication (Bit[6]) must also be set
337 */
338 if ((EXTRACT(DRTM_LAUNCH_FEAT_PCR_USAGE_SCHEMA, val) == DLME_AUTH_SCHEMA) &&
339 (EXTRACT(DRTM_LAUNCH_FEAT_DLME_IMG_AUTH, val) != DLME_IMG_AUTH)) {
340 return INVALID_PARAMETERS;
341 }
342
343 /**
344 * Check if DLME image authentication (Bit[6]) is supported by platform.
345 */
346 if (EXTRACT(DRTM_LAUNCH_FEAT_DLME_IMG_AUTH, val) == DLME_IMG_AUTH) {
347 if (!dlme_img_auth_supported) {
348 return INVALID_PARAMETERS;
349 }
350 }
351
352 /**
353 * Check if Bits [5:3] (Memory protection type) matches with platform's
354 * memory protection type
355 */
356 if (EXTRACT(DRTM_LAUNCH_FEAT_MEM_PROTECTION_TYPE, val) !=
357 __builtin_ctz(plat_dma_prot_feat->dma_protection_support)) {
358 return INVALID_PARAMETERS;
359 }
360
361 /**
362 * Check if Bits [0] (Type of hashing) matches with platform's
363 * supported hash type.
364 */
365 if (EXTRACT(DRTM_LAUNCH_FEAT_HASHING_TYPE, val) !=
366 plat_tpm_feat->tpm_based_hash_support) {
367 return INVALID_PARAMETERS;
368 }
369
370 return 0;
371 }
372
373 /*
374 * Note: accesses to the dynamic launch args, and to the DLME data are
375 * little-endian as required, thanks to TF-A BL31 init requirements.
376 */
/*
 * Note: accesses to the dynamic launch args, and to the DLME data are
 * little-endian as required, thanks to TF-A BL31 init requirements.
 */
/*
 * Validate the dynamic launch parameters structure whose physical
 * address was passed in x1, and copy a sanitised snapshot into *a_out.
 *
 * The structure is mapped read-only, snapshotted into a local buffer
 * (so later checks cannot be raced by the normal world), and then every
 * region argument is checked: alignment, containment within the DLME
 * region, non-overlap of image and data sub-regions, entry-point
 * placement, minimum data-region size, NS placement, and the optional
 * NWd DCE region. Finally the whole DLME region's cache lines are
 * flushed to defend against cache-eviction races.
 *
 * Returns SUCCESS with *a_out filled in, or INVALID_PARAMETERS /
 * NOT_SUPPORTED / INTERNAL_ERROR on failure; panics if unmapping fails.
 */
static enum drtm_retc drtm_dl_check_args(uint64_t x1,
					 struct_drtm_dl_args *a_out)
{
	uint64_t dlme_start, dlme_end;
	uint64_t dlme_img_start, dlme_img_ep, dlme_img_end;
	uint64_t dlme_data_start, dlme_data_end;
	uintptr_t va_mapping;
	size_t va_mapping_size;
	struct_drtm_dl_args *a;
	struct_drtm_dl_args args_buf;
	int rc;

	if (x1 % DRTM_PAGE_SIZE != 0) {
		ERROR("DRTM: parameters structure is not "
		      DRTM_PAGE_SIZE_STR "-aligned\n");
		return INVALID_PARAMETERS;
	}

	va_mapping_size = ALIGNED_UP(sizeof(struct_drtm_dl_args), DRTM_PAGE_SIZE);

	/* check DRTM parameters are within NS address region */
	rc = plat_drtm_validate_ns_region(x1, va_mapping_size);
	if (rc != 0) {
		ERROR("DRTM: parameters lies within secure memory\n");
		return INVALID_PARAMETERS;
	}

	rc = mmap_add_dynamic_region_alloc_va(x1, &va_mapping, va_mapping_size,
					      MT_NS | MT_RO_DATA |
					      MT_SHAREABILITY_ISH);
	if (rc != 0) {
		WARN("DRTM: %s: mmap_add_dynamic_region() failed rc=%d\n",
		     __func__, rc);
		return INTERNAL_ERROR;
	}
	a = (struct_drtm_dl_args *)va_mapping;

	/* Sanitize cache of data passed in args by the DCE Preamble. */
	flush_dcache_range(va_mapping, va_mapping_size);

	/* Snapshot the arguments so later checks are TOC/TOU safe. */
	args_buf = *a;

	rc = mmap_remove_dynamic_region(va_mapping, va_mapping_size);
	if (rc != 0) {
		ERROR("%s(): mmap_remove_dynamic_region() failed unexpectedly"
		      " rc=%d\n", __func__, rc);
		panic();
	}
	/* From here on, only the local snapshot is examined. */
	a = &args_buf;

	if (!((a->version >= ARM_DRTM_PARAMS_MIN_VERSION) &&
	      (a->version <= ARM_DRTM_PARAMS_MAX_VERSION))) {
		ERROR("DRTM: parameters structure version %u is unsupported\n",
		      a->version);
		return NOT_SUPPORTED;
	}

	rc = drtm_dl_check_features_sanity(a->features);
	if (rc != 0) {
		ERROR("%s(): drtm_dl_check_features_sanity() failed.\n"
		      " rc=%d\n", __func__, rc);
		return rc;
	}

	if (!(a->dlme_img_off < a->dlme_size &&
	      a->dlme_data_off < a->dlme_size)) {
		ERROR("DRTM: argument offset is outside of the DLME region\n");
		return INVALID_PARAMETERS;
	}
	dlme_start = a->dlme_paddr;
	dlme_end = a->dlme_paddr + a->dlme_size;
	dlme_img_start = a->dlme_paddr + a->dlme_img_off;
	dlme_img_ep = dlme_img_start + a->dlme_img_ep_off;
	dlme_img_end = dlme_img_start + a->dlme_img_size;
	dlme_data_start = a->dlme_paddr + a->dlme_data_off;
	dlme_data_end = dlme_end;

	/* Check the DLME regions arguments. */
	if ((dlme_start % DRTM_PAGE_SIZE) != 0) {
		ERROR("DRTM: argument DLME region is not "
		      DRTM_PAGE_SIZE_STR "-aligned\n");
		return INVALID_PARAMETERS;
	}

	/* Image and data sub-regions must both lie inside the DLME region. */
	if (!(dlme_start < dlme_end &&
	      dlme_start <= dlme_img_start && dlme_img_start < dlme_img_end &&
	      dlme_start <= dlme_data_start && dlme_data_start < dlme_data_end)) {
		ERROR("DRTM: argument DLME region is discontiguous\n");
		return INVALID_PARAMETERS;
	}

	if (dlme_img_start < dlme_data_end && dlme_data_start < dlme_img_end) {
		ERROR("DRTM: argument DLME regions overlap\n");
		return INVALID_PARAMETERS;
	}

	/* Check the DLME image region arguments. */
	if ((dlme_img_start % DRTM_PAGE_SIZE) != 0) {
		ERROR("DRTM: argument DLME image region is not "
		      DRTM_PAGE_SIZE_STR "-aligned\n");
		return INVALID_PARAMETERS;
	}

	if (!(dlme_img_start <= dlme_img_ep && dlme_img_ep < dlme_img_end)) {
		ERROR("DRTM: DLME entry point is outside of the DLME image region\n");
		return INVALID_PARAMETERS;
	}

	if ((dlme_img_ep % 4) != 0) {
		ERROR("DRTM: DLME image entry point is not 4-byte-aligned\n");
		return INVALID_PARAMETERS;
	}

	/* Check the DLME data region arguments. */
	if ((dlme_data_start % DRTM_PAGE_SIZE) != 0) {
		ERROR("DRTM: argument DLME data region is not "
		      DRTM_PAGE_SIZE_STR "-aligned\n");
		return INVALID_PARAMETERS;
	}

	if (dlme_data_end - dlme_data_start < dlme_data_min_size) {
		ERROR("DRTM: argument DLME data region is short of %lu bytes\n",
		      dlme_data_min_size - (size_t)(dlme_data_end - dlme_data_start));
		return INVALID_PARAMETERS;
	}

	/* check DLME region (paddr + size) is within a NS address region */
	rc = plat_drtm_validate_ns_region(dlme_start, (size_t)a->dlme_size);
	if (rc != 0) {
		ERROR("DRTM: DLME region lies within secure memory\n");
		return INVALID_PARAMETERS;
	}

	/* Check the Normal World DCE region arguments. */
	if (a->dce_nwd_paddr != 0) {
		uint64_t dce_nwd_start = a->dce_nwd_paddr;
		uint64_t dce_nwd_size = a->dce_nwd_size;
		uint64_t dce_nwd_end;

		if (check_u64_overflow(dce_nwd_start, dce_nwd_size)) {
			ERROR("DRTM: argument Normal World DCE region overflows\n");
			return INVALID_PARAMETERS;
		}

		dce_nwd_end = dce_nwd_start + dce_nwd_size;

		if (!(dce_nwd_start < dce_nwd_end)) {
			ERROR("DRTM: argument Normal World DCE region is dicontiguous\n");
			return INVALID_PARAMETERS;
		}

		if (dce_nwd_start < dlme_end && dlme_start < dce_nwd_end) {
			ERROR("DRTM: argument Normal World DCE regions overlap\n");
			return INVALID_PARAMETERS;
		}
	}

	/*
	 * Map and sanitize the cache of data range passed by DCE Preamble. This
	 * is required to avoid / defend against racing with cache evictions
	 */
	va_mapping_size = ALIGNED_UP((dlme_end - dlme_start), DRTM_PAGE_SIZE);
	rc = mmap_add_dynamic_region_alloc_va(dlme_start, &va_mapping, va_mapping_size,
					      MT_NS | MT_RO_DATA |
					      MT_SHAREABILITY_ISH);
	if (rc != 0) {
		ERROR("DRTM: %s: mmap_add_dynamic_region_alloc_va() failed rc=%d\n",
		      __func__, rc);
		return INTERNAL_ERROR;
	}
	flush_dcache_range(va_mapping, va_mapping_size);

	rc = mmap_remove_dynamic_region(va_mapping, va_mapping_size);
	if (rc) {
		ERROR("%s(): mmap_remove_dynamic_region() failed unexpectedly"
		      " rc=%d\n", __func__, rc);
		panic();
	}

	*a_out = *a;
	return SUCCESS;
}
559
drtm_dl_reset_dlme_el_state(enum drtm_dlme_el dlme_el)560 static void drtm_dl_reset_dlme_el_state(enum drtm_dlme_el dlme_el)
561 {
562 uint64_t sctlr;
563
564 switch (dlme_el) {
565 case DLME_AT_EL1:
566 sctlr = read_sctlr_el1();
567 break;
568
569 case DLME_AT_EL2:
570 sctlr = read_sctlr_el2();
571 break;
572
573 default: /* Not reached */
574 ERROR("%s(): dlme_el has the unexpected value %d\n",
575 __func__, dlme_el);
576 panic();
577 }
578
579 sctlr &= ~(/* Disable DLME's EL MMU, since the existing page-tables are untrusted. */
580 SCTLR_M_BIT
581 | SCTLR_EE_BIT /* Little-endian data accesses. */
582 | SCTLR_C_BIT /* disable data caching */
583 | SCTLR_I_BIT /* disable instruction caching */
584 );
585
586 switch (dlme_el) {
587 case DLME_AT_EL1:
588 write_sctlr_el1(sctlr);
589 break;
590
591 case DLME_AT_EL2:
592 write_sctlr_el2(sctlr);
593 break;
594 }
595 }
596
drtm_dl_reset_dlme_context(enum drtm_dlme_el dlme_el)597 static void drtm_dl_reset_dlme_context(enum drtm_dlme_el dlme_el)
598 {
599 void *ns_ctx = cm_get_context(NON_SECURE);
600 gp_regs_t *gpregs = get_gpregs_ctx(ns_ctx);
601 uint64_t spsr_el3 = read_ctx_reg(get_el3state_ctx(ns_ctx), CTX_SPSR_EL3);
602
603 /* Reset all gpregs, including SP_EL0. */
604 memset(gpregs, 0, sizeof(*gpregs));
605
606 /* Reset SP_ELx. */
607 switch (dlme_el) {
608 case DLME_AT_EL1:
609 write_sp_el1(0);
610 break;
611
612 case DLME_AT_EL2:
613 write_sp_el2(0);
614 break;
615 }
616
617 /*
618 * DLME's async exceptions are masked to avoid a NWd attacker's timed
619 * interference with any state we established trust in or measured.
620 */
621 spsr_el3 |= SPSR_DAIF_MASK << SPSR_DAIF_SHIFT;
622
623 write_ctx_reg(get_el3state_ctx(ns_ctx), CTX_SPSR_EL3, spsr_el3);
624 }
625
drtm_dl_prepare_eret_to_dlme(const struct_drtm_dl_args * args,enum drtm_dlme_el dlme_el)626 static void drtm_dl_prepare_eret_to_dlme(const struct_drtm_dl_args *args, enum drtm_dlme_el dlme_el)
627 {
628 void *ctx = cm_get_context(NON_SECURE);
629 uint64_t dlme_ep = DL_ARGS_GET_DLME_ENTRY_POINT(args);
630 uint64_t spsr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SPSR_EL3);
631
632 /* Next ERET is to the DLME's EL. */
633 spsr_el3 &= ~(MODE_EL_MASK << MODE_EL_SHIFT);
634 switch (dlme_el) {
635 case DLME_AT_EL1:
636 spsr_el3 |= MODE_EL1 << MODE_EL_SHIFT;
637 break;
638
639 case DLME_AT_EL2:
640 spsr_el3 |= MODE_EL2 << MODE_EL_SHIFT;
641 break;
642 }
643
644 /* Next ERET is to the DLME entry point. */
645 cm_set_elr_spsr_el3(NON_SECURE, dlme_ep, spsr_el3);
646 }
647
drtm_dynamic_launch(uint64_t x1,void * handle)648 static uint64_t drtm_dynamic_launch(uint64_t x1, void *handle)
649 {
650 enum drtm_retc ret = SUCCESS;
651 enum drtm_retc dma_prot_ret;
652 struct_drtm_dl_args args;
653 /* DLME should be highest NS exception level */
654 enum drtm_dlme_el dlme_el = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;
655
656 /* Ensure that only boot PE is powered on */
657 ret = drtm_dl_check_cores();
658 if (ret != SUCCESS) {
659 SMC_RET1(handle, ret);
660 }
661
662 /*
663 * Ensure that execution state is AArch64 and the caller
664 * is highest non-secure exception level
665 */
666 ret = drtm_dl_check_caller_el(handle);
667 if (ret != SUCCESS) {
668 SMC_RET1(handle, ret);
669 }
670
671 ret = drtm_dl_check_args(x1, &args);
672 if (ret != SUCCESS) {
673 SMC_RET1(handle, ret);
674 }
675
676 /* Ensure that there are no SDEI event registered */
677 #if SDEI_SUPPORT
678 if (sdei_get_registered_event_count() != 0) {
679 SMC_RET1(handle, DENIED);
680 }
681 #endif /* SDEI_SUPPORT */
682
683 /*
684 * Engage the DMA protections. The launch cannot proceed without the DMA
685 * protections due to potential TOC/TOU vulnerabilities w.r.t. the DLME
686 * region (and to the NWd DCE region).
687 */
688 ret = drtm_dma_prot_engage(&args.dma_prot_args,
689 DL_ARGS_GET_DMA_PROT_TYPE(&args));
690 if (ret != SUCCESS) {
691 SMC_RET1(handle, ret);
692 }
693
694 /*
695 * The DMA protection is now engaged. Note that any failure mode that
696 * returns an error to the DRTM-launch caller must now disengage DMA
697 * protections before returning to the caller.
698 */
699
700 ret = drtm_take_measurements(&args);
701 if (ret != SUCCESS) {
702 goto err_undo_dma_prot;
703 }
704
705 ret = drtm_dl_prepare_dlme_data(&args);
706 if (ret != SUCCESS) {
707 goto err_undo_dma_prot;
708 }
709
710 /*
711 * Note that, at the time of writing, the DRTM spec allows a successful
712 * launch from NS-EL1 to return to a DLME in NS-EL2. The practical risk
713 * of a privilege escalation, e.g. due to a compromised hypervisor, is
714 * considered small enough not to warrant the specification of additional
715 * DRTM conduits that would be necessary to maintain OSs' abstraction from
716 * the presence of EL2 were the dynamic launch only be allowed from the
717 * highest NS EL.
718 */
719
720 dlme_el = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;
721
722 drtm_dl_reset_dlme_el_state(dlme_el);
723 drtm_dl_reset_dlme_context(dlme_el);
724
725 /*
726 * Setting the Generic Timer frequency is required before launching
727 * DLME and is already done for running CPU during PSCI setup.
728 */
729 drtm_dl_prepare_eret_to_dlme(&args, dlme_el);
730
731 /*
732 * As per DRTM 1.0 spec table #30 invalidate the instruction cache
733 * before jumping to the DLME. This is required to defend against
734 * potentially-malicious cache contents.
735 */
736 invalidate_icache_all();
737
738 /* Return the DLME region's address in x0, and the DLME data offset in x1.*/
739 SMC_RET2(handle, args.dlme_paddr, args.dlme_data_off);
740
741 err_undo_dma_prot:
742 dma_prot_ret = drtm_dma_prot_disengage();
743 if (dma_prot_ret != SUCCESS) {
744 ERROR("%s(): drtm_dma_prot_disengage() failed unexpectedly"
745 " rc=%d\n", __func__, ret);
746 panic();
747 }
748
749 SMC_RET1(handle, ret);
750 }
751
/*
 * Top-level SMC dispatcher for the ARM DRTM service.
 *
 * Rejects calls from the secure world, then dispatches on the SMC
 * function ID. DRTM_FEATURES (x1) is further dispatched either by
 * function ID (capability query for a DRTM function) or by feature ID
 * (TPM, memory requirements, DMA protection, boot PE, TCB hashes, DLME
 * image auth). Unsupported functions return NOT_SUPPORTED; unknown
 * function IDs return SMC_UNK.
 */
uint64_t drtm_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	/* Check that the SMC call is from the Normal World. */
	if (!is_caller_non_secure(flags)) {
		SMC_RET1(handle, NOT_SUPPORTED);
	}

	switch (smc_fid) {
	case ARM_DRTM_SVC_VERSION:
		INFO("DRTM service handler: version\n");
		/* Return the version of current implementation */
		SMC_RET1(handle, ARM_DRTM_VERSION);
		break; /* not reached */

	case ARM_DRTM_SVC_FEATURES:
		if (((x1 >> ARM_DRTM_FUNC_SHIFT) & ARM_DRTM_FUNC_MASK) ==
		    ARM_DRTM_FUNC_ID) {
			/* Dispatch function-based queries. */
			switch (x1 & FUNCID_MASK) {
			case ARM_DRTM_SVC_VERSION:
				SMC_RET1(handle, SUCCESS);
				break; /* not reached */

			case ARM_DRTM_SVC_FEATURES:
				SMC_RET1(handle, SUCCESS);
				break; /* not reached */

			case ARM_DRTM_SVC_UNPROTECT_MEM:
				SMC_RET1(handle, SUCCESS);
				break; /* not reached */

			case ARM_DRTM_SVC_DYNAMIC_LAUNCH:
				SMC_RET1(handle, SUCCESS);
				break; /* not reached */

			case ARM_DRTM_SVC_CLOSE_LOCALITY:
				WARN("ARM_DRTM_SVC_CLOSE_LOCALITY feature %s",
				     "is not supported\n");
				SMC_RET1(handle, NOT_SUPPORTED);
				break; /* not reached */

			case ARM_DRTM_SVC_GET_ERROR:
				SMC_RET1(handle, SUCCESS);
				break; /* not reached */

			case ARM_DRTM_SVC_SET_ERROR:
				SMC_RET1(handle, SUCCESS);
				break; /* not reached */

			case ARM_DRTM_SVC_SET_TCB_HASH:
				WARN("ARM_DRTM_SVC_TCB_HASH feature %s",
				     "is not supported\n");
				SMC_RET1(handle, NOT_SUPPORTED);
				break; /* not reached */

			case ARM_DRTM_SVC_LOCK_TCB_HASH:
				WARN("ARM_DRTM_SVC_LOCK_TCB_HASH feature %s",
				     "is not supported\n");
				SMC_RET1(handle, NOT_SUPPORTED);
				break; /* not reached */

			default:
				ERROR("Unknown DRTM service function\n");
				SMC_RET1(handle, NOT_SUPPORTED);
				break; /* not reached */
			}
		} else {
			/* Dispatch feature-based queries. */
			switch (x1 & ARM_DRTM_FEAT_ID_MASK) {
			case ARM_DRTM_FEATURES_TPM:
				INFO("++ DRTM service handler: TPM features\n");
				return drtm_features_tpm(handle);
				break; /* not reached */

			case ARM_DRTM_FEATURES_MEM_REQ:
				INFO("++ DRTM service handler: Min. mem."
				     " requirement features\n");
				return drtm_features_mem_req(handle);
				break; /* not reached */

			case ARM_DRTM_FEATURES_DMA_PROT:
				INFO("++ DRTM service handler: "
				     "DMA protection features\n");
				return drtm_features_dma_prot(handle);
				break; /* not reached */

			case ARM_DRTM_FEATURES_BOOT_PE_ID:
				INFO("++ DRTM service handler: "
				     "Boot PE ID features\n");
				return drtm_features_boot_pe_id(handle);
				break; /* not reached */

			case ARM_DRTM_FEATURES_TCB_HASHES:
				INFO("++ DRTM service handler: "
				     "TCB-hashes features\n");
				return drtm_features_tcb_hashes(handle);
				break; /* not reached */

			case ARM_DRTM_FEATURES_DLME_IMG_AUTH:
				INFO("++ DRTM service handler: "
				     "DLME Image authentication features\n");
				return drtm_features_dlme_img_auth_features(handle);
				break; /* not reached */

			default:
				ERROR("Unknown ARM DRTM service feature\n");
				SMC_RET1(handle, NOT_SUPPORTED);
				break; /* not reached */
			}
		}

	case ARM_DRTM_SVC_UNPROTECT_MEM:
		INFO("DRTM service handler: unprotect mem\n");
		return drtm_unprotect_mem(handle);
		break; /* not reached */

	case ARM_DRTM_SVC_DYNAMIC_LAUNCH:
		INFO("DRTM service handler: dynamic launch\n");
		return drtm_dynamic_launch(x1, handle);
		break; /* not reached */

	case ARM_DRTM_SVC_CLOSE_LOCALITY:
		WARN("DRTM service handler: close locality %s\n",
		     "is not supported");
		SMC_RET1(handle, NOT_SUPPORTED);
		break; /* not reached */

	case ARM_DRTM_SVC_GET_ERROR:
		INFO("DRTM service handler: get error\n");
		return drtm_get_error(handle);
		break; /* not reached */

	case ARM_DRTM_SVC_SET_ERROR:
		INFO("DRTM service handler: set error\n");
		return drtm_set_error(x1, handle);
		break; /* not reached */

	case ARM_DRTM_SVC_SET_TCB_HASH:
		WARN("DRTM service handler: set TCB hash %s\n",
		     "is not supported");
		SMC_RET1(handle, NOT_SUPPORTED);
		break; /* not reached */

	case ARM_DRTM_SVC_LOCK_TCB_HASH:
		WARN("DRTM service handler: lock TCB hash %s\n",
		     "is not supported");
		SMC_RET1(handle, NOT_SUPPORTED);
		break; /* not reached */

	default:
		ERROR("Unknown DRTM service function: 0x%x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
		break; /* not reached */
	}

	/* not reached */
	SMC_RET1(handle, SMC_UNK);
}
917