xref: /rk3399_ARM-atf/services/std_svc/drtm/drtm_main.c (revision d1747e1b8e617ad024456791ce0ab8950bb282ca)
1 /*
2  * Copyright (c) 2022 Arm Limited. All rights reserved.
3  *
4  * SPDX-License-Identifier:    BSD-3-Clause
5  *
6  * DRTM service
7  *
8  * Authors:
9  *	Lucian Paul-Trifu <lucian.paultrifu@gmail.com>
10  *	Brian Nezvadovitz <brinez@microsoft.com> 2021-02-01
11  */
12 
13 #include <stdint.h>
14 
15 #include <arch.h>
16 #include <arch_helpers.h>
17 #include <common/bl_common.h>
18 #include <common/debug.h>
19 #include <common/runtime_svc.h>
20 #include <drivers/auth/crypto_mod.h>
21 #include "drtm_main.h"
22 #include "drtm_measurements.h"
23 #include "drtm_remediation.h"
24 #include <lib/el3_runtime/context_mgmt.h>
25 #include <lib/psci/psci_lib.h>
26 #include <lib/xlat_tables/xlat_tables_v2.h>
27 #include <plat/common/platform.h>
28 #include <services/drtm_svc.h>
29 #include <platform_def.h>
30 
31 /* Structure to store DRTM features specific to the platform. */
32 static drtm_features_t plat_drtm_features;
33 
34 /* DRTM-formatted memory map. */
35 static drtm_memory_region_descriptor_table_t *plat_drtm_mem_map;
36 
37 /* DLME header */
38 struct_dlme_data_header dlme_data_hdr_init;
39 
40 /* Minimum data memory requirement */
41 uint64_t dlme_data_min_size;
42 
/*
 * One-time initialisation of the DRTM service, called during BL31 setup.
 *
 * Records the boot PE's MPIDR affinity, initialises DMA protection and the
 * platform crypto module, caches the DRTM-formatted address map, and
 * pre-computes the minimum DLME data region size advertised through the
 * DRTM features ABI.
 *
 * Returns 0 on success, INTERNAL_ERROR if any platform hook fails.
 */
int drtm_setup(void)
{
	bool rc;
	const plat_drtm_tpm_features_t *plat_tpm_feat;
	const plat_drtm_dma_prot_features_t *plat_dma_prot_feat;

	INFO("DRTM service setup\n");

	/* Read boot PE ID from MPIDR */
	plat_drtm_features.boot_pe_id = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	/* Non-zero/true result indicates a DMA-protection init failure. */
	rc = drtm_dma_prot_init();
	if (rc) {
		return INTERNAL_ERROR;
	}

	/*
	 * initialise the platform supported crypto module that will
	 * be used by the DRTM-service to calculate hash of DRTM-
	 * implementation specific components
	 */
	crypto_mod_init();

	/* Build DRTM-compatible address map. */
	plat_drtm_mem_map = drtm_build_address_map();
	if (plat_drtm_mem_map == NULL) {
		return INTERNAL_ERROR;
	}

	/* Get DRTM features from platform hooks. */
	plat_tpm_feat = plat_drtm_get_tpm_features();
	if (plat_tpm_feat == NULL) {
		return INTERNAL_ERROR;
	}

	plat_dma_prot_feat = plat_drtm_get_dma_prot_features();
	if (plat_dma_prot_feat == NULL) {
		return INTERNAL_ERROR;
	}

	/*
	 * Add up minimum DLME data memory.
	 *
	 * For systems with complete DMA protection there is only one entry in
	 * the protected regions table.
	 */
	if (plat_dma_prot_feat->dma_protection_support ==
			ARM_DRTM_DMA_PROT_FEATURES_DMA_SUPPORT_COMPLETE) {
		dlme_data_min_size =
			sizeof(drtm_memory_region_descriptor_table_t) +
			sizeof(drtm_mem_region_t);
		dlme_data_hdr_init.dlme_prot_regions_size = dlme_data_min_size;
	} else {
		/*
		 * TODO set protected regions table size based on platform DMA
		 * protection configuration
		 */
		panic();
	}

	/* Sizes of the remaining DLME data sections come from platform hooks. */
	dlme_data_hdr_init.dlme_addr_map_size = drtm_get_address_map_size();
	dlme_data_hdr_init.dlme_tcb_hashes_table_size =
				plat_drtm_get_tcb_hash_table_size();
	dlme_data_hdr_init.dlme_impdef_region_size =
				plat_drtm_get_imp_def_dlme_region_size();

	dlme_data_min_size += dlme_data_hdr_init.dlme_addr_map_size +
			      PLAT_DRTM_EVENT_LOG_MAX_SIZE +
			      dlme_data_hdr_init.dlme_tcb_hashes_table_size +
			      dlme_data_hdr_init.dlme_impdef_region_size;

	/*
	 * NOTE(review): this converts dlme_data_min_size from bytes into a
	 * count of 4K pages (as the DRTM features ABI reports it), yet
	 * drtm_dl_prepare_dlme_data() and drtm_dl_check_args() compare it
	 * directly against byte counts — TODO confirm the intended units.
	 */
	dlme_data_min_size = page_align(dlme_data_min_size, UP)/PAGE_SIZE;

	/* Fill out platform DRTM features structure */
	/* Only support default PCR schema (0x1) in this implementation. */
	ARM_DRTM_TPM_FEATURES_SET_PCR_SCHEMA(plat_drtm_features.tpm_features,
		ARM_DRTM_TPM_FEATURES_PCR_SCHEMA_DEFAULT);
	ARM_DRTM_TPM_FEATURES_SET_TPM_HASH(plat_drtm_features.tpm_features,
		plat_tpm_feat->tpm_based_hash_support);
	ARM_DRTM_TPM_FEATURES_SET_FW_HASH(plat_drtm_features.tpm_features,
		plat_tpm_feat->firmware_hash_algorithm);
	ARM_DRTM_MIN_MEM_REQ_SET_MIN_DLME_DATA_SIZE(plat_drtm_features.minimum_memory_requirement,
		dlme_data_min_size);
	ARM_DRTM_MIN_MEM_REQ_SET_DCE_SIZE(plat_drtm_features.minimum_memory_requirement,
		plat_drtm_get_min_size_normal_world_dce());
	ARM_DRTM_DMA_PROT_FEATURES_SET_MAX_REGIONS(plat_drtm_features.dma_prot_features,
		plat_dma_prot_feat->max_num_mem_prot_regions);
	ARM_DRTM_DMA_PROT_FEATURES_SET_DMA_SUPPORT(plat_drtm_features.dma_prot_features,
		plat_dma_prot_feat->dma_protection_support);
	ARM_DRTM_TCB_HASH_FEATURES_SET_MAX_NUM_HASHES(plat_drtm_features.tcb_hash_features,
		plat_drtm_get_tcb_hash_features());

	return 0;
}
137 
/*
 * DRTM_FEATURES query: TPM capabilities.
 * Returns (1 = supported, packed TPM feature flags) to the caller via SMC.
 */
static inline uint64_t drtm_features_tpm(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* TPM feature is supported */
		 plat_drtm_features.tpm_features);
}
143 
/*
 * DRTM_FEATURES query: minimum memory requirements.
 * Returns (1 = supported, packed minimum-memory values computed in
 * drtm_setup()) to the caller via SMC.
 */
static inline uint64_t drtm_features_mem_req(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* memory req Feature is supported */
		 plat_drtm_features.minimum_memory_requirement);
}
149 
/*
 * DRTM_FEATURES query: boot PE identifier.
 * Returns (1 = supported, MPIDR affinity of the boot PE recorded at setup)
 * to the caller via SMC.
 */
static inline uint64_t drtm_features_boot_pe_id(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* Boot PE feature is supported */
		 plat_drtm_features.boot_pe_id);
}
155 
/*
 * DRTM_FEATURES query: DMA protection capabilities.
 * Returns (1 = supported, packed DMA protection feature flags) to the
 * caller via SMC.
 */
static inline uint64_t drtm_features_dma_prot(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* DMA protection feature is supported */
		 plat_drtm_features.dma_prot_features);
}
161 
/*
 * DRTM_FEATURES query: TCB hash capabilities.
 * Returns (1 = supported, packed TCB hash feature flags) to the caller
 * via SMC.
 */
static inline uint64_t drtm_features_tcb_hashes(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* TCB hash feature is supported */
		 plat_drtm_features.tcb_hash_features);
}
167 
168 static enum drtm_retc drtm_dl_check_caller_el(void *ctx)
169 {
170 	uint64_t spsr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SPSR_EL3);
171 	uint64_t dl_caller_el;
172 	uint64_t dl_caller_aarch;
173 
174 	dl_caller_el = spsr_el3 >> MODE_EL_SHIFT & MODE_EL_MASK;
175 	dl_caller_aarch = spsr_el3 >> MODE_RW_SHIFT & MODE_RW_MASK;
176 
177 	/* Caller's security state is checked from drtm_smc_handle function */
178 
179 	/* Caller can be NS-EL2/EL1 */
180 	if (dl_caller_el == MODE_EL3) {
181 		ERROR("DRTM: invalid launch from EL3\n");
182 		return DENIED;
183 	}
184 
185 	if (dl_caller_aarch != MODE_RW_64) {
186 		ERROR("DRTM: invalid launch from non-AArch64 execution state\n");
187 		return DENIED;
188 	}
189 
190 	return SUCCESS;
191 }
192 
193 static enum drtm_retc drtm_dl_check_cores(void)
194 {
195 	bool running_on_single_core;
196 	uint64_t this_pe_aff_value = read_mpidr_el1() & MPIDR_AFFINITY_MASK;
197 
198 	if (this_pe_aff_value != plat_drtm_features.boot_pe_id) {
199 		ERROR("DRTM: invalid launch on a non-boot PE\n");
200 		return DENIED;
201 	}
202 
203 	running_on_single_core = psci_is_last_on_cpu_safe();
204 	if (!running_on_single_core) {
205 		ERROR("DRTM: invalid launch due to non-boot PE not being turned off\n");
206 		return DENIED;
207 	}
208 
209 	return SUCCESS;
210 }
211 
212 static enum drtm_retc drtm_dl_prepare_dlme_data(const struct_drtm_dl_args *args)
213 {
214 	int rc;
215 	uint64_t dlme_data_paddr;
216 	size_t dlme_data_max_size;
217 	uintptr_t dlme_data_mapping;
218 	struct_dlme_data_header *dlme_data_hdr;
219 	uint8_t *dlme_data_cursor;
220 	size_t dlme_data_mapping_bytes;
221 	size_t serialised_bytes_actual;
222 
223 	dlme_data_paddr = args->dlme_paddr + args->dlme_data_off;
224 	dlme_data_max_size = args->dlme_size - args->dlme_data_off;
225 
226 	/*
227 	 * The capacity of the given DLME data region is checked when
228 	 * the other dynamic launch arguments are.
229 	 */
230 	if (dlme_data_max_size < dlme_data_min_size) {
231 		ERROR("%s: assertion failed:"
232 		      " dlme_data_max_size (%ld) < dlme_data_total_bytes_req (%ld)\n",
233 		      __func__, dlme_data_max_size, dlme_data_min_size);
234 		panic();
235 	}
236 
237 	/* Map the DLME data region as NS memory. */
238 	dlme_data_mapping_bytes = ALIGNED_UP(dlme_data_max_size, DRTM_PAGE_SIZE);
239 	rc = mmap_add_dynamic_region_alloc_va(dlme_data_paddr,
240 					      &dlme_data_mapping,
241 					      dlme_data_mapping_bytes,
242 					      MT_RW_DATA | MT_NS |
243 					      MT_SHAREABILITY_ISH);
244 	if (rc != 0) {
245 		WARN("DRTM: %s: mmap_add_dynamic_region() failed rc=%d\n",
246 		     __func__, rc);
247 		return INTERNAL_ERROR;
248 	}
249 	dlme_data_hdr = (struct_dlme_data_header *)dlme_data_mapping;
250 	dlme_data_cursor = (uint8_t *)dlme_data_hdr + sizeof(*dlme_data_hdr);
251 
252 	memcpy(dlme_data_hdr, (const void *)&dlme_data_hdr_init,
253 	       sizeof(*dlme_data_hdr));
254 
255 	/* Set the header version and size. */
256 	dlme_data_hdr->version = 1;
257 	dlme_data_hdr->this_hdr_size = sizeof(*dlme_data_hdr);
258 
259 	/* Prepare DLME protected regions. */
260 	drtm_dma_prot_serialise_table(dlme_data_cursor,
261 				      &serialised_bytes_actual);
262 	assert(serialised_bytes_actual ==
263 	       dlme_data_hdr->dlme_prot_regions_size);
264 	dlme_data_cursor += serialised_bytes_actual;
265 
266 	/* Prepare DLME address map. */
267 	if (plat_drtm_mem_map != NULL) {
268 		memcpy(dlme_data_cursor, plat_drtm_mem_map,
269 		       dlme_data_hdr->dlme_addr_map_size);
270 	} else {
271 		WARN("DRTM: DLME address map is not in the cache\n");
272 	}
273 	dlme_data_cursor += dlme_data_hdr->dlme_addr_map_size;
274 
275 	/* Prepare DRTM event log for DLME. */
276 	drtm_serialise_event_log(dlme_data_cursor, &serialised_bytes_actual);
277 	assert(serialised_bytes_actual <= PLAT_DRTM_EVENT_LOG_MAX_SIZE);
278 	dlme_data_hdr->dlme_tpm_log_size = serialised_bytes_actual;
279 	dlme_data_cursor += serialised_bytes_actual;
280 
281 	/*
282 	 * TODO: Prepare the TCB hashes for DLME, currently its size
283 	 * 0
284 	 */
285 	dlme_data_cursor += dlme_data_hdr->dlme_tcb_hashes_table_size;
286 
287 	/* Implementation-specific region size is unused. */
288 	dlme_data_cursor += dlme_data_hdr->dlme_impdef_region_size;
289 
290 	/*
291 	 * Prepare DLME data size, includes all data region referenced above
292 	 * alongwith the DLME data header
293 	 */
294 	dlme_data_hdr->dlme_data_size = dlme_data_cursor - (uint8_t *)dlme_data_hdr;
295 
296 	/* Unmap the DLME data region. */
297 	rc = mmap_remove_dynamic_region(dlme_data_mapping, dlme_data_mapping_bytes);
298 	if (rc != 0) {
299 		ERROR("%s(): mmap_remove_dynamic_region() failed"
300 		      " unexpectedly rc=%d\n", __func__, rc);
301 		panic();
302 	}
303 
304 	return SUCCESS;
305 }
306 
307 /*
308  * Note: accesses to the dynamic launch args, and to the DLME data are
309  * little-endian as required, thanks to TF-A BL31 init requirements.
310  */
311 static enum drtm_retc drtm_dl_check_args(uint64_t x1,
312 					 struct_drtm_dl_args *a_out)
313 {
314 	uint64_t dlme_start, dlme_end;
315 	uint64_t dlme_img_start, dlme_img_ep, dlme_img_end;
316 	uint64_t dlme_data_start, dlme_data_end;
317 	uintptr_t args_mapping;
318 	size_t args_mapping_size;
319 	struct_drtm_dl_args *a;
320 	struct_drtm_dl_args args_buf;
321 	int rc;
322 
323 	if (x1 % DRTM_PAGE_SIZE != 0) {
324 		ERROR("DRTM: parameters structure is not "
325 		      DRTM_PAGE_SIZE_STR "-aligned\n");
326 		return INVALID_PARAMETERS;
327 	}
328 
329 	args_mapping_size = ALIGNED_UP(sizeof(struct_drtm_dl_args), DRTM_PAGE_SIZE);
330 	rc = mmap_add_dynamic_region_alloc_va(x1, &args_mapping, args_mapping_size,
331 					      MT_MEMORY | MT_NS | MT_RO |
332 					      MT_SHAREABILITY_ISH);
333 	if (rc != 0) {
334 		WARN("DRTM: %s: mmap_add_dynamic_region() failed rc=%d\n",
335 		      __func__, rc);
336 		return INTERNAL_ERROR;
337 	}
338 	a = (struct_drtm_dl_args *)args_mapping;
339 	/*
340 	 * TODO: invalidate all data cache before reading the data passed by the
341 	 * DCE Preamble.  This is required to avoid / defend against racing with
342 	 * cache evictions.
343 	 */
344 	args_buf = *a;
345 
346 	rc = mmap_remove_dynamic_region(args_mapping, args_mapping_size);
347 	if (rc) {
348 		ERROR("%s(): mmap_remove_dynamic_region() failed unexpectedly"
349 		      " rc=%d\n", __func__, rc);
350 		panic();
351 	}
352 	a = &args_buf;
353 
354 	if (a->version != 1) {
355 		ERROR("DRTM: parameters structure incompatible with major version %d\n",
356 		      ARM_DRTM_VERSION_MAJOR);
357 		return NOT_SUPPORTED;
358 	}
359 
360 	if (!(a->dlme_img_off < a->dlme_size &&
361 	      a->dlme_data_off < a->dlme_size)) {
362 		ERROR("DRTM: argument offset is outside of the DLME region\n");
363 		return INVALID_PARAMETERS;
364 	}
365 	dlme_start = a->dlme_paddr;
366 	dlme_end = a->dlme_paddr + a->dlme_size;
367 	dlme_img_start = a->dlme_paddr + a->dlme_img_off;
368 	dlme_img_ep = dlme_img_start + a->dlme_img_ep_off;
369 	dlme_img_end = dlme_img_start + a->dlme_img_size;
370 	dlme_data_start = a->dlme_paddr + a->dlme_data_off;
371 	dlme_data_end = dlme_end;
372 
373 	/*
374 	 * TODO: validate that the DLME physical address range is all NS memory,
375 	 * return INVALID_PARAMETERS if it is not.
376 	 * Note that this check relies on platform-specific information. For
377 	 * examples, see psci_plat_pm_ops->validate_ns_entrypoint() or
378 	 * arm_validate_ns_entrypoint().
379 	 */
380 
381 	/* Check the DLME regions arguments. */
382 	if ((dlme_start % DRTM_PAGE_SIZE) != 0) {
383 		ERROR("DRTM: argument DLME region is not "
384 		      DRTM_PAGE_SIZE_STR "-aligned\n");
385 		return INVALID_PARAMETERS;
386 	}
387 
388 	if (!(dlme_start < dlme_end &&
389 	      dlme_start <= dlme_img_start && dlme_img_start < dlme_img_end &&
390 	      dlme_start <= dlme_data_start && dlme_data_start < dlme_data_end)) {
391 		ERROR("DRTM: argument DLME region is discontiguous\n");
392 		return INVALID_PARAMETERS;
393 	}
394 
395 	if (dlme_img_start < dlme_data_end && dlme_data_start < dlme_img_end) {
396 		ERROR("DRTM: argument DLME regions overlap\n");
397 		return INVALID_PARAMETERS;
398 	}
399 
400 	/* Check the DLME image region arguments. */
401 	if ((dlme_img_start % DRTM_PAGE_SIZE) != 0) {
402 		ERROR("DRTM: argument DLME image region is not "
403 		      DRTM_PAGE_SIZE_STR "-aligned\n");
404 		return INVALID_PARAMETERS;
405 	}
406 
407 	if (!(dlme_img_start <= dlme_img_ep && dlme_img_ep < dlme_img_end)) {
408 		ERROR("DRTM: DLME entry point is outside of the DLME image region\n");
409 		return INVALID_PARAMETERS;
410 	}
411 
412 	if ((dlme_img_ep % 4) != 0) {
413 		ERROR("DRTM: DLME image entry point is not 4-byte-aligned\n");
414 		return INVALID_PARAMETERS;
415 	}
416 
417 	/* Check the DLME data region arguments. */
418 	if ((dlme_data_start % DRTM_PAGE_SIZE) != 0) {
419 		ERROR("DRTM: argument DLME data region is not "
420 		      DRTM_PAGE_SIZE_STR "-aligned\n");
421 		return INVALID_PARAMETERS;
422 	}
423 
424 	if (dlme_data_end - dlme_data_start < dlme_data_min_size) {
425 		ERROR("DRTM: argument DLME data region is short of %lu bytes\n",
426 		      dlme_data_min_size - (size_t)(dlme_data_end - dlme_data_start));
427 		return INVALID_PARAMETERS;
428 	}
429 
430 	/* Check the Normal World DCE region arguments. */
431 	if (a->dce_nwd_paddr != 0) {
432 		uint32_t dce_nwd_start = a->dce_nwd_paddr;
433 		uint32_t dce_nwd_end = dce_nwd_start + a->dce_nwd_size;
434 
435 		if (!(dce_nwd_start < dce_nwd_end)) {
436 			ERROR("DRTM: argument Normal World DCE region is dicontiguous\n");
437 			return INVALID_PARAMETERS;
438 		}
439 
440 		if (dce_nwd_start < dlme_end && dlme_start < dce_nwd_end) {
441 			ERROR("DRTM: argument Normal World DCE regions overlap\n");
442 			return INVALID_PARAMETERS;
443 		}
444 	}
445 
446 	*a_out = *a;
447 	return SUCCESS;
448 }
449 
450 static void drtm_dl_reset_dlme_el_state(enum drtm_dlme_el dlme_el)
451 {
452 	uint64_t sctlr;
453 
454 	/*
455 	 * TODO: Set PE state according to the PSCI's specification of the initial
456 	 * state after CPU_ON, or to reset values if unspecified, where they exist,
457 	 * or define sensible values otherwise.
458 	 */
459 
460 	switch (dlme_el) {
461 	case DLME_AT_EL1:
462 		sctlr = read_sctlr_el1();
463 		break;
464 
465 	case DLME_AT_EL2:
466 		sctlr = read_sctlr_el2();
467 		break;
468 
469 	default: /* Not reached */
470 		ERROR("%s(): dlme_el has the unexpected value %d\n",
471 		      __func__, dlme_el);
472 		panic();
473 	}
474 
475 	sctlr &= ~(/* Disable DLME's EL MMU, since the existing page-tables are untrusted. */
476 		   SCTLR_M_BIT
477 		   | SCTLR_EE_BIT               /* Little-endian data accesses. */
478 		  );
479 
480 	sctlr |= SCTLR_C_BIT | SCTLR_I_BIT; /* Allow instruction and data caching. */
481 
482 	switch (dlme_el) {
483 	case DLME_AT_EL1:
484 		write_sctlr_el1(sctlr);
485 		break;
486 
487 	case DLME_AT_EL2:
488 		write_sctlr_el2(sctlr);
489 		break;
490 	}
491 }
492 
493 static void drtm_dl_reset_dlme_context(enum drtm_dlme_el dlme_el)
494 {
495 	void *ns_ctx = cm_get_context(NON_SECURE);
496 	gp_regs_t *gpregs = get_gpregs_ctx(ns_ctx);
497 	uint64_t spsr_el3 = read_ctx_reg(get_el3state_ctx(ns_ctx), CTX_SPSR_EL3);
498 
499 	/* Reset all gpregs, including SP_EL0. */
500 	memset(gpregs, 0, sizeof(*gpregs));
501 
502 	/* Reset SP_ELx. */
503 	switch (dlme_el) {
504 	case DLME_AT_EL1:
505 		write_sp_el1(0);
506 		break;
507 
508 	case DLME_AT_EL2:
509 		write_sp_el2(0);
510 		break;
511 	}
512 
513 	/*
514 	 * DLME's async exceptions are masked to avoid a NWd attacker's timed
515 	 * interference with any state we established trust in or measured.
516 	 */
517 	spsr_el3 |= SPSR_DAIF_MASK << SPSR_DAIF_SHIFT;
518 
519 	write_ctx_reg(get_el3state_ctx(ns_ctx), CTX_SPSR_EL3, spsr_el3);
520 }
521 
522 static void drtm_dl_prepare_eret_to_dlme(const struct_drtm_dl_args *args, enum drtm_dlme_el dlme_el)
523 {
524 	void *ctx = cm_get_context(NON_SECURE);
525 	uint64_t dlme_ep = DL_ARGS_GET_DLME_ENTRY_POINT(args);
526 	uint64_t spsr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SPSR_EL3);
527 
528 	/* Next ERET is to the DLME's EL. */
529 	spsr_el3 &= ~(MODE_EL_MASK << MODE_EL_SHIFT);
530 	switch (dlme_el) {
531 	case DLME_AT_EL1:
532 		spsr_el3 |= MODE_EL1 << MODE_EL_SHIFT;
533 		break;
534 
535 	case DLME_AT_EL2:
536 		spsr_el3 |= MODE_EL2 << MODE_EL_SHIFT;
537 		break;
538 	}
539 
540 	/* Next ERET is to the DLME entry point. */
541 	cm_set_elr_spsr_el3(NON_SECURE, dlme_ep, spsr_el3);
542 }
543 
544 static uint64_t drtm_dynamic_launch(uint64_t x1, void *handle)
545 {
546 	enum drtm_retc ret = SUCCESS;
547 	enum drtm_retc dma_prot_ret;
548 	struct_drtm_dl_args args;
549 	/* DLME should be highest NS exception level */
550 	enum drtm_dlme_el dlme_el = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;
551 
552 	/* Ensure that only boot PE is powered on */
553 	ret = drtm_dl_check_cores();
554 	if (ret != SUCCESS) {
555 		SMC_RET1(handle, ret);
556 	}
557 
558 	/*
559 	 * Ensure that execution state is AArch64 and the caller
560 	 * is highest non-secure exception level
561 	 */
562 	ret = drtm_dl_check_caller_el(handle);
563 	if (ret != SUCCESS) {
564 		SMC_RET1(handle, ret);
565 	}
566 
567 	ret = drtm_dl_check_args(x1, &args);
568 	if (ret != SUCCESS) {
569 		SMC_RET1(handle, ret);
570 	}
571 
572 	/*
573 	 * Engage the DMA protections.  The launch cannot proceed without the DMA
574 	 * protections due to potential TOC/TOU vulnerabilities w.r.t. the DLME
575 	 * region (and to the NWd DCE region).
576 	 */
577 	ret = drtm_dma_prot_engage(&args.dma_prot_args,
578 				   DL_ARGS_GET_DMA_PROT_TYPE(&args));
579 	if (ret != SUCCESS) {
580 		SMC_RET1(handle, ret);
581 	}
582 
583 	/*
584 	 * The DMA protection is now engaged.  Note that any failure mode that
585 	 * returns an error to the DRTM-launch caller must now disengage DMA
586 	 * protections before returning to the caller.
587 	 */
588 
589 	ret = drtm_take_measurements(&args);
590 	if (ret != SUCCESS) {
591 		goto err_undo_dma_prot;
592 	}
593 
594 	ret = drtm_dl_prepare_dlme_data(&args);
595 	if (ret != SUCCESS) {
596 		goto err_undo_dma_prot;
597 	}
598 
599 	/*
600 	 * Note that, at the time of writing, the DRTM spec allows a successful
601 	 * launch from NS-EL1 to return to a DLME in NS-EL2.  The practical risk
602 	 * of a privilege escalation, e.g. due to a compromised hypervisor, is
603 	 * considered small enough not to warrant the specification of additional
604 	 * DRTM conduits that would be necessary to maintain OSs' abstraction from
605 	 * the presence of EL2 were the dynamic launch only be allowed from the
606 	 * highest NS EL.
607 	 */
608 
609 	dlme_el = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;
610 
611 	drtm_dl_reset_dlme_el_state(dlme_el);
612 	drtm_dl_reset_dlme_context(dlme_el);
613 
614 	/*
615 	 * TODO: Reset all SDEI event handlers, since they are untrusted.  Both
616 	 * private and shared events for all cores must be unregistered.
617 	 * Note that simply calling SDEI ABIs would not be adequate for this, since
618 	 * there is currently no SDEI operation that clears private data for all PEs.
619 	 */
620 
621 	drtm_dl_prepare_eret_to_dlme(&args, dlme_el);
622 
623 	/*
624 	 * TODO: invalidate the instruction cache before jumping to the DLME.
625 	 * This is required to defend against potentially-malicious cache contents.
626 	 */
627 
628 	/* Return the DLME region's address in x0, and the DLME data offset in x1.*/
629 	SMC_RET2(handle, args.dlme_paddr, args.dlme_data_off);
630 
631 err_undo_dma_prot:
632 	dma_prot_ret = drtm_dma_prot_disengage();
633 	if (dma_prot_ret != SUCCESS) {
634 		ERROR("%s(): drtm_dma_prot_disengage() failed unexpectedly"
635 		      " rc=%d\n", __func__, ret);
636 		panic();
637 	}
638 
639 	SMC_RET1(handle, ret);
640 }
641 
642 uint64_t drtm_smc_handler(uint32_t smc_fid,
643 			  uint64_t x1,
644 			  uint64_t x2,
645 			  uint64_t x3,
646 			  uint64_t x4,
647 			  void *cookie,
648 			  void *handle,
649 			  uint64_t flags)
650 {
651 	/* Check that the SMC call is from the Normal World. */
652 	if (!is_caller_non_secure(flags)) {
653 		SMC_RET1(handle, NOT_SUPPORTED);
654 	}
655 
656 	switch (smc_fid) {
657 	case ARM_DRTM_SVC_VERSION:
658 		INFO("DRTM service handler: version\n");
659 		/* Return the version of current implementation */
660 		SMC_RET1(handle, ARM_DRTM_VERSION);
661 		break;	/* not reached */
662 
663 	case ARM_DRTM_SVC_FEATURES:
664 		if (((x1 >> ARM_DRTM_FUNC_SHIFT) & ARM_DRTM_FUNC_MASK) ==
665 		    ARM_DRTM_FUNC_ID) {
666 			/* Dispatch function-based queries. */
667 			switch (x1 & FUNCID_MASK) {
668 			case ARM_DRTM_SVC_VERSION:
669 				SMC_RET1(handle, SUCCESS);
670 				break;	/* not reached */
671 
672 			case ARM_DRTM_SVC_FEATURES:
673 				SMC_RET1(handle, SUCCESS);
674 				break;	/* not reached */
675 
676 			case ARM_DRTM_SVC_UNPROTECT_MEM:
677 				SMC_RET1(handle, SUCCESS);
678 				break;	/* not reached */
679 
680 			case ARM_DRTM_SVC_DYNAMIC_LAUNCH:
681 				SMC_RET1(handle, SUCCESS);
682 				break;	/* not reached */
683 
684 			case ARM_DRTM_SVC_CLOSE_LOCALITY:
685 				WARN("ARM_DRTM_SVC_CLOSE_LOCALITY feature %s",
686 				     "is not supported\n");
687 				SMC_RET1(handle, NOT_SUPPORTED);
688 				break;	/* not reached */
689 
690 			case ARM_DRTM_SVC_GET_ERROR:
691 				SMC_RET1(handle, SUCCESS);
692 				break;	/* not reached */
693 
694 			case ARM_DRTM_SVC_SET_ERROR:
695 				SMC_RET1(handle, SUCCESS);
696 				break;	/* not reached */
697 
698 			case ARM_DRTM_SVC_SET_TCB_HASH:
699 				WARN("ARM_DRTM_SVC_TCB_HASH feature %s",
700 				     "is not supported\n");
701 				SMC_RET1(handle, NOT_SUPPORTED);
702 				break;	/* not reached */
703 
704 			case ARM_DRTM_SVC_LOCK_TCB_HASH:
705 				WARN("ARM_DRTM_SVC_LOCK_TCB_HASH feature %s",
706 				     "is not supported\n");
707 				SMC_RET1(handle, NOT_SUPPORTED);
708 				break;	/* not reached */
709 
710 			default:
711 				ERROR("Unknown DRTM service function\n");
712 				SMC_RET1(handle, NOT_SUPPORTED);
713 				break;	/* not reached */
714 			}
715 		} else {
716 			/* Dispatch feature-based queries. */
717 			switch (x1 & ARM_DRTM_FEAT_ID_MASK) {
718 			case ARM_DRTM_FEATURES_TPM:
719 				INFO("++ DRTM service handler: TPM features\n");
720 				return drtm_features_tpm(handle);
721 				break;	/* not reached */
722 
723 			case ARM_DRTM_FEATURES_MEM_REQ:
724 				INFO("++ DRTM service handler: Min. mem."
725 				     " requirement features\n");
726 				return drtm_features_mem_req(handle);
727 				break;	/* not reached */
728 
729 			case ARM_DRTM_FEATURES_DMA_PROT:
730 				INFO("++ DRTM service handler: "
731 				     "DMA protection features\n");
732 				return drtm_features_dma_prot(handle);
733 				break;	/* not reached */
734 
735 			case ARM_DRTM_FEATURES_BOOT_PE_ID:
736 				INFO("++ DRTM service handler: "
737 				     "Boot PE ID features\n");
738 				return drtm_features_boot_pe_id(handle);
739 				break;	/* not reached */
740 
741 			case ARM_DRTM_FEATURES_TCB_HASHES:
742 				INFO("++ DRTM service handler: "
743 				     "TCB-hashes features\n");
744 				return drtm_features_tcb_hashes(handle);
745 				break;	/* not reached */
746 
747 			default:
748 				ERROR("Unknown ARM DRTM service feature\n");
749 				SMC_RET1(handle, NOT_SUPPORTED);
750 				break;	/* not reached */
751 			}
752 		}
753 
754 	case ARM_DRTM_SVC_UNPROTECT_MEM:
755 		INFO("DRTM service handler: unprotect mem\n");
756 		return drtm_unprotect_mem(handle);
757 		break;	/* not reached */
758 
759 	case ARM_DRTM_SVC_DYNAMIC_LAUNCH:
760 		INFO("DRTM service handler: dynamic launch\n");
761 		return drtm_dynamic_launch(x1, handle);
762 		break;	/* not reached */
763 
764 	case ARM_DRTM_SVC_CLOSE_LOCALITY:
765 		WARN("DRTM service handler: close locality %s\n",
766 		     "is not supported");
767 		SMC_RET1(handle, NOT_SUPPORTED);
768 		break;	/* not reached */
769 
770 	case ARM_DRTM_SVC_GET_ERROR:
771 		INFO("DRTM service handler: get error\n");
772 		drtm_get_error(handle);
773 		break;	/* not reached */
774 
775 	case ARM_DRTM_SVC_SET_ERROR:
776 		INFO("DRTM service handler: set error\n");
777 		drtm_set_error(x1, handle);
778 		break;	/* not reached */
779 
780 	case ARM_DRTM_SVC_SET_TCB_HASH:
781 		WARN("DRTM service handler: set TCB hash %s\n",
782 		     "is not supported");
783 		SMC_RET1(handle, NOT_SUPPORTED);
784 		break;  /* not reached */
785 
786 	case ARM_DRTM_SVC_LOCK_TCB_HASH:
787 		WARN("DRTM service handler: lock TCB hash %s\n",
788 		     "is not supported");
789 		SMC_RET1(handle, NOT_SUPPORTED);
790 		break;  /* not reached */
791 
792 	default:
793 		ERROR("Unknown DRTM service function: 0x%x\n", smc_fid);
794 		SMC_RET1(handle, SMC_UNK);
795 		break;	/* not reached */
796 	}
797 
798 	/* not reached */
799 	SMC_RET1(handle, SMC_UNK);
800 }
801