xref: /rk3399_ARM-atf/services/std_svc/drtm/drtm_main.c (revision a65fa57b129713ddaa3cdff048921368f9dacc2e)
/*
 * Copyright (c) 2022-2025 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier:    BSD-3-Clause
 *
 * DRTM service
 *
 * Authors:
 *	Lucian Paul-Trifu <lucian.paultrifu@gmail.com>
 *	Brian Nezvadovitz <brinez@microsoft.com> 2021-02-01
 */

#include <assert.h>
#include <stdint.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <drivers/auth/crypto_mod.h>
#include "drtm_main.h"
#include "drtm_measurements.h"
#include "drtm_remediation.h"
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci_lib.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/platform.h>
#include <services/drtm_svc.h>
#include <services/sdei.h>
#include <platform_def.h>

/* Structure to store DRTM features specific to the platform. */
static drtm_features_t plat_drtm_features;

/* DRTM-formatted memory map. */
static drtm_memory_region_descriptor_table_t *plat_drtm_mem_map;

/* DLME data header template, copied into the DLME data region at launch. */
struct_dlme_data_header dlme_data_hdr_init;

/* Minimum DLME data memory requirement, computed in drtm_setup(). */
uint64_t dlme_data_min_size;

int drtm_setup(void)
{
	bool rc;
	const plat_drtm_tpm_features_t *plat_tpm_feat;
	const plat_drtm_dma_prot_features_t *plat_dma_prot_feat;

	INFO("DRTM service setup\n");

	/* Read boot PE ID from MPIDR */
	plat_drtm_features.boot_pe_id = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	rc = drtm_dma_prot_init();
	if (rc) {
		return INTERNAL_ERROR;
	}

	/*
	 * Initialise the platform-supported crypto module, which the DRTM
	 * service uses to calculate hashes of DRTM implementation-specific
	 * components.
	 */
	crypto_mod_init();

	/* Build DRTM-compatible address map. */
	plat_drtm_mem_map = drtm_build_address_map();
	if (plat_drtm_mem_map == NULL) {
		return INTERNAL_ERROR;
	}

	/* Get DRTM features from platform hooks. */
	plat_tpm_feat = plat_drtm_get_tpm_features();
	if (plat_tpm_feat == NULL) {
		return INTERNAL_ERROR;
	}

	plat_dma_prot_feat = plat_drtm_get_dma_prot_features();
	if (plat_dma_prot_feat == NULL) {
		return INTERNAL_ERROR;
	}

	/*
	 * Add up the minimum DLME data memory.
	 *
	 * For systems with complete DMA protection there is only one entry in
	 * the protected-regions table.
	 */
	if (plat_dma_prot_feat->dma_protection_support ==
			ARM_DRTM_DMA_PROT_FEATURES_DMA_SUPPORT_COMPLETE) {
		dlme_data_min_size =
			sizeof(drtm_memory_region_descriptor_table_t) +
			sizeof(drtm_mem_region_t);
		dlme_data_hdr_init.dlme_prot_regions_size = dlme_data_min_size;
	} else {
		/*
		 * TODO: set the protected-regions table size based on the
		 * platform DMA protection configuration.
		 */
		panic();
	}

	dlme_data_hdr_init.dlme_addr_map_size = drtm_get_address_map_size();
	dlme_data_hdr_init.dlme_tcb_hashes_table_size =
				plat_drtm_get_tcb_hash_table_size();
	dlme_data_hdr_init.dlme_acpi_tables_region_size =
				plat_drtm_get_acpi_tables_region_size();
	dlme_data_hdr_init.dlme_impdef_region_size =
				plat_drtm_get_imp_def_dlme_region_size();

	dlme_data_min_size += sizeof(struct_dlme_data_header) +
			      dlme_data_hdr_init.dlme_addr_map_size +
			      ARM_DRTM_MIN_EVENT_LOG_SIZE +
			      dlme_data_hdr_init.dlme_tcb_hashes_table_size +
			      dlme_data_hdr_init.dlme_acpi_tables_region_size +
			      dlme_data_hdr_init.dlme_impdef_region_size;
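
	/*
	 * For reference, the minimum DLME data size accumulated above is the
	 * sum of the sub-regions that drtm_dl_prepare_dlme_data() lays out at
	 * args->dlme_data_off: the DLME data header itself, the
	 * protected-regions table, the DLME address map, the TPM event log
	 * and the TCB-hashes table, plus the ACPI-tables and
	 * implementation-defined regions.
	 */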

	/* Fill out the platform DRTM features structure. */
	/* Only the default PCR schema (0x1) is supported in this implementation. */
	ARM_DRTM_TPM_FEATURES_SET_PCR_SCHEMA(plat_drtm_features.tpm_features,
		ARM_DRTM_TPM_FEATURES_PCR_SCHEMA_DEFAULT);
	ARM_DRTM_TPM_FEATURES_SET_TPM_HASH(plat_drtm_features.tpm_features,
		plat_tpm_feat->tpm_based_hash_support);
	ARM_DRTM_TPM_FEATURES_SET_FW_HASH(plat_drtm_features.tpm_features,
		plat_tpm_feat->firmware_hash_algorithm);
	ARM_DRTM_MIN_MEM_REQ_SET_MIN_DLME_DATA_SIZE(plat_drtm_features.minimum_memory_requirement,
		page_align(dlme_data_min_size, UP)/PAGE_SIZE);
	ARM_DRTM_MIN_MEM_REQ_SET_DCE_SIZE(plat_drtm_features.minimum_memory_requirement,
		plat_drtm_get_min_size_normal_world_dce());
	ARM_DRTM_DMA_PROT_FEATURES_SET_MAX_REGIONS(plat_drtm_features.dma_prot_features,
		plat_dma_prot_feat->max_num_mem_prot_regions);
	ARM_DRTM_DMA_PROT_FEATURES_SET_DMA_SUPPORT(plat_drtm_features.dma_prot_features,
		plat_dma_prot_feat->dma_protection_support);
	ARM_DRTM_TCB_HASH_FEATURES_SET_MAX_NUM_HASHES(plat_drtm_features.tcb_hash_features,
		plat_drtm_get_tcb_hash_features());

	return 0;
}

static inline void invalidate_icache_all(void)
{
	/* Invalidate all instruction caches to PoU, Inner Shareable. */
	__asm__ volatile("ic      ialluis");
	/* Ensure completion of the invalidation, then flush the pipeline. */
	dsb();
	isb();
}

static inline uint64_t drtm_features_tpm(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* TPM feature is supported */
		 plat_drtm_features.tpm_features);
}

static inline uint64_t drtm_features_mem_req(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* Memory requirement feature is supported */
		 plat_drtm_features.minimum_memory_requirement);
}

static inline uint64_t drtm_features_boot_pe_id(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* Boot PE feature is supported */
		 plat_drtm_features.boot_pe_id);
}

static inline uint64_t drtm_features_dma_prot(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* DMA protection feature is supported */
		 plat_drtm_features.dma_prot_features);
}

static inline uint64_t drtm_features_tcb_hashes(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* TCB hash feature is supported */
		 plat_drtm_features.tcb_hash_features);
}
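
/*
 * For reference, a Normal World caller retrieves one of the feature
 * bit-fields above through the ARM_DRTM_SVC_FEATURES SMC; a minimal sketch
 * (the smc() helper and res struct are illustrative, not part of this file):
 *
 *	res = smc(ARM_DRTM_SVC_FEATURES, ARM_DRTM_FEATURES_TPM);
 *	// res.x0 == 1 indicates the feature is supported, and res.x1 then
 *	// holds the corresponding bit-field, e.g. tpm_features.
 */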

static enum drtm_retc drtm_dl_check_caller_el(void *ctx)
{
	uint64_t spsr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SPSR_EL3);
	uint64_t dl_caller_el;
	uint64_t dl_caller_aarch;

	dl_caller_el = spsr_el3 >> MODE_EL_SHIFT & MODE_EL_MASK;
	dl_caller_aarch = spsr_el3 >> MODE_RW_SHIFT & MODE_RW_MASK;

	/* The caller's security state is checked in drtm_smc_handler(). */

	/* The caller may be NS-EL2 or NS-EL1. */
	if (dl_caller_el == MODE_EL3) {
		ERROR("DRTM: invalid launch from EL3\n");
		return DENIED;
	}

	if (dl_caller_aarch != MODE_RW_64) {
		ERROR("DRTM: invalid launch from non-AArch64 execution state\n");
		return DENIED;
	}

	return SUCCESS;
}

static enum drtm_retc drtm_dl_check_cores(void)
{
	bool running_on_single_core;
	uint64_t this_pe_aff_value = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	if (this_pe_aff_value != plat_drtm_features.boot_pe_id) {
		ERROR("DRTM: invalid launch on a non-boot PE\n");
		return DENIED;
	}

	running_on_single_core = psci_is_last_on_cpu_safe(plat_my_core_pos());
	if (!running_on_single_core) {
		ERROR("DRTM: invalid launch while non-boot PEs are still turned on\n");
		return SECONDARY_PE_NOT_OFF;
	}

	return SUCCESS;
}
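
/*
 * Note: the checks above imply that the DCE Preamble must power off all
 * secondary PEs (e.g. via PSCI CPU_OFF) and issue the dynamic launch SMC
 * from the boot PE recorded at drtm_setup() time; otherwise DENIED or
 * SECONDARY_PE_NOT_OFF is returned.
 */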

static enum drtm_retc drtm_dl_prepare_dlme_data(const struct_drtm_dl_args *args)
{
	int rc;
	uint64_t dlme_data_paddr;
	size_t dlme_data_max_size;
	uintptr_t dlme_data_mapping;
	struct_dlme_data_header *dlme_data_hdr;
	uint8_t *dlme_data_cursor;
	size_t dlme_data_mapping_bytes;
	size_t serialised_bytes_actual;

	dlme_data_paddr = args->dlme_paddr + args->dlme_data_off;
	dlme_data_max_size = args->dlme_size - args->dlme_data_off;

	/*
	 * The capacity of the given DLME data region is checked when
	 * the other dynamic launch arguments are.
	 */
	if (dlme_data_max_size < dlme_data_min_size) {
		ERROR("%s: assertion failed:"
		      " dlme_data_max_size (%zu) < dlme_data_min_size (%lu)\n",
		      __func__, dlme_data_max_size, dlme_data_min_size);
		panic();
	}

	/* Map the DLME data region as NS memory. */
	dlme_data_mapping_bytes = ALIGNED_UP(dlme_data_max_size, DRTM_PAGE_SIZE);
	rc = mmap_add_dynamic_region_alloc_va(dlme_data_paddr,
					      &dlme_data_mapping,
					      dlme_data_mapping_bytes,
					      MT_RW_DATA | MT_NS |
					      MT_SHAREABILITY_ISH);
	if (rc != 0) {
		WARN("DRTM: %s: mmap_add_dynamic_region() failed rc=%d\n",
		     __func__, rc);
		return INTERNAL_ERROR;
	}
	dlme_data_hdr = (struct_dlme_data_header *)dlme_data_mapping;
	dlme_data_cursor = (uint8_t *)dlme_data_hdr + sizeof(*dlme_data_hdr);

	memcpy(dlme_data_hdr, (const void *)&dlme_data_hdr_init,
	       sizeof(*dlme_data_hdr));

	/* Set the header version and size. */
	dlme_data_hdr->version = 1;
	dlme_data_hdr->this_hdr_size = sizeof(*dlme_data_hdr);

	/* Prepare DLME protected regions. */
	drtm_dma_prot_serialise_table(dlme_data_cursor,
				      &serialised_bytes_actual);
	assert(serialised_bytes_actual ==
	       dlme_data_hdr->dlme_prot_regions_size);
	dlme_data_cursor += serialised_bytes_actual;

	/* Prepare DLME address map. */
	if (plat_drtm_mem_map != NULL) {
		memcpy(dlme_data_cursor, plat_drtm_mem_map,
		       dlme_data_hdr->dlme_addr_map_size);
	} else {
		WARN("DRTM: DLME address map is not in the cache\n");
	}
	dlme_data_cursor += dlme_data_hdr->dlme_addr_map_size;

	/* Prepare DRTM event log for DLME. */
	drtm_serialise_event_log(dlme_data_cursor, &serialised_bytes_actual);
	assert(serialised_bytes_actual <= ARM_DRTM_MIN_EVENT_LOG_SIZE);
	dlme_data_hdr->dlme_tpm_log_size = ARM_DRTM_MIN_EVENT_LOG_SIZE;
	dlme_data_cursor += dlme_data_hdr->dlme_tpm_log_size;

	/*
	 * TODO: Prepare the TCB hashes for the DLME; their table size is
	 * currently 0.
	 */
	dlme_data_cursor += dlme_data_hdr->dlme_tcb_hashes_table_size;

	/* The implementation-specific region is unused; skip over it. */
	dlme_data_cursor += dlme_data_hdr->dlme_impdef_region_size;

	/*
	 * Compute the DLME data size; it covers all the data regions
	 * referenced above, along with the DLME data header.
	 */
	dlme_data_hdr->dlme_data_size = dlme_data_cursor - (uint8_t *)dlme_data_hdr;

	/* Unmap the DLME data region. */
	rc = mmap_remove_dynamic_region(dlme_data_mapping, dlme_data_mapping_bytes);
	if (rc != 0) {
		ERROR("%s(): mmap_remove_dynamic_region() failed"
		      " unexpectedly rc=%d\n", __func__, rc);
		panic();
	}

	return SUCCESS;
}

/*
 * Note: accesses to the dynamic launch args, and to the DLME data are
 * little-endian as required, thanks to TF-A BL31 init requirements.
 */
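
/*
 * For reference, the layout that drtm_dl_check_args() enforces on the DLME
 * region (offsets are taken from the launch arguments; the image and data
 * sub-regions must not overlap, so in practice the image precedes the data
 * region):
 *
 *	dlme_paddr
 *	|
 *	v
 *	+--------+------------+--------+-----------------+
 *	|        | DLME image |        | DLME data       |
 *	+--------+------------+--------+-----------------+
 *	         ^                     ^                 ^
 *	         + dlme_img_off        + dlme_data_off   + dlme_size
 */
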
static enum drtm_retc drtm_dl_check_args(uint64_t x1,
					 struct_drtm_dl_args *a_out)
{
	uint64_t dlme_start, dlme_end;
	uint64_t dlme_img_start, dlme_img_ep, dlme_img_end;
	uint64_t dlme_data_start, dlme_data_end;
	uintptr_t va_mapping;
	size_t va_mapping_size;
	struct_drtm_dl_args *a;
	struct_drtm_dl_args args_buf;
	int rc;

	if (x1 % DRTM_PAGE_SIZE != 0) {
		ERROR("DRTM: parameters structure is not "
		      DRTM_PAGE_SIZE_STR "-aligned\n");
		return INVALID_PARAMETERS;
	}

	va_mapping_size = ALIGNED_UP(sizeof(struct_drtm_dl_args), DRTM_PAGE_SIZE);

	/* Check that the DRTM parameters are within an NS address region. */
	rc = plat_drtm_validate_ns_region(x1, va_mapping_size);
	if (rc != 0) {
		ERROR("DRTM: parameters structure lies within secure memory\n");
		return INVALID_PARAMETERS;
	}

	rc = mmap_add_dynamic_region_alloc_va(x1, &va_mapping, va_mapping_size,
					      MT_MEMORY | MT_NS | MT_RO |
					      MT_SHAREABILITY_ISH);
	if (rc != 0) {
		WARN("DRTM: %s: mmap_add_dynamic_region() failed rc=%d\n",
		     __func__, rc);
		return INTERNAL_ERROR;
	}
	a = (struct_drtm_dl_args *)va_mapping;

	/* Sanitise the cache of data passed in args by the DCE Preamble. */
	flush_dcache_range(va_mapping, va_mapping_size);

	args_buf = *a;

	rc = mmap_remove_dynamic_region(va_mapping, va_mapping_size);
	if (rc) {
		ERROR("%s(): mmap_remove_dynamic_region() failed unexpectedly"
		      " rc=%d\n", __func__, rc);
		panic();
	}
	a = &args_buf;

	if (!((a->version >= ARM_DRTM_PARAMS_MIN_VERSION) &&
	    (a->version <= ARM_DRTM_PARAMS_MAX_VERSION))) {
		ERROR("DRTM: parameters structure version %u is unsupported\n",
		      a->version);
		return NOT_SUPPORTED;
	}

	if (!(a->dlme_img_off < a->dlme_size &&
	      a->dlme_data_off < a->dlme_size)) {
		ERROR("DRTM: argument offset is outside of the DLME region\n");
		return INVALID_PARAMETERS;
	}
	dlme_start = a->dlme_paddr;
	dlme_end = a->dlme_paddr + a->dlme_size;
	dlme_img_start = a->dlme_paddr + a->dlme_img_off;
	dlme_img_ep = dlme_img_start + a->dlme_img_ep_off;
	dlme_img_end = dlme_img_start + a->dlme_img_size;
	dlme_data_start = a->dlme_paddr + a->dlme_data_off;
	dlme_data_end = dlme_end;

	/* Check the DLME region arguments. */
	if ((dlme_start % DRTM_PAGE_SIZE) != 0) {
		ERROR("DRTM: argument DLME region is not "
		      DRTM_PAGE_SIZE_STR "-aligned\n");
		return INVALID_PARAMETERS;
	}

	if (!(dlme_start < dlme_end &&
	      dlme_start <= dlme_img_start && dlme_img_start < dlme_img_end &&
	      dlme_start <= dlme_data_start && dlme_data_start < dlme_data_end)) {
		ERROR("DRTM: argument DLME region is discontiguous\n");
		return INVALID_PARAMETERS;
	}

	if (dlme_img_start < dlme_data_end && dlme_data_start < dlme_img_end) {
		ERROR("DRTM: argument DLME image and data regions overlap\n");
		return INVALID_PARAMETERS;
	}

	/* Check the DLME image region arguments. */
	if ((dlme_img_start % DRTM_PAGE_SIZE) != 0) {
		ERROR("DRTM: argument DLME image region is not "
		      DRTM_PAGE_SIZE_STR "-aligned\n");
		return INVALID_PARAMETERS;
	}

	if (!(dlme_img_start <= dlme_img_ep && dlme_img_ep < dlme_img_end)) {
		ERROR("DRTM: DLME entry point is outside of the DLME image region\n");
		return INVALID_PARAMETERS;
	}

	if ((dlme_img_ep % 4) != 0) {
		ERROR("DRTM: DLME image entry point is not 4-byte-aligned\n");
		return INVALID_PARAMETERS;
	}

	/* Check the DLME data region arguments. */
	if ((dlme_data_start % DRTM_PAGE_SIZE) != 0) {
		ERROR("DRTM: argument DLME data region is not "
		      DRTM_PAGE_SIZE_STR "-aligned\n");
		return INVALID_PARAMETERS;
	}

	if (dlme_data_end - dlme_data_start < dlme_data_min_size) {
		ERROR("DRTM: argument DLME data region is short of %lu bytes\n",
		      dlme_data_min_size - (size_t)(dlme_data_end - dlme_data_start));
		return INVALID_PARAMETERS;
	}

	/* Check that the DLME region (paddr + size) is within an NS address region. */
	rc = plat_drtm_validate_ns_region(dlme_start, (size_t)a->dlme_size);
	if (rc != 0) {
		ERROR("DRTM: DLME region lies within secure memory\n");
		return INVALID_PARAMETERS;
	}

	/* Check the Normal World DCE region arguments. */
	if (a->dce_nwd_paddr != 0) {
		uint64_t dce_nwd_start = a->dce_nwd_paddr;
		uint64_t dce_nwd_end = dce_nwd_start + a->dce_nwd_size;

		if (!(dce_nwd_start < dce_nwd_end)) {
			ERROR("DRTM: argument Normal World DCE region is discontiguous\n");
			return INVALID_PARAMETERS;
		}

		if (dce_nwd_start < dlme_end && dlme_start < dce_nwd_end) {
			ERROR("DRTM: argument Normal World DCE region overlaps the DLME region\n");
			return INVALID_PARAMETERS;
		}
	}

	/*
	 * Map and sanitise the cache of the data range passed by the DCE
	 * Preamble. This is required to avoid/defend against racing with
	 * cache evictions.
	 */
	va_mapping_size = ALIGNED_UP((dlme_end - dlme_start), DRTM_PAGE_SIZE);
	rc = mmap_add_dynamic_region_alloc_va(dlme_start, &va_mapping, va_mapping_size,
					      MT_MEMORY | MT_NS | MT_RO |
					      MT_SHAREABILITY_ISH);
	if (rc != 0) {
		ERROR("DRTM: %s: mmap_add_dynamic_region_alloc_va() failed rc=%d\n",
		      __func__, rc);
		return INTERNAL_ERROR;
	}
	flush_dcache_range(va_mapping, va_mapping_size);

	rc = mmap_remove_dynamic_region(va_mapping, va_mapping_size);
	if (rc) {
		ERROR("%s(): mmap_remove_dynamic_region() failed unexpectedly"
		      " rc=%d\n", __func__, rc);
		panic();
	}

	*a_out = *a;
	return SUCCESS;
}

static void drtm_dl_reset_dlme_el_state(enum drtm_dlme_el dlme_el)
{
	uint64_t sctlr;

	/*
	 * TODO: Set the PE state according to the PSCI specification of the
	 * initial state after CPU_ON, or to reset values if unspecified, where
	 * they exist, or define sensible values otherwise.
	 */

	switch (dlme_el) {
	case DLME_AT_EL1:
		sctlr = read_sctlr_el1();
		break;

	case DLME_AT_EL2:
		sctlr = read_sctlr_el2();
		break;

	default: /* Not reached */
		ERROR("%s(): dlme_el has the unexpected value %d\n",
		      __func__, dlme_el);
		panic();
	}

	sctlr &= ~(/* Disable the DLME EL's MMU, since the existing page tables are untrusted. */
		   SCTLR_M_BIT
		   | SCTLR_EE_BIT		/* Little-endian data accesses. */
		   | SCTLR_C_BIT		/* Disable data caching. */
		   | SCTLR_I_BIT		/* Disable instruction caching. */
		  );

	switch (dlme_el) {
	case DLME_AT_EL1:
		write_sctlr_el1(sctlr);
		break;

	case DLME_AT_EL2:
		write_sctlr_el2(sctlr);
		break;
	}
}

static void drtm_dl_reset_dlme_context(enum drtm_dlme_el dlme_el)
{
	void *ns_ctx = cm_get_context(NON_SECURE);
	gp_regs_t *gpregs = get_gpregs_ctx(ns_ctx);
	uint64_t spsr_el3 = read_ctx_reg(get_el3state_ctx(ns_ctx), CTX_SPSR_EL3);

	/* Reset all gpregs, including SP_EL0. */
	memset(gpregs, 0, sizeof(*gpregs));

	/* Reset SP_ELx. */
	switch (dlme_el) {
	case DLME_AT_EL1:
		write_sp_el1(0);
		break;

	case DLME_AT_EL2:
		write_sp_el2(0);
		break;
	}

	/*
	 * DLME's async exceptions are masked to avoid a NWd attacker's timed
	 * interference with any state we established trust in or measured.
	 */
	spsr_el3 |= SPSR_DAIF_MASK << SPSR_DAIF_SHIFT;

	write_ctx_reg(get_el3state_ctx(ns_ctx), CTX_SPSR_EL3, spsr_el3);
}

static void drtm_dl_prepare_eret_to_dlme(const struct_drtm_dl_args *args, enum drtm_dlme_el dlme_el)
{
	void *ctx = cm_get_context(NON_SECURE);
	uint64_t dlme_ep = DL_ARGS_GET_DLME_ENTRY_POINT(args);
	uint64_t spsr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SPSR_EL3);

	/* Next ERET is to the DLME's EL. */
	spsr_el3 &= ~(MODE_EL_MASK << MODE_EL_SHIFT);
	switch (dlme_el) {
	case DLME_AT_EL1:
		spsr_el3 |= MODE_EL1 << MODE_EL_SHIFT;
		break;

	case DLME_AT_EL2:
		spsr_el3 |= MODE_EL2 << MODE_EL_SHIFT;
		break;
	}

	/* Next ERET is to the DLME entry point. */
	cm_set_elr_spsr_el3(NON_SECURE, dlme_ep, spsr_el3);
}
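
/*
 * Taken together, the three helpers above hand over to the DLME with, in
 * effect: x0 = DLME region base and x1 = DLME data offset (set by the final
 * SMC_RET2 in drtm_dynamic_launch()), PC = the validated DLME entry point,
 * the MMU and the data and instruction caches disabled at the DLME's EL,
 * asynchronous exceptions masked, and all other general-purpose registers
 * zeroed.
 */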

static uint64_t drtm_dynamic_launch(uint64_t x1, void *handle)
{
	enum drtm_retc ret = SUCCESS;
	enum drtm_retc dma_prot_ret;
	struct_drtm_dl_args args;
	/* The DLME runs at the highest implemented NS exception level. */
	enum drtm_dlme_el dlme_el = (el_implemented(2) != EL_IMPL_NONE) ?
				    DLME_AT_EL2 : DLME_AT_EL1;

	/* Ensure that only the boot PE is powered on. */
	ret = drtm_dl_check_cores();
	if (ret != SUCCESS) {
		SMC_RET1(handle, ret);
	}

	/*
	 * Ensure that the execution state is AArch64 and that the caller
	 * is at the highest non-secure exception level.
	 */
	ret = drtm_dl_check_caller_el(handle);
	if (ret != SUCCESS) {
		SMC_RET1(handle, ret);
	}

	ret = drtm_dl_check_args(x1, &args);
	if (ret != SUCCESS) {
		SMC_RET1(handle, ret);
	}

	/* Ensure that no SDEI events are registered. */
#if SDEI_SUPPORT
	if (sdei_get_registered_event_count() != 0) {
		SMC_RET1(handle, DENIED);
	}
#endif /* SDEI_SUPPORT */

	/*
	 * Engage the DMA protections.  The launch cannot proceed without the DMA
	 * protections due to potential TOC/TOU vulnerabilities w.r.t. the DLME
	 * region (and the NWd DCE region).
	 */
	ret = drtm_dma_prot_engage(&args.dma_prot_args,
				   DL_ARGS_GET_DMA_PROT_TYPE(&args));
	if (ret != SUCCESS) {
		SMC_RET1(handle, ret);
	}

	/*
	 * The DMA protection is now engaged.  Note that any failure mode that
	 * returns an error to the DRTM-launch caller must now disengage DMA
	 * protections before returning to the caller.
	 */

	ret = drtm_take_measurements(&args);
	if (ret != SUCCESS) {
		goto err_undo_dma_prot;
	}

	ret = drtm_dl_prepare_dlme_data(&args);
	if (ret != SUCCESS) {
		goto err_undo_dma_prot;
	}

	/*
	 * Note that, at the time of writing, the DRTM spec allows a successful
	 * launch from NS-EL1 to return to a DLME in NS-EL2.  The practical risk
	 * of a privilege escalation, e.g. due to a compromised hypervisor, is
	 * considered small enough not to warrant the specification of additional
	 * DRTM conduits that would be necessary to maintain OSs' abstraction from
	 * the presence of EL2 were the dynamic launch only allowed from the
	 * highest NS EL.
	 */

	drtm_dl_reset_dlme_el_state(dlme_el);
	drtm_dl_reset_dlme_context(dlme_el);

	/*
	 * Setting the Generic Timer frequency is required before launching the
	 * DLME and has already been done for the running CPU during PSCI setup.
	 */
	drtm_dl_prepare_eret_to_dlme(&args, dlme_el);

	/*
	 * As per DRTM 1.0 spec table #30, invalidate the instruction cache
	 * before jumping to the DLME. This is required to defend against
	 * potentially-malicious cache contents.
	 */
	invalidate_icache_all();

	/* Return the DLME region's address in x0, and the DLME data offset in x1. */
	SMC_RET2(handle, args.dlme_paddr, args.dlme_data_off);

err_undo_dma_prot:
	dma_prot_ret = drtm_dma_prot_disengage();
	if (dma_prot_ret != SUCCESS) {
		ERROR("%s(): drtm_dma_prot_disengage() failed unexpectedly"
		      " rc=%d\n", __func__, dma_prot_ret);
		panic();
	}

	SMC_RET1(handle, ret);
}
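
/*
 * For reference, a minimal sketch of the expected Normal World launch
 * sequence (helper names are illustrative, not part of this interface):
 *
 *	// 1. Place the dynamic launch arguments in a page-aligned NS buffer.
 *	struct_drtm_dl_args *args = alloc_dl_args_page();
 *	// 2. Power off all secondary PEs, e.g. via PSCI CPU_OFF.
 *	// 3. Issue the launch; on success execution continues in the DLME,
 *	//    on failure x0 holds one of enum drtm_retc.
 *	res = smc(ARM_DRTM_SVC_DYNAMIC_LAUNCH, (uintptr_t)args);
 */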

uint64_t drtm_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	/* Check that the SMC call is from the Normal World. */
	if (!is_caller_non_secure(flags)) {
		SMC_RET1(handle, NOT_SUPPORTED);
	}

	switch (smc_fid) {
	case ARM_DRTM_SVC_VERSION:
		INFO("DRTM service handler: version\n");
		/* Return the version of the current implementation. */
		SMC_RET1(handle, ARM_DRTM_VERSION);
		break;	/* not reached */

	case ARM_DRTM_SVC_FEATURES:
		if (((x1 >> ARM_DRTM_FUNC_SHIFT) & ARM_DRTM_FUNC_MASK) ==
		    ARM_DRTM_FUNC_ID) {
			/* Dispatch function-based queries. */
			switch (x1 & FUNCID_MASK) {
			case ARM_DRTM_SVC_VERSION:
				SMC_RET1(handle, SUCCESS);
				break;	/* not reached */

			case ARM_DRTM_SVC_FEATURES:
				SMC_RET1(handle, SUCCESS);
				break;	/* not reached */

			case ARM_DRTM_SVC_UNPROTECT_MEM:
				SMC_RET1(handle, SUCCESS);
				break;	/* not reached */

			case ARM_DRTM_SVC_DYNAMIC_LAUNCH:
				SMC_RET1(handle, SUCCESS);
				break;	/* not reached */

			case ARM_DRTM_SVC_CLOSE_LOCALITY:
				WARN("ARM_DRTM_SVC_CLOSE_LOCALITY feature %s",
				     "is not supported\n");
				SMC_RET1(handle, NOT_SUPPORTED);
				break;	/* not reached */

			case ARM_DRTM_SVC_GET_ERROR:
				SMC_RET1(handle, SUCCESS);
				break;	/* not reached */

			case ARM_DRTM_SVC_SET_ERROR:
				SMC_RET1(handle, SUCCESS);
				break;	/* not reached */

			case ARM_DRTM_SVC_SET_TCB_HASH:
				WARN("ARM_DRTM_SVC_SET_TCB_HASH feature %s",
				     "is not supported\n");
				SMC_RET1(handle, NOT_SUPPORTED);
				break;	/* not reached */

			case ARM_DRTM_SVC_LOCK_TCB_HASH:
				WARN("ARM_DRTM_SVC_LOCK_TCB_HASH feature %s",
				     "is not supported\n");
				SMC_RET1(handle, NOT_SUPPORTED);
				break;	/* not reached */

			default:
				ERROR("Unknown DRTM service function\n");
				SMC_RET1(handle, NOT_SUPPORTED);
				break;	/* not reached */
			}
		} else {
			/* Dispatch feature-based queries. */
			switch (x1 & ARM_DRTM_FEAT_ID_MASK) {
			case ARM_DRTM_FEATURES_TPM:
				INFO("++ DRTM service handler: TPM features\n");
				return drtm_features_tpm(handle);
				break;	/* not reached */

			case ARM_DRTM_FEATURES_MEM_REQ:
				INFO("++ DRTM service handler: Min. mem."
				     " requirement features\n");
				return drtm_features_mem_req(handle);
				break;	/* not reached */

			case ARM_DRTM_FEATURES_DMA_PROT:
				INFO("++ DRTM service handler: "
				     "DMA protection features\n");
				return drtm_features_dma_prot(handle);
				break;	/* not reached */

			case ARM_DRTM_FEATURES_BOOT_PE_ID:
				INFO("++ DRTM service handler: "
				     "Boot PE ID features\n");
				return drtm_features_boot_pe_id(handle);
				break;	/* not reached */

			case ARM_DRTM_FEATURES_TCB_HASHES:
				INFO("++ DRTM service handler: "
				     "TCB-hashes features\n");
				return drtm_features_tcb_hashes(handle);
				break;	/* not reached */

			default:
				ERROR("Unknown ARM DRTM service feature\n");
				SMC_RET1(handle, NOT_SUPPORTED);
				break;	/* not reached */
			}
		}

	case ARM_DRTM_SVC_UNPROTECT_MEM:
		INFO("DRTM service handler: unprotect mem\n");
		return drtm_unprotect_mem(handle);
		break;	/* not reached */

	case ARM_DRTM_SVC_DYNAMIC_LAUNCH:
		INFO("DRTM service handler: dynamic launch\n");
		return drtm_dynamic_launch(x1, handle);
		break;	/* not reached */

	case ARM_DRTM_SVC_CLOSE_LOCALITY:
		WARN("DRTM service handler: close locality %s\n",
		     "is not supported");
		SMC_RET1(handle, NOT_SUPPORTED);
		break;	/* not reached */

	case ARM_DRTM_SVC_GET_ERROR:
		INFO("DRTM service handler: get error\n");
		return drtm_get_error(handle);
		break;	/* not reached */

	case ARM_DRTM_SVC_SET_ERROR:
		INFO("DRTM service handler: set error\n");
		return drtm_set_error(x1, handle);
		break;	/* not reached */

	case ARM_DRTM_SVC_SET_TCB_HASH:
		WARN("DRTM service handler: set TCB hash %s\n",
		     "is not supported");
		SMC_RET1(handle, NOT_SUPPORTED);
		break;  /* not reached */

	case ARM_DRTM_SVC_LOCK_TCB_HASH:
		WARN("DRTM service handler: lock TCB hash %s\n",
		     "is not supported");
		SMC_RET1(handle, NOT_SUPPORTED);
		break;  /* not reached */

	default:
		ERROR("Unknown DRTM service function: 0x%x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
		break;	/* not reached */
	}

	/* not reached */
	SMC_RET1(handle, SMC_UNK);
}