/*
 * Copyright (c) 2022 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier:    BSD-3-Clause
 *
 * DRTM service
 *
 * Authors:
 *	Lucian Paul-Trifu <lucian.paultrifu@gmail.com>
 *	Brian Nezvadovitz <brinez@microsoft.com> 2021-02-01
 */

#include <assert.h>
#include <stdint.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <drivers/auth/crypto_mod.h>
#include "drtm_main.h"
#include "drtm_measurements.h"
#include "drtm_remediation.h"
#include <lib/psci/psci_lib.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/platform.h>
#include <services/drtm_svc.h>
#include <platform_def.h>

/* Structure to store DRTM features specific to the platform. */
static drtm_features_t plat_drtm_features;

/* DRTM-formatted memory map. */
static drtm_memory_region_descriptor_table_t *plat_drtm_mem_map;

/*
 * DLME data header template, filled in at setup time and copied into the
 * DLME data region at launch.
 */
struct_dlme_data_header dlme_data_hdr_init;

/* Minimum size of the DLME data region, in bytes. */
uint64_t dlme_data_min_size;

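/*
 * One-time initialisation of the DRTM service: caches the platform's DRTM
 * capabilities and pre-computes the minimum DLME data region size.
 */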
int drtm_setup(void)
{
	bool rc;
	const plat_drtm_tpm_features_t *plat_tpm_feat;
	const plat_drtm_dma_prot_features_t *plat_dma_prot_feat;

	INFO("DRTM service setup\n");

	/* Read boot PE ID from MPIDR */
	plat_drtm_features.boot_pe_id = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	rc = drtm_dma_prot_init();
	if (rc) {
		return INTERNAL_ERROR;
	}

	/*
	 * Initialise the platform-supported crypto module, which the DRTM
	 * service uses to hash DRTM implementation-specific components.
	 */
	crypto_mod_init();

	/* Build DRTM-compatible address map. */
	plat_drtm_mem_map = drtm_build_address_map();
	if (plat_drtm_mem_map == NULL) {
		return INTERNAL_ERROR;
	}

	/* Get DRTM features from platform hooks. */
	plat_tpm_feat = plat_drtm_get_tpm_features();
	if (plat_tpm_feat == NULL) {
		return INTERNAL_ERROR;
	}

	plat_dma_prot_feat = plat_drtm_get_dma_prot_features();
	if (plat_dma_prot_feat == NULL) {
		return INTERNAL_ERROR;
	}

	/*
	 * Add up minimum DLME data memory.
	 *
	 * For systems with complete DMA protection there is only one entry in
	 * the protected regions table.
	 */
	if (plat_dma_prot_feat->dma_protection_support ==
			ARM_DRTM_DMA_PROT_FEATURES_DMA_SUPPORT_COMPLETE) {
		dlme_data_min_size =
			sizeof(drtm_memory_region_descriptor_table_t) +
			sizeof(drtm_mem_region_t);
		dlme_data_hdr_init.dlme_prot_regions_size = dlme_data_min_size;
	} else {
		/*
		 * TODO set protected regions table size based on platform DMA
		 * protection configuration
		 */
		panic();
	}

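	/*
	 * Record the size of each remaining DLME data sub-region in the header
	 * template; these sizes also contribute to the minimum DLME data size
	 * advertised through the DRTM features interface.
	 */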
	dlme_data_hdr_init.dlme_addr_map_size = drtm_get_address_map_size();
	dlme_data_hdr_init.dlme_tcb_hashes_table_size =
				plat_drtm_get_tcb_hash_table_size();
	dlme_data_hdr_init.dlme_impdef_region_size =
				plat_drtm_get_imp_def_dlme_region_size();

	dlme_data_min_size += dlme_data_hdr_init.dlme_addr_map_size +
			      PLAT_DRTM_EVENT_LOG_MAX_SIZE +
			      dlme_data_hdr_init.dlme_tcb_hashes_table_size +
			      dlme_data_hdr_init.dlme_impdef_region_size;

	/* Round the minimum size up to whole pages; keep the value in bytes. */
	dlme_data_min_size = page_align(dlme_data_min_size, UP);

	/* Fill out platform DRTM features structure */
	/* Only support default PCR schema (0x1) in this implementation. */
	ARM_DRTM_TPM_FEATURES_SET_PCR_SCHEMA(plat_drtm_features.tpm_features,
		ARM_DRTM_TPM_FEATURES_PCR_SCHEMA_DEFAULT);
	ARM_DRTM_TPM_FEATURES_SET_TPM_HASH(plat_drtm_features.tpm_features,
		plat_tpm_feat->tpm_based_hash_support);
	ARM_DRTM_TPM_FEATURES_SET_FW_HASH(plat_drtm_features.tpm_features,
		plat_tpm_feat->firmware_hash_algorithm);
	/* The minimum DLME data size is advertised in 4KB pages. */
	ARM_DRTM_MIN_MEM_REQ_SET_MIN_DLME_DATA_SIZE(plat_drtm_features.minimum_memory_requirement,
		dlme_data_min_size / PAGE_SIZE);
	ARM_DRTM_MIN_MEM_REQ_SET_DCE_SIZE(plat_drtm_features.minimum_memory_requirement,
		plat_drtm_get_min_size_normal_world_dce());
	ARM_DRTM_DMA_PROT_FEATURES_SET_MAX_REGIONS(plat_drtm_features.dma_prot_features,
		plat_dma_prot_feat->max_num_mem_prot_regions);
	ARM_DRTM_DMA_PROT_FEATURES_SET_DMA_SUPPORT(plat_drtm_features.dma_prot_features,
		plat_dma_prot_feat->dma_protection_support);
	ARM_DRTM_TCB_HASH_FEATURES_SET_MAX_NUM_HASHES(plat_drtm_features.tcb_hash_features,
		plat_drtm_get_tcb_hash_features());

	return 0;
}

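/*
 * DRTM_FEATURES helpers: each returns a 'feature supported' flag together
 * with the corresponding feature register prepared in drtm_setup().
 */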
static inline uint64_t drtm_features_tpm(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* TPM feature is supported */
		 plat_drtm_features.tpm_features);
}

static inline uint64_t drtm_features_mem_req(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* Memory requirement feature is supported */
		 plat_drtm_features.minimum_memory_requirement);
}

static inline uint64_t drtm_features_boot_pe_id(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* Boot PE feature is supported */
		 plat_drtm_features.boot_pe_id);
}

static inline uint64_t drtm_features_dma_prot(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* DMA protection feature is supported */
		 plat_drtm_features.dma_prot_features);
}

static inline uint64_t drtm_features_tcb_hashes(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* TCB hash feature is supported */
		 plat_drtm_features.tcb_hash_features);
}

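/*
 * Verify that the dynamic launch was requested from AArch64 at NS-EL1 or
 * NS-EL2; launches from EL3 or from AArch32 callers are rejected.
 */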
static enum drtm_retc drtm_dl_check_caller_el(void *ctx)
{
	uint64_t spsr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SPSR_EL3);
	uint64_t dl_caller_el;
	uint64_t dl_caller_aarch;

	dl_caller_el = spsr_el3 >> MODE_EL_SHIFT & MODE_EL_MASK;
	dl_caller_aarch = spsr_el3 >> MODE_RW_SHIFT & MODE_RW_MASK;

	/* The caller's security state has already been checked in drtm_smc_handler(). */

	/* The caller must be NS-EL2 or NS-EL1. */
	if (dl_caller_el == MODE_EL3) {
		ERROR("DRTM: invalid launch from EL3\n");
		return DENIED;
	}

	if (dl_caller_aarch != MODE_RW_64) {
		ERROR("DRTM: invalid launch from non-AArch64 execution state\n");
		return DENIED;
	}

	return SUCCESS;
}

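/*
 * A dynamic launch may only proceed on the boot PE, and only once every
 * other PE has been powered off via PSCI.
 */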
static enum drtm_retc drtm_dl_check_cores(void)
{
	bool running_on_single_core;
	uint64_t this_pe_aff_value = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	if (this_pe_aff_value != plat_drtm_features.boot_pe_id) {
		ERROR("DRTM: invalid launch on a non-boot PE\n");
		return DENIED;
	}

	running_on_single_core = psci_is_last_on_cpu_safe();
	if (!running_on_single_core) {
		ERROR("DRTM: invalid launch, non-boot PEs have not been turned off\n");
		return DENIED;
	}

	return SUCCESS;
}

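/*
 * Populate the DLME data region with, in order: the DLME data header, the
 * protected regions table, the DRTM address map, the TPM event log, the TCB
 * hashes table and the implementation-defined region.
 */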
static enum drtm_retc drtm_dl_prepare_dlme_data(const struct_drtm_dl_args *args)
{
	int rc;
	uint64_t dlme_data_paddr;
	size_t dlme_data_max_size;
	uintptr_t dlme_data_mapping;
	struct_dlme_data_header *dlme_data_hdr;
	uint8_t *dlme_data_cursor;
	size_t dlme_data_mapping_bytes;
	size_t serialised_bytes_actual;

	dlme_data_paddr = args->dlme_paddr + args->dlme_data_off;
	dlme_data_max_size = args->dlme_size - args->dlme_data_off;

	/*
	 * The capacity of the given DLME data region was already checked
	 * alongside the other dynamic launch arguments, so a failure here is
	 * an internal error.
	 */
	if (dlme_data_max_size < dlme_data_min_size) {
		ERROR("%s: assertion failed:"
		      " dlme_data_max_size (%lu) < dlme_data_min_size (%lu)\n",
		      __func__, dlme_data_max_size, dlme_data_min_size);
		panic();
	}

	/* Map the DLME data region as NS memory. */
	dlme_data_mapping_bytes = ALIGNED_UP(dlme_data_max_size, DRTM_PAGE_SIZE);
	rc = mmap_add_dynamic_region_alloc_va(dlme_data_paddr,
					      &dlme_data_mapping,
					      dlme_data_mapping_bytes,
					      MT_RW_DATA | MT_NS |
					      MT_SHAREABILITY_ISH);
	if (rc != 0) {
		WARN("DRTM: %s: mmap_add_dynamic_region() failed rc=%d\n",
		     __func__, rc);
		return INTERNAL_ERROR;
	}
	dlme_data_hdr = (struct_dlme_data_header *)dlme_data_mapping;
	dlme_data_cursor = (uint8_t *)dlme_data_hdr + sizeof(*dlme_data_hdr);

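	/* Seed the header with the sub-region sizes computed in drtm_setup(). */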
	memcpy(dlme_data_hdr, (const void *)&dlme_data_hdr_init,
	       sizeof(*dlme_data_hdr));

	/* Set the header version and size. */
	dlme_data_hdr->version = 1;
	dlme_data_hdr->this_hdr_size = sizeof(*dlme_data_hdr);

	/* Prepare DLME protected regions. */
	drtm_dma_prot_serialise_table(dlme_data_cursor,
				      &serialised_bytes_actual);
	assert(serialised_bytes_actual ==
	       dlme_data_hdr->dlme_prot_regions_size);
	dlme_data_cursor += serialised_bytes_actual;

	/* Prepare DLME address map. */
	if (plat_drtm_mem_map != NULL) {
		memcpy(dlme_data_cursor, plat_drtm_mem_map,
		       dlme_data_hdr->dlme_addr_map_size);
	} else {
		WARN("DRTM: cached DLME address map is not available\n");
	}
	dlme_data_cursor += dlme_data_hdr->dlme_addr_map_size;

	/* Prepare DRTM event log for DLME. */
	drtm_serialise_event_log(dlme_data_cursor, &serialised_bytes_actual);
	assert(serialised_bytes_actual <= PLAT_DRTM_EVENT_LOG_MAX_SIZE);
	dlme_data_hdr->dlme_tpm_log_size = serialised_bytes_actual;
	dlme_data_cursor += serialised_bytes_actual;

	/* TODO: Prepare the TCB hashes for the DLME; their size is currently 0. */
	dlme_data_cursor += dlme_data_hdr->dlme_tcb_hashes_table_size;

	/* The implementation-defined region is reserved but not populated. */
	dlme_data_cursor += dlme_data_hdr->dlme_impdef_region_size;

	/*
	 * Record the total DLME data size: every region referenced above plus
	 * the DLME data header itself.
	 */
	dlme_data_hdr->dlme_data_size = dlme_data_cursor - (uint8_t *)dlme_data_hdr;

	/* Unmap the DLME data region. */
	rc = mmap_remove_dynamic_region(dlme_data_mapping, dlme_data_mapping_bytes);
	if (rc != 0) {
		ERROR("%s(): mmap_remove_dynamic_region() failed"
		      " unexpectedly rc=%d\n", __func__, rc);
		panic();
	}

	return SUCCESS;
}

/*
 * Note: accesses to the dynamic launch arguments and to the DLME data are
 * little-endian as required, thanks to TF-A BL31 initialisation requirements.
 */
static enum drtm_retc drtm_dl_check_args(uint64_t x1,
					 struct_drtm_dl_args *a_out)
{
	uint64_t dlme_start, dlme_end;
	uint64_t dlme_img_start, dlme_img_ep, dlme_img_end;
	uint64_t dlme_data_start, dlme_data_end;
	uintptr_t args_mapping;
	size_t args_mapping_size;
	struct_drtm_dl_args *a;
	struct_drtm_dl_args args_buf;
	int rc;

	if (x1 % DRTM_PAGE_SIZE != 0) {
		ERROR("DRTM: parameters structure is not "
		      DRTM_PAGE_SIZE_STR "-aligned\n");
		return INVALID_PARAMETERS;
	}

	args_mapping_size = ALIGNED_UP(sizeof(struct_drtm_dl_args), DRTM_PAGE_SIZE);
	rc = mmap_add_dynamic_region_alloc_va(x1, &args_mapping, args_mapping_size,
					      MT_MEMORY | MT_NS | MT_RO |
					      MT_SHAREABILITY_ISH);
	if (rc != 0) {
		WARN("DRTM: %s: mmap_add_dynamic_region() failed rc=%d\n",
		     __func__, rc);
		return INTERNAL_ERROR;
	}
	a = (struct_drtm_dl_args *)args_mapping;
	/*
	 * TODO: invalidate all data cache before reading the data passed by the
	 * DCE Preamble.  This is required to avoid / defend against racing with
	 * cache evictions.
	 */
	args_buf = *a;

	rc = mmap_remove_dynamic_region(args_mapping, args_mapping_size);
	if (rc != 0) {
		ERROR("%s(): mmap_remove_dynamic_region() failed unexpectedly"
		      " rc=%d\n", __func__, rc);
		panic();
	}
	a = &args_buf;

	if (a->version != 1) {
		ERROR("DRTM: parameters structure incompatible with major version %d\n",
		      ARM_DRTM_VERSION_MAJOR);
		return NOT_SUPPORTED;
	}

	if (!(a->dlme_img_off < a->dlme_size &&
	      a->dlme_data_off < a->dlme_size)) {
		ERROR("DRTM: argument offset is outside of the DLME region\n");
		return INVALID_PARAMETERS;
	}
	dlme_start = a->dlme_paddr;
	dlme_end = a->dlme_paddr + a->dlme_size;
	dlme_img_start = a->dlme_paddr + a->dlme_img_off;
	dlme_img_ep = dlme_img_start + a->dlme_img_ep_off;
	dlme_img_end = dlme_img_start + a->dlme_img_size;
	dlme_data_start = a->dlme_paddr + a->dlme_data_off;
	dlme_data_end = dlme_end;

	/*
	 * TODO: validate that the DLME physical address range is all NS memory,
	 * return INVALID_PARAMETERS if it is not.
	 * Note that this check relies on platform-specific information. For
	 * examples, see psci_plat_pm_ops->validate_ns_entrypoint() or
	 * arm_validate_ns_entrypoint().
	 */

	/* Check the DLME regions arguments. */
	if ((dlme_start % DRTM_PAGE_SIZE) != 0) {
		ERROR("DRTM: argument DLME region is not "
		      DRTM_PAGE_SIZE_STR "-aligned\n");
		return INVALID_PARAMETERS;
	}

	if (!(dlme_start < dlme_end &&
	      dlme_start <= dlme_img_start && dlme_img_start < dlme_img_end &&
	      dlme_start <= dlme_data_start && dlme_data_start < dlme_data_end)) {
		ERROR("DRTM: argument DLME region is discontiguous\n");
		return INVALID_PARAMETERS;
	}

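	/* The DLME image and DLME data sub-regions must not overlap each other. */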
	if (dlme_img_start < dlme_data_end && dlme_data_start < dlme_img_end) {
		ERROR("DRTM: argument DLME regions overlap\n");
		return INVALID_PARAMETERS;
	}

	/* Check the DLME image region arguments. */
	if ((dlme_img_start % DRTM_PAGE_SIZE) != 0) {
		ERROR("DRTM: argument DLME image region is not "
		      DRTM_PAGE_SIZE_STR "-aligned\n");
		return INVALID_PARAMETERS;
	}

	if (!(dlme_img_start <= dlme_img_ep && dlme_img_ep < dlme_img_end)) {
		ERROR("DRTM: DLME entry point is outside of the DLME image region\n");
		return INVALID_PARAMETERS;
	}

	if ((dlme_img_ep % 4) != 0) {
		ERROR("DRTM: DLME image entry point is not 4-byte-aligned\n");
		return INVALID_PARAMETERS;
	}

	/* Check the DLME data region arguments. */
	if ((dlme_data_start % DRTM_PAGE_SIZE) != 0) {
		ERROR("DRTM: argument DLME data region is not "
		      DRTM_PAGE_SIZE_STR "-aligned\n");
		return INVALID_PARAMETERS;
	}

	if (dlme_data_end - dlme_data_start < dlme_data_min_size) {
		ERROR("DRTM: argument DLME data region is short by %lu bytes\n",
		      dlme_data_min_size - (size_t)(dlme_data_end - dlme_data_start));
		return INVALID_PARAMETERS;
	}

	/* Check the Normal World DCE region arguments. */
	if (a->dce_nwd_paddr != 0) {
		uint64_t dce_nwd_start = a->dce_nwd_paddr;
		uint64_t dce_nwd_end = dce_nwd_start + a->dce_nwd_size;

		if (!(dce_nwd_start < dce_nwd_end)) {
			ERROR("DRTM: argument Normal World DCE region is discontiguous\n");
			return INVALID_PARAMETERS;
		}

		if (dce_nwd_start < dlme_end && dlme_start < dce_nwd_end) {
			ERROR("DRTM: argument Normal World DCE region overlaps the DLME region\n");
			return INVALID_PARAMETERS;
		}
	}

	*a_out = *a;
	return SUCCESS;
}

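/*
 * Handler for DRTM_DYNAMIC_LAUNCH: validates the launch conditions and
 * arguments, engages DMA protection, takes the DRTM measurements and
 * prepares the DLME data region for the DLME.
 */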
static uint64_t drtm_dynamic_launch(uint64_t x1, void *handle)
{
	enum drtm_retc ret = SUCCESS;
	enum drtm_retc dma_prot_ret;
	struct_drtm_dl_args args;

	/* Ensure that only the boot PE is powered on. */
	ret = drtm_dl_check_cores();
	if (ret != SUCCESS) {
		SMC_RET1(handle, ret);
	}

	/*
	 * Ensure that the execution state is AArch64 and that the caller is
	 * the highest non-secure exception level.
	 */
	ret = drtm_dl_check_caller_el(handle);
	if (ret != SUCCESS) {
		SMC_RET1(handle, ret);
	}

	ret = drtm_dl_check_args(x1, &args);
	if (ret != SUCCESS) {
		SMC_RET1(handle, ret);
	}

	/*
	 * Engage the DMA protections.  The launch cannot proceed without the DMA
	 * protections due to potential TOC/TOU vulnerabilities w.r.t. the DLME
	 * region (and to the NWd DCE region).
	 */
	ret = drtm_dma_prot_engage(&args.dma_prot_args,
				   DL_ARGS_GET_DMA_PROT_TYPE(&args));
	if (ret != SUCCESS) {
		SMC_RET1(handle, ret);
	}

	/*
	 * The DMA protection is now engaged.  Note that any failure mode that
	 * returns an error to the DRTM-launch caller must now disengage DMA
	 * protections before returning to the caller.
	 */

	ret = drtm_take_measurements(&args);
	if (ret != SUCCESS) {
		goto err_undo_dma_prot;
	}

	ret = drtm_dl_prepare_dlme_data(&args);
	if (ret != SUCCESS) {
		goto err_undo_dma_prot;
	}

	SMC_RET1(handle, ret);

err_undo_dma_prot:
	dma_prot_ret = drtm_dma_prot_disengage();
	if (dma_prot_ret != SUCCESS) {
		ERROR("%s(): drtm_dma_prot_disengage() failed unexpectedly"
		      " rc=%d\n", __func__, dma_prot_ret);
		panic();
	}

	SMC_RET1(handle, ret);
}

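/* Top-level SMC handler for the ARM DRTM service. */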
uint64_t drtm_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	/* Check that the SMC call is from the Normal World. */
	if (!is_caller_non_secure(flags)) {
		SMC_RET1(handle, NOT_SUPPORTED);
	}

	switch (smc_fid) {
	case ARM_DRTM_SVC_VERSION:
		INFO("DRTM service handler: version\n");
		/* Return the version of the current implementation. */
		SMC_RET1(handle, ARM_DRTM_VERSION);
		break;	/* not reached */

	case ARM_DRTM_SVC_FEATURES:
		if (((x1 >> ARM_DRTM_FUNC_SHIFT) & ARM_DRTM_FUNC_MASK) ==
		    ARM_DRTM_FUNC_ID) {
			/* Dispatch function-based queries. */
			switch (x1 & FUNCID_MASK) {
			case ARM_DRTM_SVC_VERSION:
				SMC_RET1(handle, SUCCESS);
				break;	/* not reached */

			case ARM_DRTM_SVC_FEATURES:
				SMC_RET1(handle, SUCCESS);
				break;	/* not reached */

			case ARM_DRTM_SVC_UNPROTECT_MEM:
				SMC_RET1(handle, SUCCESS);
				break;	/* not reached */

			case ARM_DRTM_SVC_DYNAMIC_LAUNCH:
				SMC_RET1(handle, SUCCESS);
				break;	/* not reached */

			case ARM_DRTM_SVC_CLOSE_LOCALITY:
				WARN("ARM_DRTM_SVC_CLOSE_LOCALITY feature %s",
				     "is not supported\n");
				SMC_RET1(handle, NOT_SUPPORTED);
				break;	/* not reached */

			case ARM_DRTM_SVC_GET_ERROR:
				SMC_RET1(handle, SUCCESS);
				break;	/* not reached */

			case ARM_DRTM_SVC_SET_ERROR:
				SMC_RET1(handle, SUCCESS);
				break;	/* not reached */

			case ARM_DRTM_SVC_SET_TCB_HASH:
				WARN("ARM_DRTM_SVC_SET_TCB_HASH feature %s",
				     "is not supported\n");
				SMC_RET1(handle, NOT_SUPPORTED);
				break;	/* not reached */

			case ARM_DRTM_SVC_LOCK_TCB_HASH:
				WARN("ARM_DRTM_SVC_LOCK_TCB_HASH feature %s",
				     "is not supported\n");
				SMC_RET1(handle, NOT_SUPPORTED);
				break;	/* not reached */

			default:
				ERROR("Unknown DRTM service function\n");
				SMC_RET1(handle, NOT_SUPPORTED);
				break;	/* not reached */
			}
		} else {
			/* Dispatch feature-based queries. */
			switch (x1 & ARM_DRTM_FEAT_ID_MASK) {
			case ARM_DRTM_FEATURES_TPM:
				INFO("++ DRTM service handler: TPM features\n");
				return drtm_features_tpm(handle);
				break;	/* not reached */

			case ARM_DRTM_FEATURES_MEM_REQ:
				INFO("++ DRTM service handler: Min. mem."
				     " requirement features\n");
				return drtm_features_mem_req(handle);
				break;	/* not reached */

			case ARM_DRTM_FEATURES_DMA_PROT:
				INFO("++ DRTM service handler: "
				     "DMA protection features\n");
				return drtm_features_dma_prot(handle);
				break;	/* not reached */

			case ARM_DRTM_FEATURES_BOOT_PE_ID:
				INFO("++ DRTM service handler: "
				     "Boot PE ID features\n");
				return drtm_features_boot_pe_id(handle);
				break;	/* not reached */

			case ARM_DRTM_FEATURES_TCB_HASHES:
				INFO("++ DRTM service handler: "
				     "TCB-hashes features\n");
				return drtm_features_tcb_hashes(handle);
				break;	/* not reached */

			default:
				ERROR("Unknown ARM DRTM service feature\n");
				SMC_RET1(handle, NOT_SUPPORTED);
				break;	/* not reached */
			}
		}

	case ARM_DRTM_SVC_UNPROTECT_MEM:
		INFO("DRTM service handler: unprotect mem\n");
		return drtm_unprotect_mem(handle);
		break;	/* not reached */

	case ARM_DRTM_SVC_DYNAMIC_LAUNCH:
		INFO("DRTM service handler: dynamic launch\n");
		return drtm_dynamic_launch(x1, handle);
		break;	/* not reached */

	case ARM_DRTM_SVC_CLOSE_LOCALITY:
		WARN("DRTM service handler: close locality %s\n",
		     "is not supported");
		SMC_RET1(handle, NOT_SUPPORTED);
		break;	/* not reached */

	case ARM_DRTM_SVC_GET_ERROR:
		INFO("DRTM service handler: get error\n");
		return drtm_get_error(handle);
		break;	/* not reached */

	case ARM_DRTM_SVC_SET_ERROR:
		INFO("DRTM service handler: set error\n");
		return drtm_set_error(x1, handle);
		break;	/* not reached */

	case ARM_DRTM_SVC_SET_TCB_HASH:
		WARN("DRTM service handler: set TCB hash %s\n",
		     "is not supported");
		SMC_RET1(handle, NOT_SUPPORTED);
		break;  /* not reached */

	case ARM_DRTM_SVC_LOCK_TCB_HASH:
		WARN("DRTM service handler: lock TCB hash %s\n",
		     "is not supported");
		SMC_RET1(handle, NOT_SUPPORTED);
		break;  /* not reached */

	default:
		ERROR("Unknown DRTM service function: 0x%x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
		break;	/* not reached */
	}

	/* not reached */
	SMC_RET1(handle, SMC_UNK);
}