xref: /rk3399_ARM-atf/services/std_svc/drtm/drtm_main.c (revision 8e26db7002c175e35b71b9ef2e54a7f3f94b2bb0)
1 /*
2  * Copyright (c) 2022-2026 Arm Limited. All rights reserved.
3  *
4  * SPDX-License-Identifier:    BSD-3-Clause
5  *
6  * DRTM service
7  *
8  * Authors:
9  *	Lucian Paul-Trifu <lucian.paultrifu@gmail.com>
10  *	Brian Nezvadovitz <brinez@microsoft.com> 2021-02-01
11  */
12 
13 #include <stdint.h>
14 
15 #include <arch.h>
16 #include <arch_helpers.h>
17 #include <common/bl_common.h>
18 #include <common/debug.h>
19 #include <common/runtime_svc.h>
20 #include <drivers/auth/crypto_mod.h>
21 #include "drtm_main.h"
22 #include "drtm_measurements.h"
23 #include "drtm_remediation.h"
24 #include <lib/el3_runtime/context_mgmt.h>
25 #include <lib/psci/psci_lib.h>
26 #include <lib/xlat_tables/xlat_tables_v2.h>
27 #include <plat/common/platform.h>
28 #include <services/drtm_svc.h>
29 #include <services/sdei.h>
30 #include <platform_def.h>
31 
/* Structure to store DRTM features specific to the platform. */
static drtm_features_t plat_drtm_features;
/* True when the platform reports any DLME image-authentication support
 * (derived once in drtm_setup()). */
static bool dlme_img_auth_supported;

/* DRTM-formatted memory map. */
static drtm_memory_region_descriptor_table_t *plat_drtm_mem_map;
/* Cached platform feature descriptors, fetched once in drtm_setup(). */
static const plat_drtm_dma_prot_features_t *plat_dma_prot_feat;
static const plat_drtm_tpm_features_t *plat_tpm_feat;

/* DLME header */
/* Template header copied into the DLME data region on every launch. */
struct_dlme_data_header dlme_data_hdr_init;

/* Minimum data memory requirement */
/* Minimum DLME data region size in bytes, accumulated in drtm_setup(). */
uint64_t dlme_data_min_size;
46 
/*
 * One-time DRTM service initialisation, run during BL31 setup.
 *
 * Records the boot PE affinity, initialises DMA protection and the crypto
 * module, builds the DRTM-compatible address map, caches the platform's
 * TPM/DMA-protection feature descriptors, pre-computes the minimum DLME
 * data region size, and fills out the feature values later reported by
 * DRTM_FEATURES queries.
 *
 * Returns 0 on success, INTERNAL_ERROR if a platform hook fails; panics
 * when the platform's DMA protection is not the "complete" type (only
 * case currently supported).
 */
int drtm_setup(void)
{
	bool rc;

	INFO("DRTM service setup\n");

	/* Read boot PE ID from MPIDR */
	plat_drtm_features.boot_pe_id = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	rc = drtm_dma_prot_init();
	if (rc) {
		return INTERNAL_ERROR;
	}

	/*
	 * initialise the platform supported crypto module that will
	 * be used by the DRTM-service to calculate hash of DRTM-
	 * implementation specific components
	 */
	crypto_mod_init();

	/* Build DRTM-compatible address map. */
	plat_drtm_mem_map = drtm_build_address_map();
	if (plat_drtm_mem_map == NULL) {
		return INTERNAL_ERROR;
	}

	/* Get DRTM features from platform hooks. */
	plat_tpm_feat = plat_drtm_get_tpm_features();
	if (plat_tpm_feat == NULL) {
		return INTERNAL_ERROR;
	}

	plat_dma_prot_feat = plat_drtm_get_dma_prot_features();
	if (plat_dma_prot_feat == NULL) {
		return INTERNAL_ERROR;
	}

	/*
	 * Add up minimum DLME data memory.
	 *
	 * For systems with complete DMA protection there is only one entry in
	 * the protected regions table.
	 */
	if (plat_dma_prot_feat->dma_protection_support ==
			ARM_DRTM_DMA_PROT_FEATURES_DMA_SUPPORT_COMPLETE) {
		dlme_data_min_size =
			sizeof(drtm_memory_region_descriptor_table_t) +
			sizeof(drtm_mem_region_t);
		dlme_data_hdr_init.dlme_prot_regions_size = dlme_data_min_size;
	} else {
		/*
		 * TODO set protected regions table size based on platform DMA
		 * protection configuration
		 */
		panic();
	}

	/* Record the sizes of the individual DLME data sub-regions. */
	dlme_data_hdr_init.dlme_addr_map_size = drtm_get_address_map_size();
	dlme_data_hdr_init.dlme_tcb_hashes_table_size =
				plat_drtm_get_tcb_hash_table_size();
	dlme_data_hdr_init.dlme_acpi_tables_region_size =
				plat_drtm_get_acpi_tables_region_size();
	dlme_data_hdr_init.dlme_impdef_region_size =
				plat_drtm_get_imp_def_dlme_region_size();

	/* Total minimum size: header plus every serialised sub-region. */
	dlme_data_min_size += sizeof(struct_dlme_data_header) +
			      dlme_data_hdr_init.dlme_addr_map_size +
			      ARM_DRTM_MIN_EVENT_LOG_SIZE +
			      dlme_data_hdr_init.dlme_tcb_hashes_table_size +
			      dlme_data_hdr_init.dlme_acpi_tables_region_size +
			      dlme_data_hdr_init.dlme_impdef_region_size;

	/* Fill out platform DRTM features structure */
	/* Only support default PCR schema (0x1) in this implementation. */
	ARM_DRTM_TPM_FEATURES_SET_PCR_SCHEMA(plat_drtm_features.tpm_features,
		ARM_DRTM_TPM_FEATURES_PCR_SCHEMA_DEFAULT);
	ARM_DRTM_TPM_FEATURES_SET_TPM_HASH(plat_drtm_features.tpm_features,
		plat_tpm_feat->tpm_based_hash_support);
	ARM_DRTM_TPM_FEATURES_SET_FW_HASH(plat_drtm_features.tpm_features,
		plat_tpm_feat->firmware_hash_algorithm);
	/* Minimum DLME data size is advertised in units of PAGE_SIZE pages. */
	ARM_DRTM_MIN_MEM_REQ_SET_MIN_DLME_DATA_SIZE(plat_drtm_features.minimum_memory_requirement,
		page_align(dlme_data_min_size, UP)/PAGE_SIZE);
	ARM_DRTM_MIN_MEM_REQ_SET_DCE_SIZE(plat_drtm_features.minimum_memory_requirement,
		plat_drtm_get_min_size_normal_world_dce());
	ARM_DRTM_DMA_PROT_FEATURES_SET_MAX_REGIONS(plat_drtm_features.dma_prot_features,
		plat_dma_prot_feat->max_num_mem_prot_regions);
	ARM_DRTM_DMA_PROT_FEATURES_SET_DMA_SUPPORT(plat_drtm_features.dma_prot_features,
		plat_dma_prot_feat->dma_protection_support);
	ARM_DRTM_TCB_HASH_FEATURES_SET_MAX_NUM_HASHES(plat_drtm_features.tcb_hash_features,
		plat_drtm_get_tcb_hash_features());
	ARM_DRTM_DLME_IMG_AUTH_SUPPORT(plat_drtm_features.dlme_image_auth_features,
		plat_drtm_get_dlme_img_auth_features());
	/* Cache whether any image-authentication support bit is set. */
	dlme_img_auth_supported =
		((plat_drtm_features.dlme_image_auth_features &
		  (ARM_DRTM_DLME_IMAGE_AUTH_SUPPORT_MASK <<
		   ARM_DRTM_DLME_IMAGE_AUTH_SUPPORT_SHIFT)) != 0ULL);

	return 0;
}
147 
/*
 * Invalidate all instruction caches to the Inner Shareable domain
 * ("ic ialluis"), then synchronise with DSB and ISB so no stale
 * instructions can be fetched afterwards.
 */
static inline void invalidate_icache_all(void)
{
	__asm__ volatile("ic      ialluis");
	dsb();
	isb();
}
154 
/* DRTM_FEATURES query: return the platform's TPM feature flags in x1. */
static inline uint64_t drtm_features_tpm(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* TPM feature is supported */
		 plat_drtm_features.tpm_features);
}
160 
/* DRTM_FEATURES query: return the minimum memory requirements in x1. */
static inline uint64_t drtm_features_mem_req(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* memory req Feature is supported */
		 plat_drtm_features.minimum_memory_requirement);
}
166 
/* DRTM_FEATURES query: return the boot PE affinity (MPIDR-derived) in x1. */
static inline uint64_t drtm_features_boot_pe_id(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* Boot PE feature is supported */
		 plat_drtm_features.boot_pe_id);
}
172 
/* DRTM_FEATURES query: return the DMA protection feature flags in x1. */
static inline uint64_t drtm_features_dma_prot(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* DMA protection feature is supported */
		 plat_drtm_features.dma_prot_features);
}
178 
/* DRTM_FEATURES query: return the TCB-hashes feature flags in x1. */
static inline uint64_t drtm_features_tcb_hashes(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* TCB hash feature is supported */
		 plat_drtm_features.tcb_hash_features);
}
184 
/* DRTM_FEATURES query: return the DLME image-authentication flags in x1. */
static inline uint64_t drtm_features_dlme_img_auth_features(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* DLME Image auth is supported */
		 plat_drtm_features.dlme_image_auth_features);
}
190 
drtm_dl_check_caller_el(void * ctx)191 static enum drtm_retc drtm_dl_check_caller_el(void *ctx)
192 {
193 	uint64_t spsr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SPSR_EL3);
194 	uint64_t dl_caller_el;
195 	uint64_t dl_caller_aarch;
196 
197 	dl_caller_el = spsr_el3 >> MODE_EL_SHIFT & MODE_EL_MASK;
198 	dl_caller_aarch = spsr_el3 >> MODE_RW_SHIFT & MODE_RW_MASK;
199 
200 	/* Caller's security state is checked from drtm_smc_handle function */
201 
202 	/* Caller can be NS-EL2/EL1 */
203 	if (dl_caller_el == MODE_EL3) {
204 		ERROR("DRTM: invalid launch from EL3\n");
205 		return DENIED;
206 	}
207 
208 	if (dl_caller_aarch != MODE_RW_64) {
209 		ERROR("DRTM: invalid launch from non-AArch64 execution state\n");
210 		return DENIED;
211 	}
212 
213 	return SUCCESS;
214 }
215 
drtm_dl_check_cores(void)216 static enum drtm_retc drtm_dl_check_cores(void)
217 {
218 	bool running_on_single_core;
219 	uint64_t this_pe_aff_value = read_mpidr_el1() & MPIDR_AFFINITY_MASK;
220 
221 	if (this_pe_aff_value != plat_drtm_features.boot_pe_id) {
222 		ERROR("DRTM: invalid launch on a non-boot PE\n");
223 		return DENIED;
224 	}
225 
226 	running_on_single_core = psci_is_last_on_cpu_safe(plat_my_core_pos());
227 	if (!running_on_single_core) {
228 		ERROR("DRTM: invalid launch due to non-boot PE not being turned off\n");
229 		return SECONDARY_PE_NOT_OFF;
230 	}
231 
232 	return SUCCESS;
233 }
234 
/*
 * Serialise the DLME data region for a validated dynamic launch.
 *
 * Maps the caller-supplied DLME data window as NS memory, writes the data
 * header (seeded from dlme_data_hdr_init), then appends in order: the DMA
 * protected-regions table, the DRTM address map, the DRTM event log and
 * the (currently empty) TCB hashes table.  The total serialised size is
 * recorded in the header before the region is unmapped again.
 *
 * Returns SUCCESS or INTERNAL_ERROR (mapping failure); panics on internal
 * inconsistencies that the argument checks should have ruled out.
 */
static enum drtm_retc drtm_dl_prepare_dlme_data(const struct_drtm_dl_args *args)
{
	int rc;
	uint64_t dlme_data_paddr;
	size_t dlme_data_max_size;
	uintptr_t dlme_data_mapping;
	struct_dlme_data_header *dlme_data_hdr;
	uint8_t *dlme_data_cursor;
	size_t dlme_data_mapping_bytes;
	size_t serialised_bytes_actual;

	dlme_data_paddr = args->dlme_paddr + args->dlme_data_off;
	dlme_data_max_size = args->dlme_size - args->dlme_data_off;

	/*
	 * The capacity of the given DLME data region is checked when
	 * the other dynamic launch arguments are.
	 */
	if (dlme_data_max_size < dlme_data_min_size) {
		ERROR("%s: assertion failed:"
		      " dlme_data_max_size (%ld) < dlme_data_min_size (%ld)\n",
		      __func__, dlme_data_max_size, dlme_data_min_size);
		panic();
	}

	/* Map the DLME data region as NS memory. */
	dlme_data_mapping_bytes = ALIGNED_UP(dlme_data_max_size, DRTM_PAGE_SIZE);
	rc = mmap_add_dynamic_region_alloc_va(dlme_data_paddr,
					      &dlme_data_mapping,
					      dlme_data_mapping_bytes,
					      MT_RW_DATA | MT_NS |
					      MT_SHAREABILITY_ISH);
	if (rc != 0) {
		WARN("DRTM: %s: mmap_add_dynamic_region() failed rc=%d\n",
		     __func__, rc);
		return INTERNAL_ERROR;
	}
	dlme_data_hdr = (struct_dlme_data_header *)dlme_data_mapping;
	dlme_data_cursor = (uint8_t *)dlme_data_hdr + sizeof(*dlme_data_hdr);

	/* Seed the header with the sizes computed at setup time. */
	memcpy(dlme_data_hdr, (const void *)&dlme_data_hdr_init,
	       sizeof(*dlme_data_hdr));

	/* Set the header version and size. */
	dlme_data_hdr->version = 1;
	dlme_data_hdr->this_hdr_size = sizeof(*dlme_data_hdr);

	/* Prepare DLME protected regions. */
	drtm_dma_prot_serialise_table(dlme_data_cursor,
				      &serialised_bytes_actual);
	assert(serialised_bytes_actual ==
	       dlme_data_hdr->dlme_prot_regions_size);
	dlme_data_cursor += serialised_bytes_actual;

	/* Prepare DLME address map. */
	if (plat_drtm_mem_map != NULL) {
		memcpy(dlme_data_cursor, plat_drtm_mem_map,
		       dlme_data_hdr->dlme_addr_map_size);
	} else {
		WARN("DRTM: DLME address map is not in the cache\n");
	}
	dlme_data_cursor += dlme_data_hdr->dlme_addr_map_size;

	/* Prepare DRTM event log for DLME. */
	drtm_serialise_event_log(dlme_data_cursor, &serialised_bytes_actual);
	assert(serialised_bytes_actual <= ARM_DRTM_MIN_EVENT_LOG_SIZE);
	dlme_data_hdr->dlme_tpm_log_size = serialised_bytes_actual;
	dlme_data_cursor +=  serialised_bytes_actual;

	/*
	 * TODO: Prepare the TCB hashes for DLME, currently its size
	 * 0
	 */
	dlme_data_cursor += dlme_data_hdr->dlme_tcb_hashes_table_size;

	/* Implementation-specific region size is unused. */
	dlme_data_cursor += dlme_data_hdr->dlme_impdef_region_size;

	/*
	 * NOTE(review): dlme_data_min_size and the header include
	 * dlme_acpi_tables_region_size, but the cursor is never advanced by
	 * it here, so dlme_data_size below excludes the ACPI tables region.
	 * Presumably that region is filled in elsewhere (e.g. by the DCE) —
	 * confirm this is intentional.
	 */

	/*
	 * Prepare DLME data size, includes all data region referenced above
	 * alongwith the DLME data header
	 */
	dlme_data_hdr->dlme_data_size = dlme_data_cursor - (uint8_t *)dlme_data_hdr;

	/* Unmap the DLME data region. */
	rc = mmap_remove_dynamic_region(dlme_data_mapping, dlme_data_mapping_bytes);
	if (rc != 0) {
		ERROR("%s(): mmap_remove_dynamic_region() failed"
		      " unexpectedly rc=%d\n", __func__, rc);
		panic();
	}

	return SUCCESS;
}
329 
330 /* Function to check if the value is valid for each bit field */
drtm_dl_check_features_sanity(uint32_t val)331 static int drtm_dl_check_features_sanity(uint32_t val)
332 {
333 	/**
334 	 * Ensure that if DLME Authorities Schema (Bits [2:1]) is set, then
335 	 * DLME image authentication (Bit[6]) must also be set
336 	 */
337 	if ((EXTRACT(DRTM_LAUNCH_FEAT_PCR_USAGE_SCHEMA, val) == DLME_AUTH_SCHEMA) &&
338 	    (EXTRACT(DRTM_LAUNCH_FEAT_DLME_IMG_AUTH, val) != DLME_IMG_AUTH)) {
339 		return INVALID_PARAMETERS;
340 	}
341 
342 	/**
343 	 * Check if DLME image authentication (Bit[6]) is supported by platform.
344 	 */
345 	if (EXTRACT(DRTM_LAUNCH_FEAT_DLME_IMG_AUTH, val) == DLME_IMG_AUTH) {
346 		if (!dlme_img_auth_supported) {
347 			return INVALID_PARAMETERS;
348 		}
349 	}
350 
351 	/**
352 	 * Check if Bits [5:3] (Memory protection type) matches with platform's
353 	 * memory protection type
354 	 */
355 	if (EXTRACT(DRTM_LAUNCH_FEAT_MEM_PROTECTION_TYPE, val) !=
356 	    __builtin_ctz(plat_dma_prot_feat->dma_protection_support)) {
357 		return INVALID_PARAMETERS;
358 	}
359 
360 	/**
361 	 * Check if Bits [0] (Type of hashing) matches with platform's
362 	 * supported hash type.
363 	 */
364 	if (EXTRACT(DRTM_LAUNCH_FEAT_HASHING_TYPE, val) !=
365 	    plat_tpm_feat->tpm_based_hash_support) {
366 		return INVALID_PARAMETERS;
367 	}
368 
369 	return 0;
370 }
371 
372 /*
373  * Note: accesses to the dynamic launch args, and to the DLME data are
374  * little-endian as required, thanks to TF-A BL31 init requirements.
375  */
/*
 * Fetch and validate the dynamic-launch argument structure whose physical
 * address was passed in x1, copying a sanitised snapshot to *a_out.
 *
 * The structure is mapped read-only as NS memory, flushed from the data
 * cache (to defend against racing cache evictions by the DCE Preamble),
 * copied into a local buffer and unmapped before any field is trusted.
 * Every region argument (DLME, DLME image, DLME data, Normal World DCE)
 * is then checked for alignment, containment, overlap and NS residency,
 * and finally the whole DLME region's cache lines are sanitised.
 *
 * Returns SUCCESS, or a DRTM error code suitable for the SMC caller.
 *
 * Fix: the Normal World DCE bounds were previously computed in uint32_t,
 * truncating 64-bit physical addresses and defeating the DCE/DLME overlap
 * check on platforms with memory above 4GB; they are now uint64_t.
 */
static enum drtm_retc drtm_dl_check_args(uint64_t x1,
					 struct_drtm_dl_args *a_out)
{
	uint64_t dlme_start, dlme_end;
	uint64_t dlme_img_start, dlme_img_ep, dlme_img_end;
	uint64_t dlme_data_start, dlme_data_end;
	uintptr_t va_mapping;
	size_t va_mapping_size;
	struct_drtm_dl_args *a;
	struct_drtm_dl_args args_buf;
	int rc;

	/* The parameters structure must be page-aligned. */
	if (x1 % DRTM_PAGE_SIZE != 0) {
		ERROR("DRTM: parameters structure is not "
		      DRTM_PAGE_SIZE_STR "-aligned\n");
		return INVALID_PARAMETERS;
	}

	va_mapping_size = ALIGNED_UP(sizeof(struct_drtm_dl_args), DRTM_PAGE_SIZE);

	/* check DRTM parameters are within NS address region */
	rc = plat_drtm_validate_ns_region(x1, va_mapping_size);
	if (rc != 0) {
		ERROR("DRTM: parameters lies within secure memory\n");
		return INVALID_PARAMETERS;
	}

	rc = mmap_add_dynamic_region_alloc_va(x1, &va_mapping, va_mapping_size,
					      MT_NS | MT_RO_DATA |
					      MT_SHAREABILITY_ISH);
	if (rc != 0) {
		WARN("DRTM: %s: mmap_add_dynamic_region() failed rc=%d\n",
		      __func__, rc);
		return INTERNAL_ERROR;
	}
	a = (struct_drtm_dl_args *)va_mapping;

	/* Sanitize cache of data passed in args by the DCE Preamble. */
	flush_dcache_range(va_mapping, va_mapping_size);

	/* Snapshot the arguments so later checks use a stable copy. */
	args_buf = *a;

	rc = mmap_remove_dynamic_region(va_mapping, va_mapping_size);
	if (rc != 0) {
		ERROR("%s(): mmap_remove_dynamic_region() failed unexpectedly"
		      " rc=%d\n", __func__, rc);
		panic();
	}
	a = &args_buf;

	if (!((a->version >= ARM_DRTM_PARAMS_MIN_VERSION) &&
	    (a->version <= ARM_DRTM_PARAMS_MAX_VERSION))) {
		ERROR("DRTM: parameters structure version %u is unsupported\n",
		      a->version);
		return NOT_SUPPORTED;
	}

	/* Validate the feature bit-fields against platform capabilities. */
	rc = drtm_dl_check_features_sanity(a->features);
	if (rc != 0) {
		ERROR("%s(): drtm_dl_check_features_sanity() failed.\n"
				" rc=%d\n", __func__, rc);
		return rc;
	}

	if (!(a->dlme_img_off < a->dlme_size &&
	      a->dlme_data_off < a->dlme_size)) {
		ERROR("DRTM: argument offset is outside of the DLME region\n");
		return INVALID_PARAMETERS;
	}
	dlme_start = a->dlme_paddr;
	dlme_end = a->dlme_paddr + a->dlme_size;
	dlme_img_start = a->dlme_paddr + a->dlme_img_off;
	dlme_img_ep = dlme_img_start + a->dlme_img_ep_off;
	dlme_img_end = dlme_img_start + a->dlme_img_size;
	dlme_data_start = a->dlme_paddr + a->dlme_data_off;
	dlme_data_end = dlme_end;

	/* Check the DLME regions arguments. */
	if ((dlme_start % DRTM_PAGE_SIZE) != 0) {
		ERROR("DRTM: argument DLME region is not "
		      DRTM_PAGE_SIZE_STR "-aligned\n");
		return INVALID_PARAMETERS;
	}

	if (!(dlme_start < dlme_end &&
	      dlme_start <= dlme_img_start && dlme_img_start < dlme_img_end &&
	      dlme_start <= dlme_data_start && dlme_data_start < dlme_data_end)) {
		ERROR("DRTM: argument DLME region is discontiguous\n");
		return INVALID_PARAMETERS;
	}

	/* The image and data sub-regions must not overlap each other. */
	if (dlme_img_start < dlme_data_end && dlme_data_start < dlme_img_end) {
		ERROR("DRTM: argument DLME regions overlap\n");
		return INVALID_PARAMETERS;
	}

	/* Check the DLME image region arguments. */
	if ((dlme_img_start % DRTM_PAGE_SIZE) != 0) {
		ERROR("DRTM: argument DLME image region is not "
		      DRTM_PAGE_SIZE_STR "-aligned\n");
		return INVALID_PARAMETERS;
	}

	if (!(dlme_img_start <= dlme_img_ep && dlme_img_ep < dlme_img_end)) {
		ERROR("DRTM: DLME entry point is outside of the DLME image region\n");
		return INVALID_PARAMETERS;
	}

	/* AArch64 instructions are 4 bytes; the entry point must be aligned. */
	if ((dlme_img_ep % 4) != 0) {
		ERROR("DRTM: DLME image entry point is not 4-byte-aligned\n");
		return INVALID_PARAMETERS;
	}

	/* Check the DLME data region arguments. */
	if ((dlme_data_start % DRTM_PAGE_SIZE) != 0) {
		ERROR("DRTM: argument DLME data region is not "
		      DRTM_PAGE_SIZE_STR "-aligned\n");
		return INVALID_PARAMETERS;
	}

	if (dlme_data_end - dlme_data_start < dlme_data_min_size) {
		ERROR("DRTM: argument DLME data region is short of %lu bytes\n",
		      dlme_data_min_size - (size_t)(dlme_data_end - dlme_data_start));
		return INVALID_PARAMETERS;
	}

	/* check DLME region (paddr + size) is within a NS address region */
	rc = plat_drtm_validate_ns_region(dlme_start, (size_t)a->dlme_size);
	if (rc != 0) {
		ERROR("DRTM: DLME region lies within secure memory\n");
		return INVALID_PARAMETERS;
	}

	/* Check the Normal World DCE region arguments. */
	if (a->dce_nwd_paddr != 0) {
		/*
		 * These are 64-bit physical addresses; computing them in
		 * 32 bits would truncate addresses above 4GB and break the
		 * overlap check below.
		 */
		uint64_t dce_nwd_start = a->dce_nwd_paddr;
		uint64_t dce_nwd_end = dce_nwd_start + a->dce_nwd_size;

		if (!(dce_nwd_start < dce_nwd_end)) {
			ERROR("DRTM: argument Normal World DCE region is dicontiguous\n");
			return INVALID_PARAMETERS;
		}

		if (dce_nwd_start < dlme_end && dlme_start < dce_nwd_end) {
			ERROR("DRTM: argument Normal World DCE regions overlap\n");
			return INVALID_PARAMETERS;
		}
	}

	/*
	 * Map and sanitize the cache of data range passed by DCE Preamble. This
	 * is required to avoid / defend against racing with cache evictions
	 */
	va_mapping_size = ALIGNED_UP((dlme_end - dlme_start), DRTM_PAGE_SIZE);
	rc = mmap_add_dynamic_region_alloc_va(dlme_start, &va_mapping, va_mapping_size,
					      MT_NS | MT_RO_DATA |
					      MT_SHAREABILITY_ISH);
	if (rc != 0) {
		ERROR("DRTM: %s: mmap_add_dynamic_region_alloc_va() failed rc=%d\n",
		      __func__, rc);
		return INTERNAL_ERROR;
	}
	flush_dcache_range(va_mapping, va_mapping_size);

	rc = mmap_remove_dynamic_region(va_mapping, va_mapping_size);
	if (rc) {
		ERROR("%s(): mmap_remove_dynamic_region() failed unexpectedly"
		      " rc=%d\n", __func__, rc);
		panic();
	}

	*a_out = *a;
	return SUCCESS;
}
550 
drtm_dl_reset_dlme_el_state(enum drtm_dlme_el dlme_el)551 static void drtm_dl_reset_dlme_el_state(enum drtm_dlme_el dlme_el)
552 {
553 	uint64_t sctlr;
554 
555 	switch (dlme_el) {
556 	case DLME_AT_EL1:
557 		sctlr = read_sctlr_el1();
558 		break;
559 
560 	case DLME_AT_EL2:
561 		sctlr = read_sctlr_el2();
562 		break;
563 
564 	default: /* Not reached */
565 		ERROR("%s(): dlme_el has the unexpected value %d\n",
566 		      __func__, dlme_el);
567 		panic();
568 	}
569 
570 	sctlr &= ~(/* Disable DLME's EL MMU, since the existing page-tables are untrusted. */
571 		   SCTLR_M_BIT
572 		   | SCTLR_EE_BIT               /* Little-endian data accesses. */
573 		   | SCTLR_C_BIT		/* disable data caching */
574 		   | SCTLR_I_BIT		/* disable instruction caching */
575 		  );
576 
577 	switch (dlme_el) {
578 	case DLME_AT_EL1:
579 		write_sctlr_el1(sctlr);
580 		break;
581 
582 	case DLME_AT_EL2:
583 		write_sctlr_el2(sctlr);
584 		break;
585 	}
586 }
587 
drtm_dl_reset_dlme_context(enum drtm_dlme_el dlme_el)588 static void drtm_dl_reset_dlme_context(enum drtm_dlme_el dlme_el)
589 {
590 	void *ns_ctx = cm_get_context(NON_SECURE);
591 	gp_regs_t *gpregs = get_gpregs_ctx(ns_ctx);
592 	uint64_t spsr_el3 = read_ctx_reg(get_el3state_ctx(ns_ctx), CTX_SPSR_EL3);
593 
594 	/* Reset all gpregs, including SP_EL0. */
595 	memset(gpregs, 0, sizeof(*gpregs));
596 
597 	/* Reset SP_ELx. */
598 	switch (dlme_el) {
599 	case DLME_AT_EL1:
600 		write_sp_el1(0);
601 		break;
602 
603 	case DLME_AT_EL2:
604 		write_sp_el2(0);
605 		break;
606 	}
607 
608 	/*
609 	 * DLME's async exceptions are masked to avoid a NWd attacker's timed
610 	 * interference with any state we established trust in or measured.
611 	 */
612 	spsr_el3 |= SPSR_DAIF_MASK << SPSR_DAIF_SHIFT;
613 
614 	write_ctx_reg(get_el3state_ctx(ns_ctx), CTX_SPSR_EL3, spsr_el3);
615 }
616 
drtm_dl_prepare_eret_to_dlme(const struct_drtm_dl_args * args,enum drtm_dlme_el dlme_el)617 static void drtm_dl_prepare_eret_to_dlme(const struct_drtm_dl_args *args, enum drtm_dlme_el dlme_el)
618 {
619 	void *ctx = cm_get_context(NON_SECURE);
620 	uint64_t dlme_ep = DL_ARGS_GET_DLME_ENTRY_POINT(args);
621 	uint64_t spsr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SPSR_EL3);
622 
623 	/* Next ERET is to the DLME's EL. */
624 	spsr_el3 &= ~(MODE_EL_MASK << MODE_EL_SHIFT);
625 	switch (dlme_el) {
626 	case DLME_AT_EL1:
627 		spsr_el3 |= MODE_EL1 << MODE_EL_SHIFT;
628 		break;
629 
630 	case DLME_AT_EL2:
631 		spsr_el3 |= MODE_EL2 << MODE_EL_SHIFT;
632 		break;
633 	}
634 
635 	/* Next ERET is to the DLME entry point. */
636 	cm_set_elr_spsr_el3(NON_SECURE, dlme_ep, spsr_el3);
637 }
638 
drtm_dynamic_launch(uint64_t x1,void * handle)639 static uint64_t drtm_dynamic_launch(uint64_t x1, void *handle)
640 {
641 	enum drtm_retc ret = SUCCESS;
642 	enum drtm_retc dma_prot_ret;
643 	struct_drtm_dl_args args;
644 	/* DLME should be highest NS exception level */
645 	enum drtm_dlme_el dlme_el = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;
646 
647 	/* Ensure that only boot PE is powered on */
648 	ret = drtm_dl_check_cores();
649 	if (ret != SUCCESS) {
650 		SMC_RET1(handle, ret);
651 	}
652 
653 	/*
654 	 * Ensure that execution state is AArch64 and the caller
655 	 * is highest non-secure exception level
656 	 */
657 	ret = drtm_dl_check_caller_el(handle);
658 	if (ret != SUCCESS) {
659 		SMC_RET1(handle, ret);
660 	}
661 
662 	ret = drtm_dl_check_args(x1, &args);
663 	if (ret != SUCCESS) {
664 		SMC_RET1(handle, ret);
665 	}
666 
667 	/* Ensure that there are no SDEI event registered */
668 #if SDEI_SUPPORT
669 	if (sdei_get_registered_event_count() != 0) {
670 		SMC_RET1(handle, DENIED);
671 	}
672 #endif /* SDEI_SUPPORT */
673 
674 	/*
675 	 * Engage the DMA protections.  The launch cannot proceed without the DMA
676 	 * protections due to potential TOC/TOU vulnerabilities w.r.t. the DLME
677 	 * region (and to the NWd DCE region).
678 	 */
679 	ret = drtm_dma_prot_engage(&args.dma_prot_args,
680 				   DL_ARGS_GET_DMA_PROT_TYPE(&args));
681 	if (ret != SUCCESS) {
682 		SMC_RET1(handle, ret);
683 	}
684 
685 	/*
686 	 * The DMA protection is now engaged.  Note that any failure mode that
687 	 * returns an error to the DRTM-launch caller must now disengage DMA
688 	 * protections before returning to the caller.
689 	 */
690 
691 	ret = drtm_take_measurements(&args);
692 	if (ret != SUCCESS) {
693 		goto err_undo_dma_prot;
694 	}
695 
696 	ret = drtm_dl_prepare_dlme_data(&args);
697 	if (ret != SUCCESS) {
698 		goto err_undo_dma_prot;
699 	}
700 
701 	/*
702 	 * Note that, at the time of writing, the DRTM spec allows a successful
703 	 * launch from NS-EL1 to return to a DLME in NS-EL2.  The practical risk
704 	 * of a privilege escalation, e.g. due to a compromised hypervisor, is
705 	 * considered small enough not to warrant the specification of additional
706 	 * DRTM conduits that would be necessary to maintain OSs' abstraction from
707 	 * the presence of EL2 were the dynamic launch only be allowed from the
708 	 * highest NS EL.
709 	 */
710 
711 	dlme_el = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;
712 
713 	drtm_dl_reset_dlme_el_state(dlme_el);
714 	drtm_dl_reset_dlme_context(dlme_el);
715 
716 	/*
717 	 * Setting the Generic Timer frequency is required before launching
718 	 * DLME and is already done for running CPU during PSCI setup.
719 	 */
720 	drtm_dl_prepare_eret_to_dlme(&args, dlme_el);
721 
722 	/*
723 	 * As per DRTM 1.0 spec table #30 invalidate the instruction cache
724 	 * before jumping to the DLME. This is required to defend against
725 	 * potentially-malicious cache contents.
726 	 */
727 	invalidate_icache_all();
728 
729 	/* Return the DLME region's address in x0, and the DLME data offset in x1.*/
730 	SMC_RET2(handle, args.dlme_paddr, args.dlme_data_off);
731 
732 err_undo_dma_prot:
733 	dma_prot_ret = drtm_dma_prot_disengage();
734 	if (dma_prot_ret != SUCCESS) {
735 		ERROR("%s(): drtm_dma_prot_disengage() failed unexpectedly"
736 		      " rc=%d\n", __func__, ret);
737 		panic();
738 	}
739 
740 	SMC_RET1(handle, ret);
741 }
742 
/*
 * Top-level SMC dispatcher for the DRTM service.
 *
 * Rejects any call that is not from the Normal World, then dispatches on
 * the function ID.  ARM_DRTM_SVC_FEATURES itself dispatches on x1: either
 * a function-presence query (when the ARM_DRTM_FUNC_ID discriminator is
 * set) or a feature-value query.  Every path returns to the caller via an
 * SMC_RET* macro; the trailing SMC_RET1 is unreachable defensive code.
 */
uint64_t drtm_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	/* Check that the SMC call is from the Normal World. */
	if (!is_caller_non_secure(flags)) {
		SMC_RET1(handle, NOT_SUPPORTED);
	}

	switch (smc_fid) {
	case ARM_DRTM_SVC_VERSION:
		INFO("DRTM service handler: version\n");
		/* Return the version of current implementation */
		SMC_RET1(handle, ARM_DRTM_VERSION);
		break;	/* not reached */

	case ARM_DRTM_SVC_FEATURES:
		if (((x1 >> ARM_DRTM_FUNC_SHIFT) & ARM_DRTM_FUNC_MASK) ==
		    ARM_DRTM_FUNC_ID) {
			/* Dispatch function-based queries. */
			switch (x1 & FUNCID_MASK) {
			case ARM_DRTM_SVC_VERSION:
				SMC_RET1(handle, SUCCESS);
				break;	/* not reached */

			case ARM_DRTM_SVC_FEATURES:
				SMC_RET1(handle, SUCCESS);
				break;	/* not reached */

			case ARM_DRTM_SVC_UNPROTECT_MEM:
				SMC_RET1(handle, SUCCESS);
				break;	/* not reached */

			case ARM_DRTM_SVC_DYNAMIC_LAUNCH:
				SMC_RET1(handle, SUCCESS);
				break;	/* not reached */

			case ARM_DRTM_SVC_CLOSE_LOCALITY:
				WARN("ARM_DRTM_SVC_CLOSE_LOCALITY feature %s",
				     "is not supported\n");
				SMC_RET1(handle, NOT_SUPPORTED);
				break;	/* not reached */

			case ARM_DRTM_SVC_GET_ERROR:
				SMC_RET1(handle, SUCCESS);
				break;	/* not reached */

			case ARM_DRTM_SVC_SET_ERROR:
				SMC_RET1(handle, SUCCESS);
				break;	/* not reached */

			case ARM_DRTM_SVC_SET_TCB_HASH:
				WARN("ARM_DRTM_SVC_TCB_HASH feature %s",
				     "is not supported\n");
				SMC_RET1(handle, NOT_SUPPORTED);
				break;	/* not reached */

			case ARM_DRTM_SVC_LOCK_TCB_HASH:
				WARN("ARM_DRTM_SVC_LOCK_TCB_HASH feature %s",
				     "is not supported\n");
				SMC_RET1(handle, NOT_SUPPORTED);
				break;	/* not reached */

			default:
				ERROR("Unknown DRTM service function\n");
				SMC_RET1(handle, NOT_SUPPORTED);
				break;	/* not reached */
			}
		} else {
			/* Dispatch feature-based queries. */
			switch (x1 & ARM_DRTM_FEAT_ID_MASK) {
			case ARM_DRTM_FEATURES_TPM:
				INFO("++ DRTM service handler: TPM features\n");
				return drtm_features_tpm(handle);
				break;	/* not reached */

			case ARM_DRTM_FEATURES_MEM_REQ:
				INFO("++ DRTM service handler: Min. mem."
				     " requirement features\n");
				return drtm_features_mem_req(handle);
				break;	/* not reached */

			case ARM_DRTM_FEATURES_DMA_PROT:
				INFO("++ DRTM service handler: "
				     "DMA protection features\n");
				return drtm_features_dma_prot(handle);
				break;	/* not reached */

			case ARM_DRTM_FEATURES_BOOT_PE_ID:
				INFO("++ DRTM service handler: "
				     "Boot PE ID features\n");
				return drtm_features_boot_pe_id(handle);
				break;	/* not reached */

			case ARM_DRTM_FEATURES_TCB_HASHES:
				INFO("++ DRTM service handler: "
				     "TCB-hashes features\n");
				return drtm_features_tcb_hashes(handle);
				break;	/* not reached */

			case ARM_DRTM_FEATURES_DLME_IMG_AUTH:
				INFO("++ DRTM service handler: "
				     "DLME Image authentication features\n");
				return drtm_features_dlme_img_auth_features(handle);
				break;	/* not reached */

			default:
				ERROR("Unknown ARM DRTM service feature\n");
				SMC_RET1(handle, NOT_SUPPORTED);
				break;	/* not reached */
			}
		}
		/* not reached: both inner switches return via SMC_RET above */

	case ARM_DRTM_SVC_UNPROTECT_MEM:
		INFO("DRTM service handler: unprotect mem\n");
		return drtm_unprotect_mem(handle);
		break;	/* not reached */

	case ARM_DRTM_SVC_DYNAMIC_LAUNCH:
		INFO("DRTM service handler: dynamic launch\n");
		return drtm_dynamic_launch(x1, handle);
		break;	/* not reached */

	case ARM_DRTM_SVC_CLOSE_LOCALITY:
		WARN("DRTM service handler: close locality %s\n",
		     "is not supported");
		SMC_RET1(handle, NOT_SUPPORTED);
		break;	/* not reached */

	case ARM_DRTM_SVC_GET_ERROR:
		INFO("DRTM service handler: get error\n");
		return drtm_get_error(handle);
		break;	/* not reached */

	case ARM_DRTM_SVC_SET_ERROR:
		INFO("DRTM service handler: set error\n");
		return drtm_set_error(x1, handle);
		break;	/* not reached */

	case ARM_DRTM_SVC_SET_TCB_HASH:
		WARN("DRTM service handler: set TCB hash %s\n",
		     "is not supported");
		SMC_RET1(handle, NOT_SUPPORTED);
		break;  /* not reached */

	case ARM_DRTM_SVC_LOCK_TCB_HASH:
		WARN("DRTM service handler: lock TCB hash %s\n",
		     "is not supported");
		SMC_RET1(handle, NOT_SUPPORTED);
		break;  /* not reached */

	default:
		ERROR("Unknown DRTM service function: 0x%x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
		break;	/* not reached */
	}

	/* not reached */
	SMC_RET1(handle, SMC_UNK);
}
908