/*
 * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch/aarch64/arch_features.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/ffa_svc.h>
#include <services/spmd_svc.h>
#include <smccc_helpers.h>
#include "spmd_private.h"

/*******************************************************************************
 * SPM Core context information.
 ******************************************************************************/
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information read from its manifest.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;

/*******************************************************************************
 * Helper to get the SPM Core context on the current CPU.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context(void)
{
	unsigned int linear_id = plat_my_core_pos();

	return &spm_core_context[linear_id];
}

/*******************************************************************************
 * Static function declarations.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);
static uint64_t spmd_ffa_error_return(void *handle,
				      int error_code);
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *handle);


/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
	cm_el1_sysregs_context_restore(SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

	/* Enter SPMC */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state */
	cm_el1_sysregs_context_save(SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#endif

	return rc;
}
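
/*
 * Illustrative call pairing (comment only, assuming spmd_spm_core_enter()
 * and spmd_spm_core_exit() are the assembly helpers that save and restore
 * the C runtime context around the world switch):
 *
 *	rc = spmd_spm_core_sync_entry(ctx);	// SPMD blocks here
 *	...					// SPMC runs in the secure world
 *	spmd_spm_core_sync_exit(rc);		// resumes the SPMD, rc in x0
 */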

/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/* Get current CPU context from SPMC context */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into the SPMC. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	panic();
}

/*******************************************************************************
 * Jump to the SPM Core for the first time.
 ******************************************************************************/
static int32_t spmd_init(void)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	uint64_t rc;

	VERBOSE("SPM Core init start.\n");
	ctx->state = SPMC_STATE_RESET;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("SPMC initialisation failed 0x%llx\n", rc);
		return 0;
	}

	ctx->state = SPMC_STATE_IDLE;
	VERBOSE("SPM Core init end.\n");

	return 1;
}
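
/*
 * Note: the return values above follow the bl32_init callback convention
 * used by other BL31 dispatchers (e.g. the TSPD): non-zero indicates that
 * the secure payload initialised successfully, zero that it did not.
 */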

/*******************************************************************************
 * Load the SPM Core manifest and initialise the SPM Core.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
	spmd_spm_core_context_t *spm_ctx = spmd_get_context();
	uint32_t ep_attr;
	int rc;

	/* Load the SPM Core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
	if (rc != 0) {
		WARN("No or invalid SPM Core manifest image provided by BL2\n");
		return rc;
	}

	/*
	 * Ensure that the SPM Core version is compatible with the SPM
	 * Dispatcher version.
	 */
	if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
	    (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
		WARN("Unsupported FFA version (%u.%u)\n",
		     spmc_attrs.major_version, spmc_attrs.minor_version);
		return -EINVAL;
	}
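
	/*
	 * Worked example (illustrative): if the SPMD is built with
	 * FFA_VERSION_MAJOR == 1 and FFA_VERSION_MINOR == 0, a manifest
	 * declaring version 1.0 passes the check above, whereas 1.1 (minor
	 * version too new) or 2.0 (major version mismatch) is rejected with
	 * -EINVAL.
	 */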

	VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
		spmc_attrs.minor_version);

	VERBOSE("SPM Core run time EL%x.\n",
		SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

	/* Validate the SPMC ID. Ensure the high bit is set. */
	if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
			SPMC_SECURE_ID_MASK) == 0U) {
		WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
		return -EINVAL;
	}
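
	/*
	 * Example (illustrative, assuming SPMC_SECURE_ID_SHIFT == 15 and
	 * SPMC_SECURE_ID_MASK == 1): an SPMC ID such as 0x8000 passes this
	 * check while 0x0001 does not, reflecting the convention that
	 * secure endpoint IDs carry the top bit set.
	 */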

	/* Validate the SPM Core execution state */
	if ((spmc_attrs.exec_state != MODE_RW_64) &&
	    (spmc_attrs.exec_state != MODE_RW_32)) {
		WARN("Unsupported SPM Core execution state 0x%x.\n",
		     spmc_attrs.exec_state);
		return -EINVAL;
	}

	VERBOSE("SPM Core execution state 0x%x.\n",
		spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
	/* Ensure manifest has not requested AArch32 state in S-EL2 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		WARN("AArch32 state at S-EL2 is not supported.\n");
		return -EINVAL;
	}

	/*
	 * Check that S-EL2 is supported on this system when S-EL2 is
	 * required for the SPMC.
	 */
	if (!is_armv8_4_sel2_present()) {
		WARN("SPM Core run time S-EL2 is not supported.\n");
		return -EINVAL;
	}
#endif /* SPMD_SPM_AT_SEL2 */

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);
	assert(spmc_ep_info->pc == BL32_BASE);

	/*
	 * Populate SPSR for SPM Core based upon validated parameters from the
	 * manifest.
	 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
						 SPSR_E_LITTLE,
						 DAIF_FIQ_BIT |
						 DAIF_IRQ_BIT |
						 DAIF_ABT_BIT);
	} else {
#if SPMD_SPM_AT_SEL2
		static const uint32_t runtime_el = MODE_EL2;
#else
		static const uint32_t runtime_el = MODE_EL1;
#endif
		spmc_ep_info->spsr = SPSR_64(runtime_el,
					     MODE_SP_ELX,
					     DISABLE_ALL_EXCEPTIONS);
	}

	/* Initialise SPM Core context with this entry point information */
	cm_setup_context(&spm_ctx->cpu_ctx, spmc_ep_info);

	/* Reuse PSCI affinity states to mark this SPMC context as off */
	spm_ctx->state = AFF_STATE_OFF;

	INFO("SPM Core setup done.\n");

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&spmd_init);

	return 0;
}
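
/*
 * Note on the boot flow: bl31_register_bl32_init() above defers the first
 * entry into the SPM Core; BL31 is expected to invoke spmd_init() later in
 * its initialisation sequence, before handing control to the normal world.
 */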

/*******************************************************************************
 * Initialise the context of the SPM Core.
 ******************************************************************************/
int spmd_setup(void)
{
	void *spmc_manifest;
	int rc;

	spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (spmc_ep_info == NULL) {
		WARN("No SPM Core image provided by BL2 boot loader.\n");
		return -EINVAL;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(spmc_ep_info->pc != 0ULL);

	/*
	 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
	 * be used as a manifest for the SPM Core at the next lower EL/mode.
	 */
	spmc_manifest = (void *)spmc_ep_info->args.arg0;
	if (spmc_manifest == NULL) {
		ERROR("Invalid or absent SPM Core manifest.\n");
		return -EINVAL;
	}

	/* Load the manifest and initialise the SPM Core */
	rc = spmd_spmc_init(spmc_manifest);
	if (rc != 0) {
		WARN("Booting device without SPM initialization.\n");
	}

	return rc;
}

/*******************************************************************************
 * Forward an SMC to the other security state.
 ******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *handle)
{
	uint32_t secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
	uint32_t secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;

	/* Save incoming security state */
	cm_el1_sysregs_context_save(secure_state_in);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(secure_state_in);
#endif

	/* Restore outgoing security state */
	cm_el1_sysregs_context_restore(secure_state_out);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(secure_state_out);
#endif
	cm_set_next_eret_context(secure_state_out);

	SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
			SMC_GET_GP(handle, CTX_GPREG_X5),
			SMC_GET_GP(handle, CTX_GPREG_X6),
			SMC_GET_GP(handle, CTX_GPREG_X7));
}
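
/*
 * A sketch of the register convention relied on above (per SMCCC): the
 * function ID travels in w0 and arguments in x1-x7. x1-x4 arrive here as C
 * parameters, while x5-x7 are copied verbatim from the incoming context so
 * that all eight argument registers reach the other world unmodified.
 */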

/*******************************************************************************
 * Return FFA_ERROR with the specified error code.
 ******************************************************************************/
static uint64_t spmd_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}
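
/*
 * As used here, FFA_ERROR places the target information in w1 (must be
 * zero) and the error code in w2, with the remaining argument registers
 * zeroed; e.g. spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED)
 * reports "not supported" back to the caller.
 */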

/*******************************************************************************
 * This function handles all SMCs in the range reserved for FFA. Each call is
 * either forwarded to the other security state or handled by the SPM
 * dispatcher.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	bool secure_origin;
	int32_t ret;
	uint32_t input_version;

	/* Determine which security state this SMC originated from */
	secure_origin = is_caller_secure(flags);

	INFO("SPM: 0x%x 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n",
	     smc_fid, x1, x2, x3, x4, SMC_GET_GP(handle, CTX_GPREG_X5),
	     SMC_GET_GP(handle, CTX_GPREG_X6),
	     SMC_GET_GP(handle, CTX_GPREG_X7));

	switch (smc_fid) {
	case FFA_ERROR:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU. If so, then indicate that the SPM Core initialised
		 * unsuccessfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_RESET)) {
			spmd_spm_core_sync_exit(x2);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_VERSION:
		input_version = (uint32_t)(0xFFFFFFFF & x1);
		/*
		 * Sanity check the requested version: bit 31 must be zero and
		 * the SPMC must have been initialised. If the call originated
		 * from the Normal world, return the SPMC's version; if it
		 * originated from the Secure world, return the SPMD's version.
		 */
		if ((input_version & FFA_VERSION_BIT31_MASK) ||
		    (ctx->state == SPMC_STATE_RESET)) {
			ret = FFA_ERROR_NOT_SUPPORTED;
		} else if (!secure_origin) {
			ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
					       spmc_attrs.minor_version);
		} else {
			ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					       FFA_VERSION_MINOR);
		}

		SMC_RET8(handle, ret, FFA_TARGET_INFO_MBZ, FFA_TARGET_INFO_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break; /* not reached */

	case FFA_FEATURES:
		/*
		 * This is an optional interface. Do the minimal checks and
		 * forward to the SPM Core, which will handle it if
		 * implemented.
		 */

		/*
		 * Check if x1 holds a valid FFA fid. This is an
		 * optimization.
		 */
		if (!is_ffa_fid(x1)) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Forward SMC from the Normal world to the SPM Core */
		if (!secure_origin) {
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		}

		/*
		 * Return success if the call was from the Secure world, i.e.
		 * all FFA functions are supported. This is essentially a
		 * no-op.
		 */
		SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));
		break; /* not reached */

	case FFA_ID_GET:
		/* Returns the ID of the calling FFA component */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}

		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);
		break; /* not reached */

	case FFA_RX_RELEASE:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_MSG_RUN:
		/* These interfaces must be invoked only by the Normal world */
		if (secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Fall through to forward the call to the other world */

	case FFA_PARTITION_INFO_GET:
	case FFA_MSG_SEND:
	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MEM_DONATE_SMC32:
	case FFA_MEM_DONATE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_RECLAIM:
	case FFA_SUCCESS_SMC32:
	case FFA_SUCCESS_SMC64:
		/*
		 * TODO: Assume that no requests originate from EL3 at the
		 * moment. This will change if an SP service is required in
		 * response to secure interrupts targeted to EL3. Until then,
		 * simply forward the call to the other world.
		 */
		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case FFA_MSG_WAIT:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU from the Secure world. If so, then indicate that the
		 * SPM Core initialised successfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_RESET)) {
			spmd_spm_core_sync_exit(0);
		}

		/* Fall through to forward the call to the other world */

	case FFA_MSG_YIELD:
		/* This interface must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	default:
		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}
}
525