xref: /rk3399_ARM-atf/services/spd/tspd/tspd_main.c (revision 916a2c1ec16eed4199f27a24b8e2985275cda423)
1375f538aSAchin Gupta /*
2375f538aSAchin Gupta  * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
3375f538aSAchin Gupta  *
4375f538aSAchin Gupta  * Redistribution and use in source and binary forms, with or without
5375f538aSAchin Gupta  * modification, are permitted provided that the following conditions are met:
6375f538aSAchin Gupta  *
7375f538aSAchin Gupta  * Redistributions of source code must retain the above copyright notice, this
8375f538aSAchin Gupta  * list of conditions and the following disclaimer.
9375f538aSAchin Gupta  *
10375f538aSAchin Gupta  * Redistributions in binary form must reproduce the above copyright notice,
11375f538aSAchin Gupta  * this list of conditions and the following disclaimer in the documentation
12375f538aSAchin Gupta  * and/or other materials provided with the distribution.
13375f538aSAchin Gupta  *
14375f538aSAchin Gupta  * Neither the name of ARM nor the names of its contributors may be used
15375f538aSAchin Gupta  * to endorse or promote products derived from this software without specific
16375f538aSAchin Gupta  * prior written permission.
17375f538aSAchin Gupta  *
18375f538aSAchin Gupta  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19375f538aSAchin Gupta  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20375f538aSAchin Gupta  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21375f538aSAchin Gupta  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22375f538aSAchin Gupta  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23375f538aSAchin Gupta  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24375f538aSAchin Gupta  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25375f538aSAchin Gupta  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26375f538aSAchin Gupta  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27375f538aSAchin Gupta  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28375f538aSAchin Gupta  * POSSIBILITY OF SUCH DAMAGE.
29375f538aSAchin Gupta  */
30375f538aSAchin Gupta 
31375f538aSAchin Gupta 
32375f538aSAchin Gupta /*******************************************************************************
33375f538aSAchin Gupta  * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a
34375f538aSAchin Gupta  * plug-in component to the Secure Monitor, registered as a runtime service. The
35375f538aSAchin Gupta  * SPD is expected to be a functional extension of the Secure Payload (SP) that
36375f538aSAchin Gupta  * executes in Secure EL1. The Secure Monitor will delegate all SMCs targeting
37375f538aSAchin Gupta  * the Trusted OS/Applications range to the dispatcher. The SPD will either
38375f538aSAchin Gupta  * handle the request locally or delegate it to the Secure Payload. It is also
39375f538aSAchin Gupta  * responsible for initialising and maintaining communication with the SP.
40375f538aSAchin Gupta  ******************************************************************************/
41375f538aSAchin Gupta #include <stdio.h>
42375f538aSAchin Gupta #include <string.h>
43375f538aSAchin Gupta #include <assert.h>
44375f538aSAchin Gupta #include <arch_helpers.h>
45375f538aSAchin Gupta #include <console.h>
46375f538aSAchin Gupta #include <platform.h>
47375f538aSAchin Gupta #include <psci_private.h>
48375f538aSAchin Gupta #include <context_mgmt.h>
49375f538aSAchin Gupta #include <runtime_svc.h>
50375f538aSAchin Gupta #include <bl31.h>
51375f538aSAchin Gupta #include <tsp.h>
52375f538aSAchin Gupta #include <psci.h>
53375f538aSAchin Gupta #include <tspd_private.h>
54375f538aSAchin Gupta #include <debug.h>
55375f538aSAchin Gupta 
/*******************************************************************************
 * Single structure to hold information about the various entry points into the
 * Secure Payload. It is initialised once on the primary core after a cold boot.
 * The pointer itself is stashed from the x1 argument of the TSP_ENTRY_DONE SMC
 * issued by the SP (see tspd_smc_handler); it refers to memory owned by the SP.
 ******************************************************************************/
entry_info *tsp_entry_info;

/*******************************************************************************
 * Array to keep track of per-cpu Secure Payload state. Indexed by the linear
 * core position returned by platform_get_core_pos(); each entry holds the
 * saved secure cpu_context and the TSP power state for that core.
 ******************************************************************************/
tsp_context tspd_sp_context[TSPD_CORE_COUNT];
66375f538aSAchin Gupta 
67375f538aSAchin Gupta /*******************************************************************************
68375f538aSAchin Gupta  * Secure Payload Dispatcher setup. The SPD finds out the SP entrypoint and type
69375f538aSAchin Gupta  * (aarch32/aarch64) if not already known and initialises the context for entry
70375f538aSAchin Gupta  * into the SP for its initialisation.
71375f538aSAchin Gupta  ******************************************************************************/
72375f538aSAchin Gupta int32_t tspd_setup(void)
73375f538aSAchin Gupta {
74375f538aSAchin Gupta 	el_change_info *image_info;
75375f538aSAchin Gupta 	int32_t rc;
76375f538aSAchin Gupta 	uint64_t mpidr = read_mpidr();
77375f538aSAchin Gupta 	uint32_t linear_id;
78375f538aSAchin Gupta 
79375f538aSAchin Gupta 	linear_id = platform_get_core_pos(mpidr);
80375f538aSAchin Gupta 
81375f538aSAchin Gupta 	/*
82375f538aSAchin Gupta 	 * Get information about the Secure Payload (BL32) image. Its
83375f538aSAchin Gupta 	 * absence is a critical failure.  TODO: Add support to
84375f538aSAchin Gupta 	 * conditionally include the SPD service
85375f538aSAchin Gupta 	 */
86375f538aSAchin Gupta 	image_info = bl31_get_next_image_info(SECURE);
87375f538aSAchin Gupta 	assert(image_info);
88375f538aSAchin Gupta 
89375f538aSAchin Gupta 	/*
90375f538aSAchin Gupta 	 * We could inspect the SP image and determine it's execution
91375f538aSAchin Gupta 	 * state i.e whether AArch32 or AArch64. Assuming it's AArch64
92375f538aSAchin Gupta 	 * for the time being.
93375f538aSAchin Gupta 	 */
94375f538aSAchin Gupta 	rc = tspd_init_secure_context(image_info->entrypoint,
95375f538aSAchin Gupta 				     TSP_AARCH64,
96375f538aSAchin Gupta 				     mpidr,
97375f538aSAchin Gupta 				     &tspd_sp_context[linear_id]);
98375f538aSAchin Gupta 	assert(rc == 0);
99375f538aSAchin Gupta 
100375f538aSAchin Gupta 	return rc;
101375f538aSAchin Gupta }
102375f538aSAchin Gupta 
103375f538aSAchin Gupta /*******************************************************************************
104375f538aSAchin Gupta  * This function passes control to the Secure Payload image (BL32) for the first
105375f538aSAchin Gupta  * time on the primary cpu after a cold boot. It assumes that a valid secure
106375f538aSAchin Gupta  * context has already been created by tspd_setup() which can be directly used.
107375f538aSAchin Gupta  * It also assumes that a valid non-secure context has been initialised by PSCI
108375f538aSAchin Gupta  * so it does not need to save and restore any non-secure state. This function
109375f538aSAchin Gupta  * performs a synchronous entry into the Secure payload. The SP passes control
110375f538aSAchin Gupta  * back to this routine through a SMC. It also passes the extents of memory made
111375f538aSAchin Gupta  * available to BL32 by BL31.
112375f538aSAchin Gupta  ******************************************************************************/
113375f538aSAchin Gupta int32_t bl32_init(meminfo *bl32_meminfo)
114375f538aSAchin Gupta {
115375f538aSAchin Gupta 	uint64_t mpidr = read_mpidr();
116375f538aSAchin Gupta 	uint32_t linear_id = platform_get_core_pos(mpidr);
117375f538aSAchin Gupta 	uint64_t rc;
118375f538aSAchin Gupta 	tsp_context *tsp_ctx = &tspd_sp_context[linear_id];
119375f538aSAchin Gupta 
120375f538aSAchin Gupta 	/*
121375f538aSAchin Gupta 	 * Arrange for passing a pointer to the meminfo structure
122375f538aSAchin Gupta 	 * describing the memory extents available to the secure
123375f538aSAchin Gupta 	 * payload.
124375f538aSAchin Gupta 	 * TODO: We are passing a pointer to BL31 internal memory
125375f538aSAchin Gupta 	 * whereas this structure should be copied to a communication
126375f538aSAchin Gupta 	 * buffer between the SP and SPD.
127375f538aSAchin Gupta 	 */
128375f538aSAchin Gupta 	write_ctx_reg(get_gpregs_ctx(&tsp_ctx->cpu_ctx),
129375f538aSAchin Gupta 		      CTX_GPREG_X0,
130375f538aSAchin Gupta 		      (uint64_t) bl32_meminfo);
131375f538aSAchin Gupta 
132607084eeSAchin Gupta 	/*
133607084eeSAchin Gupta 	 * Arrange for an entry into the test secure payload. We expect an array
134607084eeSAchin Gupta 	 * of vectors in return
135607084eeSAchin Gupta 	 */
136375f538aSAchin Gupta 	rc = tspd_synchronous_sp_entry(tsp_ctx);
137375f538aSAchin Gupta 	assert(rc != 0);
138375f538aSAchin Gupta 	if (rc)
139375f538aSAchin Gupta 		tsp_ctx->state = TSP_STATE_ON;
140375f538aSAchin Gupta 
141375f538aSAchin Gupta 	return rc;
142375f538aSAchin Gupta }
143375f538aSAchin Gupta 
144375f538aSAchin Gupta /*******************************************************************************
145375f538aSAchin Gupta  * This function is responsible for handling all SMCs in the Trusted OS/App
146375f538aSAchin Gupta  * range from the non-secure state as defined in the SMC Calling Convention
147375f538aSAchin Gupta  * Document. It is also responsible for communicating with the Secure payload
148375f538aSAchin Gupta  * to delegate work and return results back to the non-secure state. Lastly it
149375f538aSAchin Gupta  * will also return any information that the secure payload needs to do the
150375f538aSAchin Gupta  * work assigned to it.
151375f538aSAchin Gupta  ******************************************************************************/
152375f538aSAchin Gupta uint64_t tspd_smc_handler(uint32_t smc_fid,
153375f538aSAchin Gupta 			 uint64_t x1,
154375f538aSAchin Gupta 			 uint64_t x2,
155375f538aSAchin Gupta 			 uint64_t x3,
156375f538aSAchin Gupta 			 uint64_t x4,
157375f538aSAchin Gupta 			 void *cookie,
158375f538aSAchin Gupta 			 void *handle,
159375f538aSAchin Gupta 			 uint64_t flags)
160375f538aSAchin Gupta {
161*916a2c1eSAchin Gupta 	cpu_context *ns_cpu_context;
162*916a2c1eSAchin Gupta 	gp_regs *ns_gp_regs;
163375f538aSAchin Gupta 	unsigned long mpidr = read_mpidr();
164375f538aSAchin Gupta 	uint32_t linear_id = platform_get_core_pos(mpidr), ns;
165*916a2c1eSAchin Gupta 	tsp_context *tsp_ctx = &tspd_sp_context[linear_id];
166375f538aSAchin Gupta 
167375f538aSAchin Gupta 	/* Determine which security state this SMC originated from */
168375f538aSAchin Gupta 	ns = is_caller_non_secure(flags);
169375f538aSAchin Gupta 
170375f538aSAchin Gupta 	switch (smc_fid) {
171375f538aSAchin Gupta 
172375f538aSAchin Gupta 	/*
173375f538aSAchin Gupta 	 * This function ID is used only by the SP to indicate it has
174375f538aSAchin Gupta 	 * finished initialising itself after a cold boot
175375f538aSAchin Gupta 	 */
176375f538aSAchin Gupta 	case TSP_ENTRY_DONE:
177375f538aSAchin Gupta 		if (ns)
178375f538aSAchin Gupta 			SMC_RET1(handle, SMC_UNK);
179375f538aSAchin Gupta 
180375f538aSAchin Gupta 		/*
181375f538aSAchin Gupta 		 * Stash the SP entry points information. This is done
182375f538aSAchin Gupta 		 * only once on the primary cpu
183375f538aSAchin Gupta 		 */
184375f538aSAchin Gupta 		assert(tsp_entry_info == NULL);
185375f538aSAchin Gupta 		tsp_entry_info = (entry_info *) x1;
186375f538aSAchin Gupta 
187375f538aSAchin Gupta 		/*
188375f538aSAchin Gupta 		 * SP reports completion. The SPD must have initiated
189375f538aSAchin Gupta 		 * the original request through a synchronous entry
190375f538aSAchin Gupta 		 * into the SP. Jump back to the original C runtime
191375f538aSAchin Gupta 		 * context.
192375f538aSAchin Gupta 		 */
193*916a2c1eSAchin Gupta 		tspd_synchronous_sp_exit(tsp_ctx, x1);
194375f538aSAchin Gupta 
195375f538aSAchin Gupta 		/* Should never reach here */
196375f538aSAchin Gupta 		assert(0);
197375f538aSAchin Gupta 
198607084eeSAchin Gupta 	/*
199607084eeSAchin Gupta 	 * These function IDs is used only by the SP to indicate it has
200607084eeSAchin Gupta 	 * finished:
201607084eeSAchin Gupta 	 * 1. turning itself on in response to an earlier psci
202607084eeSAchin Gupta 	 *    cpu_on request
203607084eeSAchin Gupta 	 * 2. resuming itself after an earlier psci cpu_suspend
204607084eeSAchin Gupta 	 *    request.
205607084eeSAchin Gupta 	 */
206607084eeSAchin Gupta 	case TSP_ON_DONE:
207607084eeSAchin Gupta 	case TSP_RESUME_DONE:
208607084eeSAchin Gupta 
209607084eeSAchin Gupta 	/*
210607084eeSAchin Gupta 	 * These function IDs is used only by the SP to indicate it has
211607084eeSAchin Gupta 	 * finished:
212607084eeSAchin Gupta 	 * 1. suspending itself after an earlier psci cpu_suspend
213607084eeSAchin Gupta 	 *    request.
214607084eeSAchin Gupta 	 * 2. turning itself off in response to an earlier psci
215607084eeSAchin Gupta 	 *    cpu_off request.
216607084eeSAchin Gupta 	 */
217607084eeSAchin Gupta 	case TSP_OFF_DONE:
218607084eeSAchin Gupta 	case TSP_SUSPEND_DONE:
219607084eeSAchin Gupta 		if (ns)
220607084eeSAchin Gupta 			SMC_RET1(handle, SMC_UNK);
221607084eeSAchin Gupta 
222607084eeSAchin Gupta 		/*
223607084eeSAchin Gupta 		 * SP reports completion. The SPD must have initiated the
224607084eeSAchin Gupta 		 * original request through a synchronous entry into the SP.
225607084eeSAchin Gupta 		 * Jump back to the original C runtime context, and pass x1 as
226607084eeSAchin Gupta 		 * return value to the caller
227607084eeSAchin Gupta 		 */
228*916a2c1eSAchin Gupta 		tspd_synchronous_sp_exit(tsp_ctx, x1);
229607084eeSAchin Gupta 
230607084eeSAchin Gupta 		/* Should never reach here */
231607084eeSAchin Gupta 		assert(0);
232607084eeSAchin Gupta 
233*916a2c1eSAchin Gupta 		/*
234*916a2c1eSAchin Gupta 		 * Request from non-secure client to perform an
235*916a2c1eSAchin Gupta 		 * arithmetic operation or response from secure
236*916a2c1eSAchin Gupta 		 * payload to an earlier request.
237*916a2c1eSAchin Gupta 		 */
238*916a2c1eSAchin Gupta 	case TSP_FID_ADD:
239*916a2c1eSAchin Gupta 	case TSP_FID_SUB:
240*916a2c1eSAchin Gupta 	case TSP_FID_MUL:
241*916a2c1eSAchin Gupta 	case TSP_FID_DIV:
242*916a2c1eSAchin Gupta 		if (ns) {
243*916a2c1eSAchin Gupta 			/*
244*916a2c1eSAchin Gupta 			 * This is a fresh request from the non-secure client.
245*916a2c1eSAchin Gupta 			 * The parameters are in x1 and x2. Figure out which
246*916a2c1eSAchin Gupta 			 * registers need to be preserved, save the non-secure
247*916a2c1eSAchin Gupta 			 * state and send the request to the secure payload.
248*916a2c1eSAchin Gupta 			 */
249*916a2c1eSAchin Gupta 			assert(handle == cm_get_context(mpidr, NON_SECURE));
250*916a2c1eSAchin Gupta 			cm_el1_sysregs_context_save(NON_SECURE);
251*916a2c1eSAchin Gupta 
252*916a2c1eSAchin Gupta 			/* Save x1 and x2 for use by TSP_GET_ARGS call below */
253*916a2c1eSAchin Gupta 			SMC_SET_GP(handle, CTX_GPREG_X1, x1);
254*916a2c1eSAchin Gupta 			SMC_SET_GP(handle, CTX_GPREG_X2, x2);
255*916a2c1eSAchin Gupta 
256*916a2c1eSAchin Gupta 			/*
257*916a2c1eSAchin Gupta 			 * We are done stashing the non-secure context. Ask the
258*916a2c1eSAchin Gupta 			 * secure payload to do the work now.
259*916a2c1eSAchin Gupta 			 */
260*916a2c1eSAchin Gupta 
261*916a2c1eSAchin Gupta 			/*
262*916a2c1eSAchin Gupta 			 * Verify if there is a valid context to use, copy the
263*916a2c1eSAchin Gupta 			 * operation type and parameters to the secure context
264*916a2c1eSAchin Gupta 			 * and jump to the fast smc entry point in the secure
265*916a2c1eSAchin Gupta 			 * payload. Entry into S-EL1 will take place upon exit
266*916a2c1eSAchin Gupta 			 * from this function.
267*916a2c1eSAchin Gupta 			 */
268*916a2c1eSAchin Gupta 			assert(&tsp_ctx->cpu_ctx == cm_get_context(mpidr, SECURE));
269*916a2c1eSAchin Gupta 			set_aapcs_args7(&tsp_ctx->cpu_ctx, smc_fid, x1, x2, 0, 0,
270*916a2c1eSAchin Gupta 					0, 0, 0);
271*916a2c1eSAchin Gupta 			cm_set_el3_elr(SECURE, (uint64_t) tsp_entry_info->fast_smc_entry);
272*916a2c1eSAchin Gupta 			cm_el1_sysregs_context_restore(SECURE);
273*916a2c1eSAchin Gupta 			cm_set_next_eret_context(SECURE);
274*916a2c1eSAchin Gupta 
275*916a2c1eSAchin Gupta 			return smc_fid;
276*916a2c1eSAchin Gupta 		} else {
277*916a2c1eSAchin Gupta 			/*
278*916a2c1eSAchin Gupta 			 * This is the result from the secure client of an
279*916a2c1eSAchin Gupta 			 * earlier request. The results are in x1-x2. Copy it
280*916a2c1eSAchin Gupta 			 * into the non-secure context, save the secure state
281*916a2c1eSAchin Gupta 			 * and return to the non-secure state.
282*916a2c1eSAchin Gupta 			 */
283*916a2c1eSAchin Gupta 			assert(handle == cm_get_context(mpidr, SECURE));
284*916a2c1eSAchin Gupta 			cm_el1_sysregs_context_save(SECURE);
285*916a2c1eSAchin Gupta 
286*916a2c1eSAchin Gupta 			/* Get a reference to the non-secure context */
287*916a2c1eSAchin Gupta 			ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
288*916a2c1eSAchin Gupta 			assert(ns_cpu_context);
289*916a2c1eSAchin Gupta 			ns_gp_regs = get_gpregs_ctx(ns_cpu_context);
290*916a2c1eSAchin Gupta 
291*916a2c1eSAchin Gupta 			/* Restore non-secure state */
292*916a2c1eSAchin Gupta 			cm_el1_sysregs_context_restore(NON_SECURE);
293*916a2c1eSAchin Gupta 			cm_set_next_eret_context(NON_SECURE);
294*916a2c1eSAchin Gupta 
295*916a2c1eSAchin Gupta 			SMC_RET2(ns_gp_regs, x1, x2);
296*916a2c1eSAchin Gupta 		}
297*916a2c1eSAchin Gupta 
298*916a2c1eSAchin Gupta 		break;
299*916a2c1eSAchin Gupta 
300*916a2c1eSAchin Gupta 		/*
301*916a2c1eSAchin Gupta 		 * This is a request from the secure payload for more arguments
302*916a2c1eSAchin Gupta 		 * for an ongoing arithmetic operation requested by the
303*916a2c1eSAchin Gupta 		 * non-secure world. Simply return the arguments from the non-
304*916a2c1eSAchin Gupta 		 * secure client in the original call.
305*916a2c1eSAchin Gupta 		 */
306*916a2c1eSAchin Gupta 	case TSP_GET_ARGS:
307*916a2c1eSAchin Gupta 		if (ns)
308*916a2c1eSAchin Gupta 			SMC_RET1(handle, SMC_UNK);
309*916a2c1eSAchin Gupta 
310*916a2c1eSAchin Gupta 		/* Get a reference to the non-secure context */
311*916a2c1eSAchin Gupta 		ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
312*916a2c1eSAchin Gupta 		assert(ns_cpu_context);
313*916a2c1eSAchin Gupta 		ns_gp_regs = get_gpregs_ctx(ns_cpu_context);
314*916a2c1eSAchin Gupta 
315*916a2c1eSAchin Gupta 		SMC_RET2(handle, read_ctx_reg(ns_gp_regs, CTX_GPREG_X1),
316*916a2c1eSAchin Gupta 				read_ctx_reg(ns_gp_regs, CTX_GPREG_X2));
317*916a2c1eSAchin Gupta 
318375f538aSAchin Gupta 	default:
319607084eeSAchin Gupta 		break;
320375f538aSAchin Gupta 	}
321375f538aSAchin Gupta 
322607084eeSAchin Gupta 	SMC_RET1(handle, SMC_UNK);
323375f538aSAchin Gupta }
324375f538aSAchin Gupta 
/*
 * Define a SPD runtime service descriptor. This registers the dispatcher with
 * the BL31 runtime services framework: fast SMCs whose OEN falls in the
 * Trusted OS range [OEN_TOS_START, OEN_TOS_END] are routed to
 * tspd_smc_handler(), and tspd_setup() is invoked once during BL31
 * initialisation to prepare the secure payload context.
 */
DECLARE_RT_SVC(
	spd,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_FAST,
	tspd_setup,
	tspd_smc_handler
);
335