xref: /rk3399_ARM-atf/bl32/sp_min/sp_min_main.c (revision c11ba852b970f2a125442da26d907c0842f09a25)
/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <debug.h>
#include <platform.h>
#include <platform_def.h>
#include <platform_sp_min.h>
#include <psci.h>
#include <runtime_svc.h>
#include <smcc_helpers.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <types.h>
#include "sp_min_private.h"

/* Pointers to per-core cpu contexts */
static void *sp_min_cpu_ctx_ptr[PLATFORM_CORE_COUNT];

/* SP_MIN only stores the non-secure SMC context */
static smc_ctx_t sp_min_smc_context[PLATFORM_CORE_COUNT];

/******************************************************************************
 * Define the smcc helper library APIs
 *****************************************************************************/
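/*
 * SP_MIN keeps a single per-CPU SMC context for the non-secure world, so the
 * "current" and "next" context queries below resolve to the same entry.
 */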
void *smc_get_ctx(int security_state)
{
	assert(security_state == NON_SECURE);
	return &sp_min_smc_context[plat_my_core_pos()];
}

void smc_set_next_ctx(int security_state)
{
	assert(security_state == NON_SECURE);
	/* SP_MIN stores only the non-secure SMC context. Nothing to do here */
}

void *smc_get_next_ctx(void)
{
	return &sp_min_smc_context[plat_my_core_pos()];
}

/*******************************************************************************
 * This function returns a pointer to the most recent 'cpu_context' structure
 * for the calling CPU that was set as the context for the specified security
 * state. NULL is returned if no such structure has been specified.
 ******************************************************************************/
void *cm_get_context(uint32_t security_state)
{
	assert(security_state == NON_SECURE);
	return sp_min_cpu_ctx_ptr[plat_my_core_pos()];
}

/*******************************************************************************
 * This function sets the pointer to the current 'cpu_context' structure for the
 * specified security state for the calling CPU
 ******************************************************************************/
void cm_set_context(void *context, uint32_t security_state)
{
	assert(security_state == NON_SECURE);
	sp_min_cpu_ctx_ptr[plat_my_core_pos()] = context;
}

/*******************************************************************************
 * This function returns a pointer to the most recent 'cpu_context' structure
 * for the CPU identified by `cpu_idx` that was set as the context for the
 * specified security state. NULL is returned if no such structure has been
 * specified.
 ******************************************************************************/
void *cm_get_context_by_index(unsigned int cpu_idx,
				unsigned int security_state)
{
	assert(security_state == NON_SECURE);
	return sp_min_cpu_ctx_ptr[cpu_idx];
}

/*******************************************************************************
 * This function sets the pointer to the current 'cpu_context' structure for the
 * specified security state for the CPU identified by CPU index.
 ******************************************************************************/
void cm_set_context_by_index(unsigned int cpu_idx, void *context,
				unsigned int security_state)
{
	assert(security_state == NON_SECURE);
	sp_min_cpu_ctx_ptr[cpu_idx] = context;
}

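/*
 * Helper to copy the registers needed for entry into the normal world (r0,
 * lr and spsr) from the cpu_context into the per-CPU SMC context. These are
 * the values programmed during `smc_exit`.
 */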
static void copy_cpu_ctx_to_smc_stx(const regs_t *cpu_reg_ctx,
				smc_ctx_t *next_smc_ctx)
{
	next_smc_ctx->r0 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R0);
	next_smc_ctx->lr_mon = read_ctx_reg(cpu_reg_ctx, CTX_LR);
	next_smc_ctx->spsr_mon = read_ctx_reg(cpu_reg_ctx, CTX_SPSR);
}

/*******************************************************************************
 * This function invokes the PSCI library interface to initialize the
 * non-secure cpu context and copies the relevant cpu context register values
 * to the SMC context. These registers will get programmed during `smc_exit`.
 ******************************************************************************/
static void sp_min_prepare_next_image_entry(void)
{
	entry_point_info_t *next_image_info;

	/* Program system registers to proceed to non-secure */
	next_image_info = sp_min_plat_get_bl33_ep_info();
	assert(next_image_info);
	assert(NON_SECURE == GET_SECURITY_STATE(next_image_info->h.attr));

	INFO("SP_MIN: Preparing exit to normal world\n");

	psci_prepare_next_non_secure_ctx(next_image_info);
	smc_set_next_ctx(NON_SECURE);

	/* Copy r0, lr and spsr from cpu context to SMC context */
	copy_cpu_ctx_to_smc_stx(get_regs_ctx(cm_get_context(NON_SECURE)),
			smc_get_next_ctx());
}

/******************************************************************************
 * The SP_MIN main function. Performs the platform and PSCI library setup and
 * initializes the runtime service framework.
 *****************************************************************************/
void sp_min_main(void)
{
	/* Perform platform setup in SP_MIN */
	sp_min_platform_setup();

	/*
	 * Initialize the PSCI library and perform the remaining generic
	 * architectural setup from PSCI.
	 */
	psci_setup((uintptr_t)sp_min_warm_entrypoint);

	/*
	 * Initialize the runtime services, e.g. PSCI.
	 * This is where the monitor mode will be initialized.
	 */
	INFO("SP_MIN: Initializing runtime services\n");
	runtime_svc_init();

	/*
	 * We are ready to enter the next EL. Prepare entry into the image
	 * corresponding to the desired security state after the next ERET.
	 */
	sp_min_prepare_next_image_entry();
}

/******************************************************************************
 * This function is invoked during warm boot. Invoke the PSCI library
 * warm boot entry point, which takes care of the architectural and platform
 * setup/restore. Copy the relevant cpu_context register values to the SMC
 * context, which will get programmed during `smc_exit`.
 *****************************************************************************/
void sp_min_warm_boot(void)
{
	smc_ctx_t *next_smc_ctx;

	psci_warmboot_entrypoint();

	smc_set_next_ctx(NON_SECURE);

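	/*
	 * Clear the SMC context for this core before repopulating it, so that
	 * no stale state is restored on the exit to the normal world.
	 */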
	next_smc_ctx = smc_get_next_ctx();
	memset(next_smc_ctx, 0, sizeof(smc_ctx_t));

	copy_cpu_ctx_to_smc_stx(get_regs_ctx(cm_get_context(NON_SECURE)),
			next_smc_ctx);
}