xref: /rk3399_ARM-atf/services/std_svc/spmd/spmd_pm.c (revision 46789a7c711d650ae9b2bad0c2b817c4ba4a214a)
1 /*
2  * Copyright (c) 2020-2021, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <errno.h>
9 #include <lib/el3_runtime/context_mgmt.h>
10 #include <lib/spinlock.h>
11 #include "spmd_private.h"
12 
/* Per-image secondary-boot state shared by the PM handlers below. */
static struct {
	/*
	 * True once the SPMC has registered a secondary entry point via
	 * FFA_SECONDARY_EP_REGISTER; further registrations are rejected.
	 */
	bool secondary_ep_locked;
	/* Entry point address secondary cores are resumed at when turned on. */
	uintptr_t secondary_ep;
	/* Protects the two fields above against concurrent CPU_ON/register. */
	spinlock_t lock;
} g_spmd_pm;
18 
19 /*******************************************************************************
20  * spmd_build_spmc_message
21  *
22  * Builds an SPMD to SPMC direct message request.
23  ******************************************************************************/
24 static void spmd_build_spmc_message(gp_regs_t *gpregs, unsigned long long message)
25 {
26 	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
27 	write_ctx_reg(gpregs, CTX_GPREG_X1,
28 		(SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
29 		spmd_spmc_id_get());
30 	write_ctx_reg(gpregs, CTX_GPREG_X2, FFA_PARAM_MBZ);
31 	write_ctx_reg(gpregs, CTX_GPREG_X3, message);
32 }
33 
34 /*******************************************************************************
35  * spmd_pm_secondary_ep_register
36  ******************************************************************************/
37 int spmd_pm_secondary_ep_register(uintptr_t entry_point)
38 {
39 	int ret = FFA_ERROR_INVALID_PARAMETER;
40 
41 	spin_lock(&g_spmd_pm.lock);
42 
43 	if (g_spmd_pm.secondary_ep_locked == true) {
44 		goto out;
45 	}
46 
47 	/*
48 	 * Check entry_point address is a PA within
49 	 * load_address <= entry_point < load_address + binary_size
50 	 */
51 	if (!spmd_check_address_in_binary_image(entry_point)) {
52 		ERROR("%s entry point is not within image boundaries\n",
53 			__func__);
54 		goto out;
55 	}
56 
57 	g_spmd_pm.secondary_ep = entry_point;
58 	g_spmd_pm.secondary_ep_locked = true;
59 
60 	VERBOSE("%s %lx\n", __func__, entry_point);
61 
62 	ret = 0;
63 
64 out:
65 	spin_unlock(&g_spmd_pm.lock);
66 
67 	return ret;
68 }
69 
70 /*******************************************************************************
71  * This CPU has been turned on. Enter SPMC to initialise S-EL1 or S-EL2. As part
72  * of the SPMC initialization path, they will initialize any SPs that they
73  * manage. Entry into SPMC is done after initialising minimal architectural
74  * state that guarantees safe execution.
75  ******************************************************************************/
76 static void spmd_cpu_on_finish_handler(u_register_t unused)
77 {
78 	spmd_spm_core_context_t *ctx = spmd_get_context();
79 	unsigned int linear_id = plat_my_core_pos();
80 	el3_state_t *el3_state;
81 	uintptr_t entry_point;
82 	uint64_t rc;
83 
84 	assert(ctx != NULL);
85 	assert(ctx->state != SPMC_STATE_ON);
86 
87 	spin_lock(&g_spmd_pm.lock);
88 
89 	/*
90 	 * Leave the possibility that the SPMC does not call
91 	 * FFA_SECONDARY_EP_REGISTER in which case re-use the
92 	 * primary core address for booting secondary cores.
93 	 */
94 	if (g_spmd_pm.secondary_ep_locked == true) {
95 		/*
96 		 * The CPU context has already been initialized at boot time
97 		 * (in spmd_spmc_init by a call to cm_setup_context). Adjust
98 		 * below the target core entry point based on the address
99 		 * passed to by FFA_SECONDARY_EP_REGISTER.
100 		 */
101 		entry_point = g_spmd_pm.secondary_ep;
102 		el3_state = get_el3state_ctx(&ctx->cpu_ctx);
103 		write_ctx_reg(el3_state, CTX_ELR_EL3, entry_point);
104 	}
105 
106 	spin_unlock(&g_spmd_pm.lock);
107 
108 	/* Mark CPU as initiating ON operation. */
109 	ctx->state = SPMC_STATE_ON_PENDING;
110 
111 	rc = spmd_spm_core_sync_entry(ctx);
112 	if (rc != 0ULL) {
113 		ERROR("%s failed (%llu) on CPU%u\n", __func__, rc,
114 			linear_id);
115 		ctx->state = SPMC_STATE_OFF;
116 		return;
117 	}
118 
119 	ctx->state = SPMC_STATE_ON;
120 
121 	VERBOSE("CPU %u on!\n", linear_id);
122 }
123 
124 /*******************************************************************************
125  * spmd_cpu_off_handler
126  ******************************************************************************/
127 static int32_t spmd_cpu_off_handler(u_register_t unused)
128 {
129 	spmd_spm_core_context_t *ctx = spmd_get_context();
130 	unsigned int linear_id = plat_my_core_pos();
131 	int64_t rc;
132 
133 	assert(ctx != NULL);
134 	assert(ctx->state != SPMC_STATE_OFF);
135 
136 	/* Build an SPMD to SPMC direct message request. */
137 	spmd_build_spmc_message(get_gpregs_ctx(&ctx->cpu_ctx), PSCI_CPU_OFF);
138 
139 	rc = spmd_spm_core_sync_entry(ctx);
140 	if (rc != 0ULL) {
141 		ERROR("%s failed (%llu) on CPU%u\n", __func__, rc, linear_id);
142 	}
143 
144 	/* Expect a direct message response from the SPMC. */
145 	u_register_t ffa_resp_func = read_ctx_reg(get_gpregs_ctx(&ctx->cpu_ctx),
146 						  CTX_GPREG_X0);
147 	if (ffa_resp_func != FFA_MSG_SEND_DIRECT_RESP_SMC32) {
148 		ERROR("%s invalid SPMC response (%lx).\n",
149 			__func__, ffa_resp_func);
150 		return -EINVAL;
151 	}
152 
153 	ctx->state = SPMC_STATE_OFF;
154 
155 	VERBOSE("CPU %u off!\n", linear_id);
156 
157 	return 0;
158 }
159 
160 /*******************************************************************************
161  * Structure populated by the SPM Dispatcher to perform any bookkeeping before
162  * PSCI executes a power mgmt. operation.
163  ******************************************************************************/
164 const spd_pm_ops_t spmd_pm = {
165 	.svc_on_finish = spmd_cpu_on_finish_handler,
166 	.svc_off = spmd_cpu_off_handler
167 };
168