xref: /rk3399_ARM-atf/services/std_svc/spmd/spmd_pm.c (revision c3e5f6b9854ad12e2b6d768f0058c7629f86aceb)
/*
 * Copyright (c) 2020-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>

#include <lib/el3_runtime/context_mgmt.h>
#include <lib/spinlock.h>
#include "spmd_private.h"

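/*
 * Bookkeeping for the SPMC secondary boot entry point. The entry point is
 * registered at most once (tracked by secondary_ep_locked) and accesses are
 * serialised with a spinlock since several cores may boot concurrently.
 */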
static struct {
	bool secondary_ep_locked;
	uintptr_t secondary_ep;
	spinlock_t lock;
} g_spmd_pm;

/*******************************************************************************
 * spmd_pm_secondary_ep_register
 *
 * Record the secondary core entry point passed by the SPMC through the
 * FFA_SECONDARY_EP_REGISTER interface. The entry point may only be registered
 * once and must be a PA within the SPMC image boundaries.
 ******************************************************************************/
int spmd_pm_secondary_ep_register(uintptr_t entry_point)
{
	int ret = FFA_ERROR_INVALID_PARAMETER;

	spin_lock(&g_spmd_pm.lock);

	if (g_spmd_pm.secondary_ep_locked == true) {
		goto out;
	}

	/*
	 * Check entry_point address is a PA within
	 * load_address <= entry_point < load_address + binary_size
	 */
	if (!spmd_check_address_in_binary_image(entry_point)) {
		ERROR("%s entry point is not within image boundaries\n",
			__func__);
		goto out;
	}

	g_spmd_pm.secondary_ep = entry_point;
	g_spmd_pm.secondary_ep_locked = true;

	VERBOSE("%s %lx\n", __func__, entry_point);

	ret = 0;

out:
	spin_unlock(&g_spmd_pm.lock);

	return ret;
}
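
/*
 * Illustrative sketch (not part of this build): the registration above is
 * expected to be reached when the SPMC issues the FFA_SECONDARY_EP_REGISTER
 * SMC with the secondary entry point address in x1, which the SPMD SMC
 * handler routes to spmd_pm_secondary_ep_register(). Using a hypothetical
 * ffa_smc() helper on the SPMC side, such a call could look like:
 *
 *	ffa_smc(FFA_SECONDARY_EP_REGISTER_SMC64,
 *		(uintptr_t)&spmc_secondary_entry, 0, 0, 0, 0, 0, 0);
 */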

/*******************************************************************************
 * This CPU has been turned on. Enter the SPMC to initialise S-EL1 or S-EL2. As
 * part of its initialization path, the SPMC initializes any SPs that it
 * manages. Entry into the SPMC is done after initialising a minimal
 * architectural state that guarantees safe execution.
 ******************************************************************************/
static void spmd_cpu_on_finish_handler(u_register_t unused)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	unsigned int linear_id = plat_my_core_pos();
	el3_state_t *el3_state;
	uintptr_t entry_point;
	uint64_t rc;

	assert(ctx != NULL);
	assert(ctx->state != SPMC_STATE_ON);

	spin_lock(&g_spmd_pm.lock);

	spmd_setup_context(linear_id);

	/*
	 * Allow for the possibility that the SPMC did not call
	 * FFA_SECONDARY_EP_REGISTER, in which case the primary core entry
	 * point is re-used for booting secondary cores.
	 */
	if (g_spmd_pm.secondary_ep_locked == true) {
		/*
		 * The CPU context has already been initialized (in
		 * spmd_setup_context through a call to cm_setup_context).
		 * Adjust the target core entry point below, based on the
		 * address passed by FFA_SECONDARY_EP_REGISTER.
		 */
		entry_point = g_spmd_pm.secondary_ep;
		el3_state = get_el3state_ctx(&ctx->cpu_ctx);
		write_ctx_reg(el3_state, CTX_ELR_EL3, entry_point);
	}

	spin_unlock(&g_spmd_pm.lock);

	/* Mark CPU as initiating ON operation. */
	ctx->state = SPMC_STATE_ON_PENDING;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("%s failed (%" PRIu64 ") on CPU%u\n", __func__, rc,
			linear_id);
		ctx->state = SPMC_STATE_OFF;
		return;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("CPU %u on!\n", linear_id);
}
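
/*
 * Note on invocation (generic PSCI framework behaviour): the handler above is
 * expected to run on a newly powered-on core from the PSCI CPU_ON finish path,
 * through the svc_on_finish hook registered in the spmd_pm structure at the
 * bottom of this file, before the core is released to the normal world.
 */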

/*******************************************************************************
 * spmd_cpu_off_handler
 *
 * This CPU is about to be turned off. Notify the SPMC through a PSCI CPU_OFF
 * framework message and check its power management response before reporting
 * the core as off.
 ******************************************************************************/
static int32_t spmd_cpu_off_handler(u_register_t unused)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	unsigned int linear_id = plat_my_core_pos();
	uint64_t rc;
	uint32_t ffa_resp_func_id, msg_flags;
	int status;

	assert(ctx != NULL);
	assert(ctx->state != SPMC_STATE_OFF);

	/* Build an SPMD to SPMC direct message request. */
	gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
	spmd_build_spmc_message(gpregs, FFA_FWK_MSG_PSCI, PSCI_CPU_OFF);
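	/*
	 * Layout note (as expected from spmd_build_spmc_message, defined
	 * elsewhere in the SPMD): the request built above carries
	 * FFA_MSG_SEND_DIRECT_REQ in x0, the SPMD and SPMC endpoint IDs in x1,
	 * the framework message bit together with the PSCI message type in x2,
	 * and the PSCI function id (here PSCI_CPU_OFF) in x3.
	 */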

	/* Clear remaining x8 - x17 at EL3/SEL2 or EL3/SEL1 boundary. */
	write_ctx_reg(gpregs, CTX_GPREG_X8, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X9, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X10, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X11, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X12, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X13, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X14, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X15, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X16, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X17, 0);

	/* Mark current core as processing a PSCI operation. */
	ctx->psci_operation_ongoing = true;

	rc = spmd_spm_core_sync_entry(ctx);

	if (rc != 0ULL) {
		ERROR("%s failed (%" PRIu64 ") on CPU%u\n", __func__, rc, linear_id);
	}

	ctx->psci_operation_ongoing = false;

	/* Expect a direct message response from the SPMC. */
	ffa_resp_func_id = (uint32_t)read_ctx_reg(get_gpregs_ctx(&ctx->cpu_ctx),
						  CTX_GPREG_X0);

	/*
	 * Retrieve flags indicating framework message and power management
	 * response.
	 */
	msg_flags = (uint32_t)read_ctx_reg(get_gpregs_ctx(&ctx->cpu_ctx),
						  CTX_GPREG_X2);

	/* Retrieve error code indicating status of power management operation. */
	status = (int)read_ctx_reg(get_gpregs_ctx(&ctx->cpu_ctx),
						  CTX_GPREG_X3);

	if (ffa_resp_func_id == FFA_ERROR) {
		/*
		 * It is likely that the SPMC does not support receiving PSCI
		 * operations through framework messages. The SPMD makes an
		 * implementation-defined choice not to treat this as a fatal
		 * error: it ignores the error and continues with the power
		 * management operation.
		 */
		VERBOSE("SPMC ignored PSCI CPU_OFF framework message\n");
	} else if (ffa_resp_func_id != FFA_MSG_SEND_DIRECT_RESP_SMC32) {
		ERROR("%s invalid SPMC response (%x).\n",
			__func__, ffa_resp_func_id);
		panic();
	} else if (((msg_flags & FFA_FWK_MSG_BIT) == 0U) ||
			 ((msg_flags & FFA_FWK_MSG_MASK) != FFA_PM_MSG_PM_RESP)) {
		ERROR("SPMC failed to send framework message response for power"
			" management operation, message flags = (%x)\n",
			 msg_flags);
		panic();
	} else if (status != PSCI_E_SUCCESS) {
		ERROR("SPMC denied CPU_OFF power management request\n");
		panic();
	} else {
		VERBOSE("CPU %u off!\n", linear_id);
	}

	ctx->state = SPMC_STATE_OFF;

	return 0;
}

/*******************************************************************************
 * Structure populated by the SPM Dispatcher to perform any bookkeeping before
 * PSCI executes a power mgmt. operation.
 ******************************************************************************/
const spd_pm_ops_t spmd_pm = {
	.svc_on_finish = spmd_cpu_on_finish_handler,
	.svc_off = spmd_cpu_off_handler
};
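
/*
 * Registration note (happens outside this file): the SPMD is expected to hand
 * these hooks to the generic PSCI layer during its initialisation, for example
 * with a call along the lines of:
 *
 *	psci_register_spd_pm_hook(&spmd_pm);
 *
 * after which the PSCI library invokes svc_off when a core is being turned off
 * and svc_on_finish once a powered-on core completes its CPU_ON sequence.
 */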