/*
 * Copyright (c) 2025, Arm Limited. All rights reserved.
 * Copyright (c) 2025, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <errno.h>
#include <string.h>

#include <common/debug.h>
#include <lib/spinlock.h>
#include <plat/common/platform.h>
#include <services/bl31_lfa.h>
#include <services/lfa_svc.h>
#include <services/rmmd_rmm_lfa.h>
#include <smccc_helpers.h>

static uint32_t lfa_component_count;
static plat_lfa_component_info_t *lfa_components;
static struct lfa_component_status current_activation;
static bool is_lfa_initialized;

/*
 * Spinlock to serialize LFA operations (PRIME, ACTIVATE).
 * This ensures that these calls from different CPUs are properly
 * serialized and do not execute concurrently, while still allowing
 * the same operation to be invoked from any CPU.
 */
static spinlock_t lfa_lock;

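/*
 * Reset the current activation state so that no component is tracked as
 * being primed or activated.
 */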
void lfa_reset_activation(void)
{
	current_activation.component_id = LFA_INVALID_COMPONENT;
	current_activation.prime_status = PRIME_NONE;
	current_activation.cpu_rendezvous_required = false;
}

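/*
 * Map generic error codes returned by the platform layer onto LFA error
 * codes.
 */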
static int convert_to_lfa_error(int ret)
{
	switch (ret) {
	case 0:
		return LFA_SUCCESS;
	case -EAUTH:
		return LFA_AUTH_ERROR;
	case -ENOMEM:
		return LFA_NO_MEMORY;
	default:
		return LFA_DEVICE_ERROR;
	}
}

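/*
 * Query the platform for its list of LFA firmware components. Returns
 * false if the platform reports no components.
 */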
static bool lfa_initialize_components(void)
{
	lfa_component_count = plat_lfa_get_components(&lfa_components);

	if (lfa_component_count == 0U || lfa_components == NULL) {
		/* Unlikely to reach here. */
		ERROR("Invalid LFA component setup: count = 0 or components are NULL\n");
		return false;
	}

	return true;
}

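/*
 * Build the activation flags returned by LFA_GET_INVENTORY from the
 * component's activator capabilities and its pending-activation state.
 */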
static uint64_t get_fw_activation_flags(uint32_t fw_seq_id)
{
	const plat_lfa_component_info_t *comp =
				&lfa_components[fw_seq_id];
	uint64_t flags = 0ULL;

	flags |= ((comp->activator == NULL ? 0ULL : 1ULL)
		 << LFA_ACTIVATION_CAPABLE_SHIFT);
	flags |= (uint64_t)(comp->activation_pending)
		 << LFA_ACTIVATION_PENDING_SHIFT;

	if (comp->activator != NULL) {
		flags |= ((comp->activator->may_reset_cpu ? 1ULL : 0ULL)
			 << LFA_MAY_RESET_CPU_SHIFT);
		flags |= ((comp->activator->cpu_rendezvous_required ? 0ULL : 1ULL)
			 << LFA_CPU_RENDEZVOUS_OPTIONAL_SHIFT);
	}

	return flags;
}

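/*
 * Handle LFA_CANCEL: abort the in-progress activation sequence for the
 * given component and reset the activation state.
 */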
static int lfa_cancel(uint32_t component_id)
{
	int ret = LFA_SUCCESS;

	if (lfa_component_count == 0U) {
		return LFA_WRONG_STATE;
	}

	/*
	 * Check that the component ID is in range and matches the current
	 * activation.
	 */
	if ((component_id >= lfa_component_count) ||
	    (component_id != current_activation.component_id)) {
		return LFA_INVALID_PARAMETERS;
	}

	ret = plat_lfa_cancel(component_id);
	if (ret != LFA_SUCCESS) {
		return LFA_BUSY;
	}

	/* TODO: add proper termination of the prime and activate phases. */
	lfa_reset_activation();

	return ret;
}

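/*
 * Handle LFA_ACTIVATE: validate the request against the current
 * activation state and invoke the component's activate hook, honouring a
 * request to skip CPU rendezvous only when the activator allows it.
 */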
static int lfa_activate(uint32_t component_id, uint64_t flags,
			uint64_t ep_address, uint64_t context_id)
{
	int ret = LFA_ACTIVATION_FAILED;
	struct lfa_component_ops *activator;

	if (lfa_component_count == 0U) {
		return LFA_COMPONENT_WRONG_STATE;
	}

	/*
	 * Check that component_id is in range before indexing the
	 * component table.
	 */
	if ((component_id >= lfa_component_count) ||
	    (current_activation.component_id != component_id)) {
		return LFA_INVALID_PARAMETERS;
	}

	if ((!lfa_components[component_id].activation_pending) ||
	    (current_activation.prime_status != PRIME_COMPLETE)) {
		return LFA_COMPONENT_WRONG_STATE;
	}

	if (lfa_components[component_id].activator == NULL) {
		return LFA_NOT_SUPPORTED;
	}

	ret = plat_lfa_notify_activate(component_id);
	if (ret != 0) {
		return LFA_ACTIVATION_FAILED;
	}

	activator = lfa_components[component_id].activator;
	if (activator->activate != NULL) {
		/*
		 * Honour the skip_cpu_rendezvous request (bit[0] of flags)
		 * only when the activator does not require CPU rendezvous.
		 */
		if (flags & LFA_SKIP_CPU_RENDEZVOUS_BIT) {
			if (!activator->cpu_rendezvous_required) {
				INFO("Skipping rendezvous requested by caller.\n");
				current_activation.cpu_rendezvous_required = false;
			} else {
				/* The caller cannot skip a mandatory rendezvous. */
				ERROR("CPU rendezvous is required, can't skip.\n");
				return LFA_INVALID_PARAMETERS;
			}
		}

		ret = activator->activate(&current_activation, ep_address,
					  context_id);
	}

	lfa_components[component_id].activation_pending = false;

	return ret;
}

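/*
 * Handle LFA_PRIME: load and authenticate the updated image for the given
 * component and run the activator's prime hook. Sets LFA_CALL_AGAIN in
 * *flags when the caller must issue further LFA_PRIME calls to complete
 * priming.
 */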
static int lfa_prime(uint32_t component_id, uint64_t *flags)
{
	int ret = LFA_SUCCESS;
	struct lfa_component_ops *activator;

	if (lfa_component_count == 0U) {
		return LFA_WRONG_STATE;
	}

	/*
	 * Check that component_id is in range before indexing the
	 * component table.
	 */
	if (component_id >= lfa_component_count) {
		return LFA_INVALID_PARAMETERS;
	}

	if (!lfa_components[component_id].activation_pending) {
		return LFA_WRONG_STATE;
	}

	if (lfa_components[component_id].activator == NULL) {
		return LFA_NOT_SUPPORTED;
	}

	switch (current_activation.prime_status) {
	case PRIME_NONE:
		current_activation.component_id = component_id;
		current_activation.prime_status = PRIME_STARTED;
		break;

	case PRIME_STARTED:
		if (current_activation.component_id != component_id) {
			/* A different component is trying to continue PRIME - error. */
			return LFA_WRONG_STATE;
		}
		break;

	case PRIME_COMPLETE:
	default:
		break;
	}

	/* Initialise the flags to start with. Only valid if ret == LFA_SUCCESS. */
	*flags = 0ULL;

	ret = plat_lfa_load_auth_image(component_id);
	if (ret == 0) {
		activator = lfa_components[component_id].activator;
		if (activator->prime != NULL) {
			ret = activator->prime(&current_activation);
			if (ret != LFA_SUCCESS) {
				/*
				 * TODO: this should be LFA_PRIME_FAILED, but
				 * the specification has not defined this error
				 * yet.
				 */
				return ret;
			}
		}

		current_activation.prime_status = PRIME_COMPLETE;
	}

	/*
	 * Set lfa_flags to indicate that LFA_PRIME must be called again and
	 * reset ret to 0, as LFA_PRIME must return LFA_SUCCESS if it is
	 * incomplete.
	 */
	if (ret == -EAGAIN) {
		ret = 0;
		*flags = LFA_CALL_AGAIN;
	}

	return convert_to_lfa_error(ret);
}

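/*
 * Report whether the given component has been fully primed and is still
 * pending activation.
 */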
bool lfa_is_prime_complete(uint32_t lfa_component_id)
{
	if (lfa_component_id >= lfa_component_count) {
		return false;
	}

	return (current_activation.component_id == lfa_component_id &&
		current_activation.prime_status == PRIME_COMPLETE &&
		lfa_components[lfa_component_id].activation_pending);
}

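/*
 * One-time LFA service initialisation: discover the platform's firmware
 * components and reset the activation state.
 */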
int lfa_setup(void)
{
	is_lfa_initialized = lfa_initialize_components();
	if (!is_lfa_initialized) {
		return -1;
	}

	lfa_reset_activation();

	return 0;
}

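/*
 * Top-level SMC handler for the LFA service. Dispatches LFA_VERSION,
 * LFA_FEATURES, LFA_GET_INFO, LFA_GET_INVENTORY, LFA_PRIME, LFA_ACTIVATE
 * and LFA_CANCEL.
 *
 * As implemented here, a caller is expected to discover components with
 * LFA_GET_INFO and LFA_GET_INVENTORY, repeat LFA_PRIME until the
 * LFA_CALL_AGAIN flag is no longer returned, and then issue LFA_ACTIVATE
 * (or LFA_CANCEL to abort the sequence).
 */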
uint64_t lfa_smc_handler(uint32_t smc_fid, u_register_t x1, u_register_t x2,
			 u_register_t x3, u_register_t x4, void *cookie,
			 void *handle, u_register_t flags)
{
	uint64_t retx1, retx2;
	uint64_t lfa_flags;
	uint8_t *uuid_p;
	uint32_t fw_seq_id = (uint32_t)x1;
	int ret;

	/* TODO: Acquire serialization lock. */

	if (!is_lfa_initialized) {
		SMC_RET1(handle, LFA_NOT_SUPPORTED);
	}

	switch (smc_fid) {
	case LFA_VERSION:
		SMC_RET1(handle, LFA_VERSION_VAL);
		break;

	case LFA_FEATURES:
		SMC_RET1(handle, is_lfa_fid(x1) ? LFA_SUCCESS : LFA_NOT_SUPPORTED);
		break;

	case LFA_GET_INFO:
		/*
		 * The current specification limits this input parameter to
		 * zero for version 1.0 of LFA.
		 */
		if (x1 == 0ULL) {
			SMC_RET3(handle, LFA_SUCCESS, lfa_component_count, 0);
		} else {
			SMC_RET1(handle, LFA_INVALID_PARAMETERS);
		}
		break;

	case LFA_GET_INVENTORY:
		if (lfa_component_count == 0U) {
			SMC_RET1(handle, LFA_WRONG_STATE);
		}

		/*
		 * Check that fw_seq_id is in range. LFA_GET_INFO must be
		 * called first to scan the platform firmware and establish
		 * a valid number of firmware components.
		 */
		if (fw_seq_id >= lfa_component_count) {
			SMC_RET1(handle, LFA_INVALID_PARAMETERS);
		}

		/*
		 * Grab the UUID of the requested fw_seq_id and set the
		 * return UUID variables.
		 */
		uuid_p = (uint8_t *)&lfa_components[fw_seq_id].uuid;
		memcpy(&retx1, uuid_p, sizeof(uint64_t));
		memcpy(&retx2, uuid_p + sizeof(uint64_t), sizeof(uint64_t));

		/*
		 * Check whether an update is available for the given
		 * fw_seq_id and set the activation_pending flag accordingly.
		 */
		lfa_components[fw_seq_id].activation_pending =
				is_plat_lfa_activation_pending(fw_seq_id);

		INFO("Component %lu %s live activation:\n", x1,
		      lfa_components[fw_seq_id].activator ? "supports" :
		      "does not support");

		if (lfa_components[fw_seq_id].activator != NULL) {
			INFO("Activation pending: %s\n",
			      lfa_components[fw_seq_id].activation_pending ? "true" : "false");
		}

		INFO("x1 = 0x%016lx, x2 = 0x%016lx\n", retx1, retx2);

		SMC_RET4(handle, LFA_SUCCESS, retx1, retx2,
			 get_fw_activation_flags(fw_seq_id));
		break;

	case LFA_PRIME:
		/*
		 * Acquire the lock to serialize PRIME operations across CPUs.
		 * This ensures that multiple PRIME calls to the same component
		 * do not execute concurrently, even if issued from different
		 * CPUs.
		 */
		if (!spin_trylock(&lfa_lock)) {
			SMC_RET1(handle, LFA_BUSY);
		}

		ret = lfa_prime(fw_seq_id, &lfa_flags);

		spin_unlock(&lfa_lock);

		if (ret != LFA_SUCCESS) {
			SMC_RET1(handle, ret);
		} else {
			SMC_RET2(handle, ret, lfa_flags);
		}
		break;

	case LFA_ACTIVATE:
		ret = lfa_activate(fw_seq_id, x2, x3, x4);
		/* TODO: implement activate again. */
		SMC_RET2(handle, ret, 0ULL);
		break;

	case LFA_CANCEL:
		ret = lfa_cancel(fw_seq_id);
		SMC_RET1(handle, ret);
		break;

	default:
		WARN("Unimplemented LFA Service Call: 0x%x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
		break; /* unreachable */
	}

	SMC_RET1(handle, SMC_UNK);

	return 0;
}
388