/*
 * Copyright (c) 2019-2022, Xilinx, Inc. All rights reserved.
 * Copyright (c) 2022-2025, Advanced Micro Devices, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * Top-level SMC handler for Versal Gen 2 power management calls and
 * IPI setup functions for communication with PMC.
 */

#include <errno.h>
#include <stdbool.h>

#include "../drivers/arm/gic/v3/gicv3_private.h"

#include <common/ep_info.h>
#include <common/runtime_svc.h>
#include <drivers/arm/gicv3.h>
#include <lib/psci/psci.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>

#include <plat_private.h>
#include "pm_api_sys.h"
#include "pm_client.h"
#include "pm_ipi.h"
#include "pm_svc_main.h"

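/* GICD_IROUTER Interrupt Routing Mode bit: route the IPI IRQ to any participating PE */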
#define MODE				0x80000000U

#define INVALID_SGI    0xFFU
#define PM_INIT_SUSPEND_CB	(30U)
#define PM_NOTIFY_CB		(32U)
#define EVENT_CPU_PWRDWN	(4U)
#define MBOX_SGI_SHARED_IPI	(7U)

/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 */
#define upper_32_bits(n)	((uint32_t)((n) >> 32U))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n)	((uint32_t)((n) & 0xffffffffU))

/**
 * EXTRACT_ARGS - extracts 32-bit payloads from 64-bit SMC arguments
 * @pm_arg: array of 32-bit payloads
 * @x: array of 64-bit SMC arguments
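 *
 * For illustration, the loop below produces the following mapping (filling
 * PAYLOAD_ARG_CNT - 1 payload words in total):
 *   pm_arg[0] = upper 32 bits of x[0]
 *   pm_arg[1] = lower 32 bits of x[1]
 *   pm_arg[2] = upper 32 bits of x[1]
 *   pm_arg[3] = lower 32 bits of x[2]
 *   pm_arg[4] = upper 32 bits of x[2]
 *   ...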
 */
#define EXTRACT_ARGS(pm_arg, x)						\
	for (uint32_t i = 0U; i < (PAYLOAD_ARG_CNT - 1U); i++) {	\
		if ((i % 2U) != 0U) {					\
			pm_arg[i] = lower_32_bits(x[(i / 2U) + 1U]);	\
		} else {						\
			pm_arg[i] = upper_32_bits(x[i / 2U]);		\
		}							\
	}

/* 1 sec wait timeout (in ms) for secondary core(s) to power down */
#define PWRDWN_WAIT_TIMEOUT	(1000U)
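
/* Accessors for the ICC_ASGI1R_EL1 SGI generation register */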
DEFINE_RENAME_SYSREG_RW_FUNCS(icc_asgi1r_el1, S3_0_C12_C11_6)

/* pm_up = true - UP, pm_up = false - DOWN */
static bool pm_up;
static uint32_t sgi = (uint32_t)INVALID_SGI;
static bool pwrdwn_req_received;

bool pm_pwrdwn_req_status(void)
{
	return pwrdwn_req_received;
}

static void notify_os(void)
{
	plat_ic_raise_ns_sgi((int)sgi, read_mpidr_el1());
}

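/**
 * cpu_pwrdwn_req_handler() - EL3 handler for the CPU power down request SGI.
 * @id: interrupt ID (unused, the SGI is deactivated by number).
 * @flags: interrupt flags (unused).
 * @handle: pointer to the interrupted context (unused).
 * @cookie: unused.
 *
 * Deactivates the power down SGI and takes the calling core offline via PSCI.
 *
 * Return: result of psci_cpu_off().
 */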
static uint64_t cpu_pwrdwn_req_handler(uint32_t id, uint32_t flags,
				       void *handle, void *cookie)
{
	uint32_t cpu_id = plat_my_core_pos();

	VERBOSE("Powering down CPU %u\n", cpu_id);

	/* Deactivate CPU power down SGI */
	plat_ic_end_of_interrupt(CPU_PWR_DOWN_REQ_INTR);

	return (uint64_t) psci_cpu_off();
}

/**
 * raise_pwr_down_interrupt() - Callback function to raise SGI.
 * @mpidr: MPIDR for the target CPU.
 *
 * Raise an SGI to trigger the CPU power down sequence on all online
 * secondary cores.
 */
static void raise_pwr_down_interrupt(u_register_t mpidr)
{
	plat_ic_raise_el3_sgi((int)CPU_PWR_DOWN_REQ_INTR, mpidr);
}

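/**
 * request_cpu_pwrdwn() - Handle a firmware request to power down the APU.
 *
 * Stops the other online cores by raising the power down SGI on each of them,
 * then clears and deactivates the pending IPI IRQ on the calling core.
 */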
void request_cpu_pwrdwn(void)
{
	int ret;

	VERBOSE("CPU power down request received\n");

	/* Send powerdown request to online secondary core(s) */
	ret = psci_stop_other_cores(plat_my_core_pos(),
				    (unsigned int)PWRDWN_WAIT_TIMEOUT,
				    raise_pwr_down_interrupt);
	if (ret != (int)PSCI_E_SUCCESS) {
		ERROR("Failed to powerdown secondary core(s)\n");
	}

	/* Clear IPI IRQ */
	pm_ipi_irq_clear(primary_proc);

	/* Deactivate IPI IRQ */
	plat_ic_end_of_interrupt(PLAT_VERSAL_IPI_IRQ);
}

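/**
 * ipi_fiq_handler() - EL3 handler for the shared IPI FIQ.
 *
 * Forwards IPIs raised by agents other than the PMC to the non-secure mailbox
 * driver via SGI, and processes PMC callbacks (suspend requests and CPU power
 * down notifications) carried in the IPI payload.
 */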
static uint64_t ipi_fiq_handler(uint32_t id, uint32_t flags, void *handle,
				void *cookie)
{
	uint32_t payload[4] = {0};
	enum pm_ret_status ret;
	uint32_t ipi_status, i;

	VERBOSE("Received IPI FIQ from firmware\n");

	console_flush();
	(void)plat_ic_acknowledge_interrupt();

	/* Check status register for each IPI except PMC */
	for (i = IPI_ID_APU; i <= IPI_ID_5; i++) {
		ipi_status = ipi_mb_enquire_status(IPI_ID_APU, i);

		/*
		 * If any agent other than the PMC has generated an IPI FIQ
		 * then send an SGI to the mailbox driver.
		 */
		if ((ipi_status & (uint32_t)IPI_MB_STATUS_RECV_PENDING) > (uint32_t) 0) {
			plat_ic_raise_ns_sgi((int)MBOX_SGI_SHARED_IPI, read_mpidr_el1());
			break;
		}
	}

	/* If PMC has not generated interrupt then end ISR */
	ipi_status = ipi_mb_enquire_status(IPI_ID_APU, IPI_ID_PMC);
	if ((ipi_status & IPI_MB_STATUS_RECV_PENDING) == (uint32_t)0) {
		plat_ic_end_of_interrupt(id);
		goto end;
	}

	/* Handle PMC case */
	ret = pm_get_callbackdata(payload, ARRAY_SIZE(payload), 0, 0);
	if (ret != PM_RET_SUCCESS) {
		payload[0] = (uint32_t) ret;
	}

	switch (payload[0]) {
	case PM_INIT_SUSPEND_CB:
		if (sgi != INVALID_SGI) {
			notify_os();
		}
		break;
	case PM_NOTIFY_CB:
		if (sgi != INVALID_SGI) {
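			/*
			 * When the OS has registered an SGI, CPU power down
			 * notifications are handled in two steps: the first
			 * notification only arms pwrdwn_req_received and is
			 * forwarded to the OS, and the next one performs the
			 * actual power down.
			 */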
			if ((payload[2] == EVENT_CPU_PWRDWN) &&
			    (NODECLASS(payload[1]) == (uint32_t)XPM_NODECLASS_DEVICE)) {
				if (pwrdwn_req_received) {
					pwrdwn_req_received = false;
					request_cpu_pwrdwn();
					(void)psci_cpu_off();
					break;
				} else {
					/* No action needed, added for MISRA compliance */
				}
				pwrdwn_req_received = true;

			} else {
				/* No action needed, added for MISRA compliance */
			}
			notify_os();
		} else if ((payload[2] == EVENT_CPU_PWRDWN) &&
			  (NODECLASS(payload[1]) == (uint32_t)XPM_NODECLASS_DEVICE)) {
			request_cpu_pwrdwn();
			(void)psci_cpu_off();
		} else {
			/* No action needed, added for MISRA compliance */
		}
		break;
	case (uint32_t) PM_RET_ERROR_INVALID_CRC:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid CRC in the payload\n");
		break;

	default:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid IPI payload\n");
		break;
	}

	/* Clear FIQ */
	plat_ic_end_of_interrupt(id);

end:
	return 0;
}

/**
 * pm_register_sgi() - Register the SGI used to notify the OS of PM callbacks.
 * @sgi_num: SGI number to be used for communication.
 * @reset: Reset the SGI to INVALID_SGI when reset=1.
 *
 * Return: 0 on success, -EBUSY if an SGI is already registered, -EINVAL if
 *         sgi_num is not a valid SGI number.
 *
 * Update the SGI number to be used.
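 *
 * Invoked from the TF_A_PM_REGISTER_SGI SMC handler with pm_arg[0] as the SGI
 * number and pm_arg[1] as the reset flag.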
 *
 */
int32_t pm_register_sgi(uint32_t sgi_num, uint32_t reset)
{
	int32_t ret;

	if (reset == 1U) {
		sgi = INVALID_SGI;
		ret = 0;
		goto end;
	}

	if (sgi != INVALID_SGI) {
		ret = -EBUSY;
		goto end;
	}

	if (sgi_num >= GICV3_MAX_SGI_TARGETS) {
		ret = -EINVAL;
		goto end;
	}

	sgi = (uint32_t)sgi_num;
	ret = 0;
end:
	return ret;
}

/**
 * pm_setup() - PM service setup.
 *
 * Return: On success, the initialization function must return 0.
 *         Any other return value will cause the framework to ignore
 *         the service.
 *
 * Initialization function for Versal Gen 2 power management, used for
 * communication with the PMC.
 *
 * Called from the sip_svc_setup initialization function with the
 * rt_svc_init signature.
 *
 */
int32_t pm_setup(void)
{
	int32_t ret = 0;

	pm_ipi_init(primary_proc);
	pm_up = true;
	pwrdwn_req_received = false;

	/* Register SGI handler for CPU power down request */
	ret = request_intr_type_el3(CPU_PWR_DOWN_REQ_INTR, cpu_pwrdwn_req_handler);
	if (ret != 0) {
		WARN("BL31: registering SGI interrupt failed\n");
	}

	/*
	 * Enable the IPI IRQ and assume the rich OS is ready to handle
	 * callback IRQs now. Even if we were wrong, it would not enable
	 * the IRQ in the GIC.
	 */
	pm_ipi_irq_enable(primary_proc);

	ret = request_intr_type_el3(PLAT_VERSAL_IPI_IRQ, ipi_fiq_handler);
	if (ret != 0) {
		WARN("BL31: registering IPI interrupt failed\n");
	}

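	/*
	 * Route the IPI IRQ with the Interrupt Routing Mode bit set (MODE),
	 * i.e. to any participating PE rather than to a fixed affinity.
	 */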
	gicd_write_irouter(gicv3_driver_data->gicd_base, PLAT_VERSAL_IPI_IRQ, MODE);

	/* Register for idle callback during force power down/restart */
	ret = (int32_t)pm_register_notifier(primary_proc->node_id, EVENT_CPU_PWRDWN,
					    0x0U, 0x1U, SECURE);
	if (ret != 0) {
		WARN("BL31: registering idle callback for restart/force power down failed\n");
	}

	return ret;
}

/**
 * eemi_psci_debugfs_handler() - EEMI API invoked from PSCI.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE or NON_SECURE.
 *
 * These EEMI APIs perform CPU-specific power management tasks and are invoked
 * either from PSCI or from debugfs in the kernel. The calls require
 * CPU-specific processing before an IPI request is sent to the Platform
 * Management Controller, for example enabling or disabling CPU-specific
 * interrupts, so they need a separate handler and cannot be handled by the
 * common EEMI handler.
 *
 * Return: If the EEMI API is found, a uintptr_t type address, else 0.
 *
 */
static uintptr_t eemi_psci_debugfs_handler(uint32_t api_id, uint32_t *pm_arg,
					   void *handle, uint32_t security_flag)
{
	enum pm_ret_status ret;
	uint32_t pm_api_id = api_id & API_ID_MASK;

	switch (pm_api_id) {

	case (uint32_t)PM_SELF_SUSPEND:
		ret = pm_self_suspend(pm_arg[0], pm_arg[1], pm_arg[2],
				      pm_arg[3], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	case (uint32_t)PM_FORCE_POWERDOWN:
		ret = pm_force_powerdown(pm_arg[0], (uint8_t)pm_arg[1], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	case (uint32_t)PM_SYSTEM_SHUTDOWN:
		ret = pm_system_shutdown(pm_arg[0], pm_arg[1], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	default:
		return (uintptr_t)0;
	}
}

/**
 * TF_A_specific_handler() - SMC handler for TF-A specific functionality.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE or NON_SECURE.
 *
 * These EEMI calls perform functionality that does not require an IPI
 * transaction. The request is handled entirely within TF-A, which returns
 * the requested data to the kernel.
 *
 * Return: If a TF-A specific API is found, a uintptr_t type address, else 0.
 *
 */
static uintptr_t TF_A_specific_handler(uint32_t api_id, uint32_t *pm_arg,
				       void *handle, uint32_t security_flag)
{
	switch (api_id) {

	case TF_A_FEATURE_CHECK:
	{
		enum pm_ret_status ret;
		uint32_t result[PAYLOAD_ARG_CNT] = {0U};

		ret = tfa_api_feature_check(pm_arg[0], result);
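		/* Pack status (lower 32 bits) and feature data (upper 32 bits) */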
		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)result[0] << 32U));
	}

	case TF_A_PM_REGISTER_SGI:
	{
		int32_t ret;

		ret = pm_register_sgi(pm_arg[0], pm_arg[1]);
		if (ret != 0) {
			SMC_RET1(handle, (uint32_t)PM_RET_ERROR_ARGS);
		}

		SMC_RET1(handle, (uint32_t)PM_RET_SUCCESS);
	}

	case PM_GET_CALLBACK_DATA:
	{
		uint32_t result[4] = {0};
		enum pm_ret_status ret;

		ret = pm_get_callbackdata(result, ARRAY_SIZE(result), security_flag, 1U);
		if (ret != PM_RET_SUCCESS) {
			result[0] = (uint32_t) ret;
		}

		SMC_RET2(handle,
			(uint64_t)result[0] | ((uint64_t)result[1] << 32U),
			(uint64_t)result[2] | ((uint64_t)result[3] << 32U));
	}

	case PM_GET_TRUSTZONE_VERSION:
		SMC_RET1(handle, ((uint64_t)PM_RET_SUCCESS) |
			 (((uint64_t)TZ_VERSION) << 32U));

	default:
		return (uintptr_t)0U;
	}
}

/**
 * eemi_api_handler() - Prepare EEMI payload and perform IPI transaction.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE or NON_SECURE.
 *
 * EEMI (Embedded Energy Management Interface) is an AMD-Xilinx proprietary
 * protocol that allows communication between the power management controller
 * and the different processing clusters.
 *
 * This handler prepares the EEMI protocol payload received from the kernel
 * and performs the IPI transaction.
 *
 * Return: If the EEMI API is found, a uintptr_t type address, else 0.
 */
static uintptr_t eemi_api_handler(uint32_t api_id, const uint32_t *pm_arg,
				  void *handle, uint32_t security_flag)
{
	enum pm_ret_status ret;
	uint32_t buf[RET_PAYLOAD_ARG_CNT] = {0U};
	uint32_t payload[PAYLOAD_ARG_CNT] = {0U};
	uint32_t module_id;

	module_id = (api_id & MODULE_ID_MASK) >> 8U;

	PM_PACK_PAYLOAD7(payload, module_id, security_flag, api_id,
			 pm_arg[0], pm_arg[1], pm_arg[2], pm_arg[3],
			 pm_arg[4], pm_arg[5]);

	ret = pm_ipi_send_sync(primary_proc, payload, (uint32_t *)buf,
			       RET_PAYLOAD_ARG_CNT);

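	/*
	 * Pack the status code and the six return payload words into the
	 * four 64-bit return registers (x0..x3).
	 */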
	SMC_RET4(handle, (uint64_t)ret | ((uint64_t)buf[0] << 32U),
		 (uint64_t)buf[1] | ((uint64_t)buf[2] << 32U),
		 (uint64_t)buf[3] | ((uint64_t)buf[4] << 32U),
		 (uint64_t)buf[5]);
}

/**
 * pm_smc_handler() - SMC handler for PM-API calls coming from EL1/EL2.
 * @smc_fid: Function Identifier.
 * @x1: SMC64 Arguments from kernel.
 * @x2: SMC64 Arguments from kernel.
 * @x3: SMC64 Arguments from kernel (upper 32-bits).
 * @x4: Unused.
 * @cookie: Unused.
 * @handle: Pointer to caller's context structure.
 * @flags: SECURE or NON_SECURE.
 *
 * Return: Unused.
 *
 * Determines whether smc_fid is a valid and supported PM SMC Function ID from
 * the list of pm_api_ids; otherwise completes the request with the unknown
 * SMC Function ID.
 *
 * The SMC calls for the PM service are forwarded from the SIP Service SMC
 * handler function with the rt_svc_handle signature.
 *
 */
uint64_t pm_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
			uint64_t x4, const void *cookie, void *handle, uint64_t flags)
{
	uintptr_t ret;
	uint32_t pm_arg[PAYLOAD_ARG_CNT] = {0};
	uint32_t security_flag = NON_SECURE;
	uint32_t api_id;
	bool status = false, status_tmp = false;
	uint64_t x[4] = {x1, x2, x3, x4};

	/* Handle case where PM wasn't initialized properly */
	if (pm_up == false) {
		SMC_RET1(handle, SMC_UNK);
	}

	/*
	 * Mark the BIT24 payload (i.e. the first bit of pm_arg[3]) as secure
	 * (0) if the SMC caller is secure.
	 *
	 * The redundant macro call makes the check more resistant to glitches.
	 */
	SECURE_REDUNDANT_CALL(status, status_tmp, is_caller_secure, flags);
	if ((status != false) && (status_tmp != false)) {
		security_flag = SECURE;
	}

	if ((smc_fid & FUNCID_NUM_MASK) == PASS_THROUGH_FW_CMD_ID) {
		api_id = lower_32_bits(x[0]);

		EXTRACT_ARGS(pm_arg, x);

		ret = eemi_psci_debugfs_handler(api_id, pm_arg, handle, (uint32_t)flags);
		if (ret != (uintptr_t)0) {
			return ret;
		}

		return eemi_api_handler(api_id, pm_arg, handle, security_flag);
	}

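	/* Unpack the 32-bit PM arguments from the 64-bit SMC registers */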
	pm_arg[0] = (uint32_t)x1;
	pm_arg[1] = (uint32_t)(x1 >> 32U);
	pm_arg[2] = (uint32_t)x2;
	pm_arg[3] = (uint32_t)(x2 >> 32U);
	pm_arg[4] = (uint32_t)x3;
	(void)(x4);
	api_id = smc_fid & FUNCID_NUM_MASK;

	ret = TF_A_specific_handler(api_id, pm_arg, handle, security_flag);

	return ret;
}