xref: /rk3399_ARM-atf/plat/amd/versal2/pm_service/pm_svc_main.c (revision d8fdff38b544b79c4f0b757e3b3c82ce9c8a2f9e)
1 /*
2  * Copyright (c) 2019-2022, Xilinx, Inc. All rights reserved.
3  * Copyright (c) 2022-2025, Advanced Micro Devices, Inc. All rights reserved.
4  *
5  * SPDX-License-Identifier: BSD-3-Clause
6  */
7 
8 /*
9  * Top-level SMC handler for Versal2 power management calls and
10  * IPI setup functions for communication with PMC.
11  */
12 
13 #include <errno.h>
14 #include <stdbool.h>
15 
16 #include "../drivers/arm/gic/v3/gicv3_private.h"
17 
18 #include <common/runtime_svc.h>
19 #include <drivers/arm/gicv3.h>
20 #include <lib/psci/psci.h>
21 #include <plat/arm/common/plat_arm.h>
22 #include <plat/common/platform.h>
23 
24 #include <plat_private.h>
25 #include "pm_api_sys.h"
26 #include "pm_client.h"
27 #include "pm_ipi.h"
28 #include "pm_svc_main.h"
29 
30 #define MODE				0x80000000U
31 
32 #define INVALID_SGI    0xFFU
33 #define PM_INIT_SUSPEND_CB	(30U)
34 #define PM_NOTIFY_CB		(32U)
35 #define EVENT_CPU_PWRDWN	(4U)
36 #define MBOX_SGI_SHARED_IPI	(7U)
37 
38 /**
39  * upper_32_bits - return bits 32-63 of a number
40  * @n: the number we're accessing
41  */
42 #define upper_32_bits(n)	((uint32_t)((n) >> 32U))
43 
44 /**
45  * lower_32_bits - return bits 0-31 of a number
46  * @n: the number we're accessing
47  */
48 #define lower_32_bits(n)	((uint32_t)((n) & 0xffffffffU))
49 
50 /**
 * EXTRACT_ARGS - extracts 32-bit payloads from 64-bit SMC arguments
52  * @pm_arg: array of 32-bit payloads
53  * @x: array of 64-bit SMC arguments
54  */
55 #define EXTRACT_ARGS(pm_arg, x)						\
56 	for (uint32_t i = 0U; i < (PAYLOAD_ARG_CNT - 1U); i++) {	\
57 		if ((i % 2U) != 0U) {					\
58 			pm_arg[i] = lower_32_bits(x[(i / 2U) + 1U]);	\
59 		} else {						\
60 			pm_arg[i] = upper_32_bits(x[i / 2U]);		\
61 		}							\
62 	}
63 
64 /* 1 sec of wait timeout for secondary core down */
65 #define PWRDWN_WAIT_TIMEOUT	(1000U)
66 DEFINE_RENAME_SYSREG_RW_FUNCS(icc_asgi1r_el1, S3_0_C12_C11_6)
67 
68 /* pm_up = true - UP, pm_up = false - DOWN */
69 static bool pm_up;
70 static uint32_t sgi = (uint32_t)INVALID_SGI;
71 bool pwrdwn_req_received;
72 
73 static void notify_os(void)
74 {
75 	plat_ic_raise_ns_sgi((int)sgi, read_mpidr_el1());
76 }
77 
78 static uint64_t cpu_pwrdwn_req_handler(uint32_t id, uint32_t flags,
79 				       void *handle, void *cookie)
80 {
81 	uint32_t cpu_id = plat_my_core_pos();
82 
83 	VERBOSE("Powering down CPU %d\n", cpu_id);
84 
85 	/* Deactivate CPU power down SGI */
86 	plat_ic_end_of_interrupt(CPU_PWR_DOWN_REQ_INTR);
87 
88 	return (uint64_t) psci_cpu_off();
89 }
90 
91 /**
92  * raise_pwr_down_interrupt() - Callback function to raise SGI.
93  * @mpidr: MPIDR for the target CPU.
94  *
95  * Raise SGI interrupt to trigger the CPU power down sequence on all the
96  * online secondary cores.
97  */
98 static void raise_pwr_down_interrupt(u_register_t mpidr)
99 {
100 	plat_ic_raise_el3_sgi((int)CPU_PWR_DOWN_REQ_INTR, mpidr);
101 }
102 
103 void request_cpu_pwrdwn(void)
104 {
105 	int ret;
106 
107 	VERBOSE("CPU power down request received\n");
108 
109 	/* Send powerdown request to online secondary core(s) */
110 	ret = psci_stop_other_cores(plat_my_core_pos(), (unsigned int)PWRDWN_WAIT_TIMEOUT, raise_pwr_down_interrupt);
111 	if (ret != (int)PSCI_E_SUCCESS) {
112 		ERROR("Failed to powerdown secondary core(s)\n");
113 	}
114 
115 	/* Clear IPI IRQ */
116 	pm_ipi_irq_clear(primary_proc);
117 
118 	/* Deactivate IPI IRQ */
119 	plat_ic_end_of_interrupt(PLAT_VERSAL_IPI_IRQ);
120 }
121 
/**
 * ipi_fiq_handler() - EL3 FIQ handler for incoming inter-processor interrupts.
 * @id: Interrupt ID of the IPI FIQ being handled.
 * @flags: Interrupt flags (unused).
 * @handle: Pointer to the interrupted context (unused).
 * @cookie: Opaque cookie registered with the handler (unused).
 *
 * Two sources share this FIQ: non-PMC agents, whose pending IPIs are handed
 * to the rich-OS mailbox driver via MBOX_SGI_SHARED_IPI, and the PMC, whose
 * callback payload is fetched and dispatched here (suspend notification,
 * power-down event, or error reporting).
 *
 * Return: Always 0.
 */
static uint64_t ipi_fiq_handler(uint32_t id, uint32_t flags, void *handle,
				void *cookie)
{
	uint32_t payload[4] = {0};
	enum pm_ret_status ret;
	uint32_t ipi_status, i;

	VERBOSE("Received IPI FIQ from firmware\n");

	console_flush();
	(void)plat_ic_acknowledge_interrupt();

	/* Check status register for each IPI except PMC */
	for (i = IPI_ID_APU; i <= IPI_ID_5; i++) {
		ipi_status = ipi_mb_enquire_status(IPI_ID_APU, i);

		/* If any agent other than PMC has generated IPI FIQ then send SGI to mbox driver */
		if ((ipi_status & (uint32_t)IPI_MB_STATUS_RECV_PENDING) > (uint32_t) 0) {
			plat_ic_raise_ns_sgi((int)MBOX_SGI_SHARED_IPI, read_mpidr_el1());
			break;
		}
	}

	/* If PMC has not generated interrupt then end ISR */
	ipi_status = ipi_mb_enquire_status(IPI_ID_APU, IPI_ID_PMC);
	if ((ipi_status & IPI_MB_STATUS_RECV_PENDING) == (uint32_t)0) {
		plat_ic_end_of_interrupt(id);
		goto end;
	}

	/* Handle PMC case: fetch the callback payload from the PMC */
	ret = pm_get_callbackdata(payload, ARRAY_SIZE(payload), 0, 0);
	if (ret != PM_RET_SUCCESS) {
		/* Propagate the fetch error through the payload dispatch below */
		payload[0] = (uint32_t) ret;
	}

	switch (payload[0]) {
	case PM_INIT_SUSPEND_CB:
		/* Suspend request: forward to the OS only if it registered an SGI */
		if (sgi != INVALID_SGI) {
			notify_os();
		}
		break;
	case PM_NOTIFY_CB:
		if (sgi != INVALID_SGI) {
			if ((payload[2] == EVENT_CPU_PWRDWN) &&
			    (NODECLASS(payload[1]) == (uint32_t)XPM_NODECLASS_DEVICE)) {
				/*
				 * Two-phase handshake: the first power-down
				 * event is forwarded to the OS (flag armed);
				 * the second one actually powers the CPUs down.
				 */
				if (pwrdwn_req_received) {
					pwrdwn_req_received = false;
					request_cpu_pwrdwn();
					(void)psci_cpu_off();
					break;
				} else {
					/* No action needed, added for MISRA
					 * compliance
					 */
				}
				pwrdwn_req_received = true;

			} else {
				/* No action needed, added for MISRA
				 * compliance
				 */
			}
			notify_os();
		} else if ((payload[2] == EVENT_CPU_PWRDWN) &&
			  (NODECLASS(payload[1]) == (uint32_t)XPM_NODECLASS_DEVICE)) {
			/* No OS SGI registered: power down immediately */
			request_cpu_pwrdwn();
			(void)psci_cpu_off();
		} else {
			/* No action needed, added for MISRA
			 * compliance
			 */
		}
		break;
	case (uint32_t) PM_RET_ERROR_INVALID_CRC:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid CRC in the payload\n");
		break;

	default:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid IPI payload\n");
		break;
	}

	/* Clear FIQ */
	plat_ic_end_of_interrupt(id);

end:
	return 0;
}
213 
214 /**
215  * pm_register_sgi() - PM register the IPI interrupt.
216  * @sgi_num: SGI number to be used for communication.
217  * @reset: Reset to invalid SGI when reset=1.
218  *
219  * Return: On success, the initialization function must return 0.
220  *         Any other return value will cause the framework to ignore
221  *         the service.
222  *
223  * Update the SGI number to be used.
224  *
225  */
226 int32_t pm_register_sgi(uint32_t sgi_num, uint32_t reset)
227 {
228 	int32_t ret;
229 
230 	if (reset == 1U) {
231 		sgi = INVALID_SGI;
232 		ret = 0;
233 		goto end;
234 	}
235 
236 	if (sgi != INVALID_SGI) {
237 		ret = -EBUSY;
238 		goto end;
239 	}
240 
241 	if (sgi_num >= GICV3_MAX_SGI_TARGETS) {
242 		ret = -EINVAL;
243 		goto end;
244 	}
245 
246 	sgi = (uint32_t)sgi_num;
247 	ret = 0;
248 end:
249 	return ret;
250 }
251 
/**
 * pm_setup() - PM service setup.
 *
 * Return: On success, the initialization function must return 0.
 *         Any other return value will cause the framework to ignore
 *         the service.
 *
 * Initialization functions for Versal power management for
 * communication with PMC.
 *
 * Called from sip_svc_setup initialization function with the
 * rt_svc_init signature.
 *
 * Note: failures of the intermediate registrations only emit a WARN; the
 * returned value reflects only the final pm_register_notifier() call.
 */
int32_t pm_setup(void)
{
	int32_t ret = 0;

	pm_ipi_init(primary_proc);
	pm_up = true;

	/* register SGI handler for CPU power down request */
	ret = request_intr_type_el3(CPU_PWR_DOWN_REQ_INTR, cpu_pwrdwn_req_handler);
	if (ret != 0) {
		WARN("BL31: registering SGI interrupt failed\n");
	}

	/*
	 * Enable IPI IRQ
	 * assume the rich OS is OK to handle callback IRQs now.
	 * Even if we were wrong, it would not enable the IRQ in
	 * the GIC.
	 */
	pm_ipi_irq_enable(primary_proc);

	ret = request_intr_type_el3(PLAT_VERSAL_IPI_IRQ, ipi_fiq_handler);
	if (ret != 0) {
		WARN("BL31: registering IPI interrupt failed\n");
	}

	/* MODE sets bit 31 of GICD_IROUTER — presumably the Interrupt Routing
	 * Mode bit (route to any participating PE); TODO confirm against the
	 * GICv3 spec. */
	gicd_write_irouter(gicv3_driver_data->gicd_base, PLAT_VERSAL_IPI_IRQ, MODE);

	/* Register for idle callback during force power down/restart */
	ret = (int32_t)pm_register_notifier(primary_proc->node_id, EVENT_CPU_PWRDWN,
				   0x0U, 0x1U, SECURE_FLAG);
	if (ret != 0) {
		WARN("BL31: registering idle callback for restart/force power down failed\n");
	}

	return ret;
}
303 
304 /**
305  * eemi_psci_debugfs_handler() - EEMI API invoked from PSCI.
306  * @api_id: identifier for the API being called.
307  * @pm_arg: pointer to the argument data for the API call.
308  * @handle: Pointer to caller's context structure.
309  * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
310  *
311  * These EEMI APIs performs CPU specific power management tasks.
312  * These EEMI APIs are invoked either from PSCI or from debugfs in kernel.
313  * These calls require CPU specific processing before sending IPI request to
314  * Platform Management Controller. For example enable/disable CPU specific
315  * interrupts. This requires separate handler for these calls and may not be
316  * handled using common eemi handler.
317  *
318  * Return: If EEMI API found then, uintptr_t type address, else 0.
319  *
320  */
321 static uintptr_t eemi_psci_debugfs_handler(uint32_t api_id, uint32_t *pm_arg,
322 					   void *handle, uint32_t security_flag)
323 {
324 	enum pm_ret_status ret;
325 
326 	switch (api_id) {
327 
328 	case (uint32_t)PM_SELF_SUSPEND:
329 		ret = pm_self_suspend(pm_arg[0], pm_arg[1], pm_arg[2],
330 				      pm_arg[3], security_flag);
331 		SMC_RET1(handle, (u_register_t)ret);
332 
333 	case (uint32_t)PM_FORCE_POWERDOWN:
334 		ret = pm_force_powerdown(pm_arg[0], (uint8_t)pm_arg[1], security_flag);
335 		SMC_RET1(handle, (u_register_t)ret);
336 
337 	case (uint32_t)PM_REQ_SUSPEND:
338 		ret = pm_req_suspend(pm_arg[0], (uint8_t)pm_arg[1], pm_arg[2],
339 				     pm_arg[3], security_flag);
340 		SMC_RET1(handle, (u_register_t)ret);
341 
342 	case (uint32_t)PM_ABORT_SUSPEND:
343 		ret = pm_abort_suspend(pm_arg[0], security_flag);
344 		SMC_RET1(handle, (u_register_t)ret);
345 
346 	case (uint32_t)PM_SYSTEM_SHUTDOWN:
347 		ret = pm_system_shutdown(pm_arg[0], pm_arg[1], security_flag);
348 		SMC_RET1(handle, (u_register_t)ret);
349 
350 	default:
351 		return (uintptr_t)0;
352 	}
353 }
354 
/**
 * TF_A_specific_handler() - SMC handler for TF-A specific functionality.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * These EEMI calls perform functionality that does not require an
 * IPI transaction. The handler ends in TF-A and returns the requested data
 * to the kernel from TF-A.
 *
 * Return: If TF-A specific API found then, uintptr_t type address, else 0
 *
 */
static uintptr_t TF_A_specific_handler(uint32_t api_id, uint32_t *pm_arg,
				       void *handle, uint32_t security_flag)
{
	switch (api_id) {

	case TF_A_FEATURE_CHECK:
	{
		enum pm_ret_status ret;
		uint32_t result[PAYLOAD_ARG_CNT] = {0U};

		/* Status in the low 32 bits, feature data in the high 32 */
		ret = eemi_feature_check(pm_arg[0], result);
		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)result[0] << 32U));
	}

	case TF_A_PM_REGISTER_SGI:
	{
		int32_t ret;

		/* pm_arg[0] = SGI number, pm_arg[1] = reset flag */
		ret = pm_register_sgi(pm_arg[0], pm_arg[1]);
		if (ret != 0) {
			SMC_RET1(handle, (uint32_t)PM_RET_ERROR_ARGS);
		}

		SMC_RET1(handle, (uint32_t)PM_RET_SUCCESS);
	}

	case PM_GET_CALLBACK_DATA:
	{
		uint32_t result[4] = {0};
		enum pm_ret_status ret;

		ret = pm_get_callbackdata(result, ARRAY_SIZE(result), security_flag, 1U);
		if (ret != PM_RET_SUCCESS) {
			/* Surface the fetch error in the first payload word */
			result[0] = (uint32_t) ret;
		}

		/* Pack the 4 x 32-bit payload words into two 64-bit returns */
		SMC_RET2(handle,
			(uint64_t)result[0] | ((uint64_t)result[1] << 32U),
			(uint64_t)result[2] | ((uint64_t)result[3] << 32U));
	}

	case PM_GET_TRUSTZONE_VERSION:
		SMC_RET1(handle, ((uint64_t)PM_RET_SUCCESS) |
			 (((uint64_t)TZ_VERSION) << 32U));

	default:
		/* Not a TF-A specific API: let the caller try the next handler */
		return (uintptr_t)0U;
	}
}
418 
419 /**
420  * eemi_api_handler() - Prepare EEMI payload and perform IPI transaction.
421  * @api_id: identifier for the API being called.
422  * @pm_arg: pointer to the argument data for the API call.
423  * @handle: Pointer to caller's context structure.
424  * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
425  *
426  * EEMI - Embedded Energy Management Interface is AMD-Xilinx proprietary
427  * protocol to allow communication between power management controller and
428  * different processing clusters.
429  *
430  * This handler prepares EEMI protocol payload received from kernel and performs
431  * IPI transaction.
432  *
433  * Return: If EEMI API found then, uintptr_t type address, else 0
434  */
435 static uintptr_t eemi_api_handler(uint32_t api_id, const uint32_t *pm_arg,
436 				  void *handle, uint32_t security_flag)
437 {
438 	enum pm_ret_status ret;
439 	uint32_t buf[RET_PAYLOAD_ARG_CNT] = {0U};
440 	uint32_t payload[PAYLOAD_ARG_CNT] = {0U};
441 	uint32_t module_id;
442 
443 	module_id = (api_id & MODULE_ID_MASK) >> 8U;
444 
445 	PM_PACK_PAYLOAD7(payload, module_id, security_flag, api_id,
446 			 pm_arg[0], pm_arg[1], pm_arg[2], pm_arg[3],
447 			 pm_arg[4], pm_arg[5]);
448 
449 	ret = pm_ipi_send_sync(primary_proc, payload, (uint32_t *)buf,
450 			       RET_PAYLOAD_ARG_CNT);
451 
452 	SMC_RET4(handle, (uint64_t)ret | ((uint64_t)buf[0] << 32U),
453 		 (uint64_t)buf[1] | ((uint64_t)buf[2] << 32U),
454 		 (uint64_t)buf[3] | ((uint64_t)buf[4] << 32U),
455 		 (uint64_t)buf[5]);
456 }
457 
458 /**
459  * pm_smc_handler() - SMC handler for PM-API calls coming from EL1/EL2.
460  * @smc_fid: Function Identifier.
461  * @x1: SMC64 Arguments from kernel.
462  * @x2: SMC64 Arguments from kernel.
463  * @x3: SMC64 Arguments from kernel (upper 32-bits).
464  * @x4: Unused.
465  * @cookie: Unused.
466  * @handle: Pointer to caller's context structure.
467  * @flags: SECURE_FLAG or NON_SECURE_FLAG.
468  *
469  * Return: Unused.
470  *
471  * Determines that smc_fid is valid and supported PM SMC Function ID from the
472  * list of pm_api_ids, otherwise completes the request with
473  * the unknown SMC Function ID.
474  *
475  * The SMC calls for PM service are forwarded from SIP Service SMC handler
476  * function with rt_svc_handle signature.
477  *
478  */
479 uint64_t pm_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
480 			uint64_t x4, const void *cookie, void *handle, uint64_t flags)
481 {
482 	uintptr_t ret;
483 	uint32_t pm_arg[PAYLOAD_ARG_CNT] = {0};
484 	uint32_t security_flag = NON_SECURE_FLAG;
485 	uint32_t api_id;
486 	bool status = false, status_tmp = false;
487 	uint64_t x[4] = {x1, x2, x3, x4};
488 
489 	/* Handle case where PM wasn't initialized properly */
490 	if (pm_up == false) {
491 		SMC_RET1(handle, SMC_UNK);
492 	}
493 
494 	/*
495 	 * Mark BIT24 payload (i.e 1st bit of pm_arg[3] ) as secure (0)
496 	 * if smc called is secure
497 	 *
498 	 * Add redundant macro call to immune the code from glitches
499 	 */
500 	SECURE_REDUNDANT_CALL(status, status_tmp, is_caller_secure, flags);
501 	if ((status != false) && (status_tmp != false)) {
502 		security_flag = SECURE_FLAG;
503 	}
504 
505 	if ((smc_fid & FUNCID_NUM_MASK) == PASS_THROUGH_FW_CMD_ID) {
506 		api_id = lower_32_bits(x[0]);
507 
508 		EXTRACT_ARGS(pm_arg, x);
509 
510 		return eemi_api_handler(api_id, pm_arg, handle, security_flag);
511 	}
512 
513 	pm_arg[0] = (uint32_t)x1;
514 	pm_arg[1] = (uint32_t)(x1 >> 32U);
515 	pm_arg[2] = (uint32_t)x2;
516 	pm_arg[3] = (uint32_t)(x2 >> 32U);
517 	pm_arg[4] = (uint32_t)x3;
518 	(void)(x4);
519 	api_id = smc_fid & FUNCID_NUM_MASK;
520 
521 	ret = eemi_psci_debugfs_handler(api_id, pm_arg, handle, (uint32_t)flags);
522 	if (ret !=  (uintptr_t)0)
523 		goto error;
524 
525 	ret = TF_A_specific_handler(api_id, pm_arg, handle, security_flag);
526 	if (ret !=  (uintptr_t)0)
527 		goto error;
528 
529 error:
530 	return ret;
531 }
532