xref: /rk3399_ARM-atf/plat/xilinx/common/pm_service/pm_svc_main.c (revision 7c4e1eea61a32291a6640070418e07ab98b42442)
1 /*
2  * Copyright (c) 2019-2022, Xilinx, Inc. All rights reserved.
3  * Copyright (c) 2022-2024, Advanced Micro Devices, Inc. All rights reserved.
4  *
5  * SPDX-License-Identifier: BSD-3-Clause
6  */
7 
8 /*
9  * Top-level SMC handler for Versal power management calls and
10  * IPI setup functions for communication with PMC.
11  */
12 
13 #include <errno.h>
14 #include <stdbool.h>
15 
16 #include "../drivers/arm/gic/v3/gicv3_private.h"
17 
18 #include <common/runtime_svc.h>
19 #include <drivers/arm/gicv3.h>
20 #include <lib/psci/psci.h>
21 #include <plat/arm/common/plat_arm.h>
22 #include <plat/common/platform.h>
23 
24 #include <plat_private.h>
25 #include "pm_api_sys.h"
26 #include "pm_client.h"
27 #include "pm_ipi.h"
28 #include "pm_svc_main.h"
29 
30 #define MODE				0x80000000U
31 
32 #define XSCUGIC_SGIR_EL1_INITID_SHIFT    24U
33 #define INVALID_SGI    0xFFU
34 #define PM_INIT_SUSPEND_CB	(30U)
35 #define PM_NOTIFY_CB		(32U)
36 #define EVENT_CPU_PWRDWN	(4U)
37 #define MBOX_SGI_SHARED_IPI	(7U)
38 
39 /* 1 sec of wait timeout for secondary core down */
40 #define PWRDWN_WAIT_TIMEOUT	(1000U)
41 DEFINE_RENAME_SYSREG_RW_FUNCS(icc_asgi1r_el1, S3_0_C12_C11_6)
42 
43 /* pm_up = true - UP, pm_up = false - DOWN */
44 static bool pm_up;
45 static uint32_t sgi = (uint32_t)INVALID_SGI;
46 bool pwrdwn_req_received;
47 
48 static void notify_os(void)
49 {
50 	plat_ic_raise_ns_sgi(sgi, read_mpidr_el1());
51 }
52 
53 static uint64_t cpu_pwrdwn_req_handler(uint32_t id, uint32_t flags,
54 				       void *handle, void *cookie)
55 {
56 	uint32_t cpu_id = plat_my_core_pos();
57 
58 	VERBOSE("Powering down CPU %d\n", cpu_id);
59 
60 	/* Deactivate CPU power down SGI */
61 	plat_ic_end_of_interrupt(CPU_PWR_DOWN_REQ_INTR);
62 
63 	return psci_cpu_off();
64 }
65 
66 /**
67  * raise_pwr_down_interrupt() - Callback function to raise SGI.
68  * @mpidr: MPIDR for the target CPU.
69  *
70  * Raise SGI interrupt to trigger the CPU power down sequence on all the
71  * online secondary cores.
72  */
73 static void raise_pwr_down_interrupt(u_register_t mpidr)
74 {
75 	plat_ic_raise_el3_sgi(CPU_PWR_DOWN_REQ_INTR, mpidr);
76 }
77 
78 void request_cpu_pwrdwn(void)
79 {
80 	enum pm_ret_status ret;
81 
82 	VERBOSE("CPU power down request received\n");
83 
84 	/* Send powerdown request to online secondary core(s) */
85 	ret = psci_stop_other_cores(PWRDWN_WAIT_TIMEOUT, raise_pwr_down_interrupt);
86 	if (ret != PSCI_E_SUCCESS) {
87 		ERROR("Failed to powerdown secondary core(s)\n");
88 	}
89 
90 	/* Clear IPI IRQ */
91 	pm_ipi_irq_clear(primary_proc);
92 
93 	/* Deactivate IPI IRQ */
94 	plat_ic_end_of_interrupt(PLAT_VERSAL_IPI_IRQ);
95 }
96 
/**
 * ipi_fiq_handler() - EL3 handler for the shared IPI interrupt.
 * @id: Interrupt ID to deactivate at the end of the handler.
 * @flags: Interrupt flags (unused).
 * @handle: Pointer to caller's context structure (unused).
 * @cookie: Opaque registration cookie (unused).
 *
 * Distinguishes IPIs from non-PMC agents (forwarded to the rich-OS mailbox
 * driver via MBOX_SGI_SHARED_IPI) from PMC callbacks, which are fetched and
 * dispatched here (suspend notification, CPU power-down event).
 *
 * Return: Always 0.
 */
static uint64_t ipi_fiq_handler(uint32_t id, uint32_t flags, void *handle,
				void *cookie)
{
	uint32_t payload[4] = {0};
	enum pm_ret_status ret;
	int ipi_status, i;

	VERBOSE("Received IPI FIQ from firmware\n");

	console_flush();
	/* Acknowledge first; the matching EOI is issued on every exit path */
	(void)plat_ic_acknowledge_interrupt();

	/* Check status register for each IPI except PMC */
	for (i = IPI_ID_APU; i <= IPI_ID_5; i++) {
		ipi_status = ipi_mb_enquire_status(IPI_ID_APU, i);

		/* If any agent other than PMC has generated IPI FIQ then send SGI to mbox driver */
		if (ipi_status & IPI_MB_STATUS_RECV_PENDING) {
			plat_ic_raise_ns_sgi(MBOX_SGI_SHARED_IPI, read_mpidr_el1());
			break;
		}
	}

	/* If PMC has not generated interrupt then end ISR */
	ipi_status = ipi_mb_enquire_status(IPI_ID_APU, IPI_ID_PMC);
	if ((ipi_status & IPI_MB_STATUS_RECV_PENDING) == 0) {
		plat_ic_end_of_interrupt(id);
		return 0;
	}

	/* Handle PMC case: fetch the callback payload from firmware */
	ret = pm_get_callbackdata(payload, ARRAY_SIZE(payload), 0, 0);
	if (ret != PM_RET_SUCCESS) {
		/* Propagate the fetch error through payload[0] so the
		 * switch below lands in an error-handling case.
		 */
		payload[0] = ret;
	}

	switch (payload[0]) {
	case PM_INIT_SUSPEND_CB:
		/* Only notify the OS if it registered an SGI for callbacks */
		if (sgi != INVALID_SGI) {
			notify_os();
		}
		break;
	case PM_NOTIFY_CB:
		if (sgi != INVALID_SGI) {
			if (payload[2] == EVENT_CPU_PWRDWN) {
				/*
				 * Two-phase power-down: the first event is
				 * forwarded to the OS (sets the flag); the
				 * second one actually takes the cores down.
				 */
				if (pwrdwn_req_received) {
					pwrdwn_req_received = false;
					request_cpu_pwrdwn();
					(void)psci_cpu_off();
					break;
				} else {
					pwrdwn_req_received = true;
				}
			}
			notify_os();
		} else if (payload[2] == EVENT_CPU_PWRDWN) {
			/* No OS SGI registered: power down immediately */
			request_cpu_pwrdwn();
			(void)psci_cpu_off();
		}
		break;
	case PM_RET_ERROR_INVALID_CRC:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid CRC in the payload\n");
		break;

	default:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid IPI payload\n");
		break;
	}

	/* Clear FIQ */
	plat_ic_end_of_interrupt(id);

	return 0;
}
173 
174 /**
175  * pm_register_sgi() - PM register the IPI interrupt.
176  * @sgi_num: SGI number to be used for communication.
177  * @reset: Reset to invalid SGI when reset=1.
178  *
179  * Return: On success, the initialization function must return 0.
180  *         Any other return value will cause the framework to ignore
181  *         the service.
182  *
183  * Update the SGI number to be used.
184  *
185  */
186 int32_t pm_register_sgi(uint32_t sgi_num, uint32_t reset)
187 {
188 	if (reset == 1U) {
189 		sgi = INVALID_SGI;
190 		return 0;
191 	}
192 
193 	if (sgi != INVALID_SGI) {
194 		return -EBUSY;
195 	}
196 
197 	if (sgi_num >= GICV3_MAX_SGI_TARGETS) {
198 		return -EINVAL;
199 	}
200 
201 	sgi = (uint32_t)sgi_num;
202 	return 0;
203 }
204 
/**
 * pm_setup() - PM service setup.
 *
 * Return: On success, the initialization function must return 0.
 *         Any other return value will cause the framework to ignore
 *         the service.
 *
 * Initialization functions for Versal power management for
 * communicaton with PMC.
 *
 * Called from sip_svc_setup initialization function with the
 * rt_svc_init signature.
 *
 * Note: failures of the two interrupt registrations only emit warnings;
 * the value finally returned is that of pm_register_notifier().
 */
int32_t pm_setup(void)
{
	int32_t ret = 0;

	pm_ipi_init(primary_proc);
	/* Mark the service usable; pm_smc_handler() rejects SMCs until set */
	pm_up = true;

	/* register SGI handler for CPU power down request */
	ret = request_intr_type_el3(CPU_PWR_DOWN_REQ_INTR, cpu_pwrdwn_req_handler);
	if (ret != 0) {
		WARN("BL31: registering SGI interrupt failed\n");
	}

	/*
	 * Enable IPI IRQ
	 * assume the rich OS is OK to handle callback IRQs now.
	 * Even if we were wrong, it would not enable the IRQ in
	 * the GIC.
	 */
	pm_ipi_irq_enable(primary_proc);

	ret = request_intr_type_el3(PLAT_VERSAL_IPI_IRQ, ipi_fiq_handler);
	if (ret != 0) {
		WARN("BL31: registering IPI interrupt failed\n");
	}

	/* Route the IPI IRQ to a specific core (MODE bit set in IROUTER) */
	gicd_write_irouter(gicv3_driver_data->gicd_base, PLAT_VERSAL_IPI_IRQ, MODE);

	/* Register for idle callback during force power down/restart */
	ret = pm_register_notifier(primary_proc->node_id, EVENT_CPU_PWRDWN,
				   0x0U, 0x1U, SECURE_FLAG);
	if (ret != 0) {
		WARN("BL31: registering idle callback for restart/force power down failed\n");
	}

	return ret;
}
256 
257 /**
258  * eemi_for_compatibility() - EEMI calls handler for deprecated calls.
259  * @api_id: identifier for the API being called.
260  * @pm_arg: pointer to the argument data for the API call.
261  * @handle: Pointer to caller's context structure.
262  * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
263  *
264  * Return: If EEMI API found then, uintptr_t type address, else 0.
265  *
266  * Some EEMI API's use case needs to be changed in Linux driver, so they
267  * can take advantage of common EEMI handler in TF-A. As of now the old
268  * implementation of these APIs are required to maintain backward compatibility
269  * until their use case in linux driver changes.
270  *
271  */
272 static uintptr_t eemi_for_compatibility(uint32_t api_id, uint32_t *pm_arg,
273 					void *handle, uint32_t security_flag)
274 {
275 	enum pm_ret_status ret;
276 
277 	switch (api_id) {
278 
279 	case (uint32_t)PM_FEATURE_CHECK:
280 	{
281 		uint32_t result[PAYLOAD_ARG_CNT] = {0U};
282 
283 		ret = pm_feature_check(pm_arg[0], result, security_flag);
284 		SMC_RET2(handle, (uint64_t)ret | ((uint64_t)result[0] << 32U),
285 			 (uint64_t)result[1] | ((uint64_t)result[2] << 32U));
286 	}
287 
288 	case PM_LOAD_PDI:
289 	{
290 		ret = pm_load_pdi(pm_arg[0], pm_arg[1], pm_arg[2],
291 				  security_flag);
292 		SMC_RET1(handle, (uint64_t)ret);
293 	}
294 
295 	default:
296 		return (uintptr_t)0;
297 	}
298 }
299 
300 /**
301  * eemi_psci_debugfs_handler() - EEMI API invoked from PSCI.
302  * @api_id: identifier for the API being called.
303  * @pm_arg: pointer to the argument data for the API call.
304  * @handle: Pointer to caller's context structure.
305  * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
306  *
307  * These EEMI APIs performs CPU specific power management tasks.
308  * These EEMI APIs are invoked either from PSCI or from debugfs in kernel.
309  * These calls require CPU specific processing before sending IPI request to
310  * Platform Management Controller. For example enable/disable CPU specific
311  * interrupts. This requires separate handler for these calls and may not be
312  * handled using common eemi handler.
313  *
314  * Return: If EEMI API found then, uintptr_t type address, else 0.
315  *
316  */
317 static uintptr_t eemi_psci_debugfs_handler(uint32_t api_id, uint32_t *pm_arg,
318 					   void *handle, uint32_t security_flag)
319 {
320 	enum pm_ret_status ret;
321 
322 	switch (api_id) {
323 
324 	case (uint32_t)PM_SELF_SUSPEND:
325 		ret = pm_self_suspend(pm_arg[0], pm_arg[1], pm_arg[2],
326 				      pm_arg[3], security_flag);
327 		SMC_RET1(handle, (u_register_t)ret);
328 
329 	case (uint32_t)PM_FORCE_POWERDOWN:
330 		ret = pm_force_powerdown(pm_arg[0], pm_arg[1], security_flag);
331 		SMC_RET1(handle, (u_register_t)ret);
332 
333 	case (uint32_t)PM_REQ_SUSPEND:
334 		ret = pm_req_suspend(pm_arg[0], pm_arg[1], pm_arg[2],
335 				     pm_arg[3], security_flag);
336 		SMC_RET1(handle, (u_register_t)ret);
337 
338 	case (uint32_t)PM_ABORT_SUSPEND:
339 		ret = pm_abort_suspend(pm_arg[0], security_flag);
340 		SMC_RET1(handle, (u_register_t)ret);
341 
342 	case (uint32_t)PM_SYSTEM_SHUTDOWN:
343 		ret = pm_system_shutdown(pm_arg[0], pm_arg[1], security_flag);
344 		SMC_RET1(handle, (u_register_t)ret);
345 
346 	default:
347 		return (uintptr_t)0;
348 	}
349 }
350 
351 /**
352  * TF_A_specific_handler() - SMC handler for TF-A specific functionality.
353  * @api_id: identifier for the API being called.
354  * @pm_arg: pointer to the argument data for the API call.
355  * @handle: Pointer to caller's context structure.
356  * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
357  *
358  * These EEMI calls performs functionality that does not require
359  * IPI transaction. The handler ends in TF-A and returns requested data to
360  * kernel from TF-A.
361  *
362  * Return: If TF-A specific API found then, uintptr_t type address, else 0
363  *
364  */
365 static uintptr_t TF_A_specific_handler(uint32_t api_id, uint32_t *pm_arg,
366 				       void *handle, uint32_t security_flag)
367 {
368 	switch (api_id) {
369 
370 	case TF_A_PM_REGISTER_SGI:
371 	{
372 		int32_t ret;
373 
374 		ret = pm_register_sgi(pm_arg[0], pm_arg[1]);
375 		if (ret != 0) {
376 			SMC_RET1(handle, (uint32_t)PM_RET_ERROR_ARGS);
377 		}
378 
379 		SMC_RET1(handle, (uint32_t)PM_RET_SUCCESS);
380 	}
381 
382 	case PM_GET_CALLBACK_DATA:
383 	{
384 		uint32_t result[4] = {0};
385 		enum pm_ret_status ret;
386 
387 		ret = pm_get_callbackdata(result, ARRAY_SIZE(result), security_flag, 1U);
388 		if (ret != 0) {
389 			result[0] = ret;
390 		}
391 
392 		SMC_RET2(handle,
393 			(uint64_t)result[0] | ((uint64_t)result[1] << 32U),
394 			(uint64_t)result[2] | ((uint64_t)result[3] << 32U));
395 	}
396 
397 	case PM_GET_TRUSTZONE_VERSION:
398 		SMC_RET1(handle, (uint64_t)PM_RET_SUCCESS |
399 			 ((uint64_t)TZ_VERSION << 32U));
400 
401 	default:
402 		return (uintptr_t)0;
403 	}
404 }
405 
/**
 * eemi_handler() - Prepare EEMI payload and perform IPI transaction.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * EEMI - Embedded Energy Management Interface is Xilinx proprietary protocol
 * to allow communication between power management controller and different
 * processing clusters.
 *
 * This handler prepares EEMI protocol payload received from kernel and performs
 * IPI transaction.
 *
 * Return: If EEMI API found then, uintptr_t type address, else 0
 *
 */
static uintptr_t eemi_handler(uint32_t api_id, uint32_t *pm_arg,
			      void *handle, uint32_t security_flag)
{
	enum pm_ret_status ret;
	uint32_t buf[PAYLOAD_ARG_CNT] = {0};

	/* NOTE(review): buf is cast to uint64_t *; presumably
	 * pm_handle_eemi_call() fills it as 32-bit words — confirm the
	 * callee's contract and the buffer's 8-byte alignment.
	 */
	ret = pm_handle_eemi_call(security_flag, api_id, pm_arg[0], pm_arg[1],
				  pm_arg[2], pm_arg[3], pm_arg[4],
				  (uint64_t *)buf);
	/*
	 * Two IOCTLs, to get clock name and pinctrl name of pm_query_data API
	 * receives 5 words of response from firmware. Currently linux driver can
	 * receive only 4 words from TF-A. So, this needs to be handled separately
	 * than other eemi calls.
	 */
	if (api_id == (uint32_t)PM_QUERY_DATA) {
		if ((pm_arg[0] == XPM_QID_CLOCK_GET_NAME ||
		    pm_arg[0] == XPM_QID_PINCTRL_GET_FUNCTION_NAME) &&
		    ret == PM_RET_SUCCESS) {
			/* Name queries: return buf[0..3] directly, no status */
			SMC_RET2(handle, (uint64_t)buf[0] | ((uint64_t)buf[1] << 32U),
				(uint64_t)buf[2] | ((uint64_t)buf[3] << 32U));
		}
	}

	/* Default: status in the low word, then buf[0..2] */
	SMC_RET2(handle, (uint64_t)ret | ((uint64_t)buf[0] << 32U),
		 (uint64_t)buf[1] | ((uint64_t)buf[2] << 32U));
}
450 
451 /**
452  * pm_smc_handler() - SMC handler for PM-API calls coming from EL1/EL2.
453  * @smc_fid: Function Identifier.
454  * @x1: SMC64 Arguments from kernel.
455  * @x2: SMC64 Arguments from kernel.
456  * @x3: SMC64 Arguments from kernel (upper 32-bits).
457  * @x4: Unused.
458  * @cookie: Unused.
459  * @handle: Pointer to caller's context structure.
460  * @flags: SECURE_FLAG or NON_SECURE_FLAG.
461  *
462  * Return: Unused.
463  *
464  * Determines that smc_fid is valid and supported PM SMC Function ID from the
465  * list of pm_api_ids, otherwise completes the request with
466  * the unknown SMC Function ID.
467  *
468  * The SMC calls for PM service are forwarded from SIP Service SMC handler
469  * function with rt_svc_handle signature.
470  *
471  */
472 uint64_t pm_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
473 			uint64_t x4, const void *cookie, void *handle, uint64_t flags)
474 {
475 	uintptr_t ret;
476 	uint32_t pm_arg[PAYLOAD_ARG_CNT] = {0};
477 	uint32_t security_flag = NON_SECURE_FLAG;
478 	uint32_t api_id;
479 	bool status = false, status_tmp = false;
480 
481 	/* Handle case where PM wasn't initialized properly */
482 	if (pm_up == false) {
483 		SMC_RET1(handle, SMC_UNK);
484 	}
485 
486 	/*
487 	 * Mark BIT24 payload (i.e 1st bit of pm_arg[3] ) as secure (0)
488 	 * if smc called is secure
489 	 *
490 	 * Add redundant macro call to immune the code from glitches
491 	 */
492 	SECURE_REDUNDANT_CALL(status, status_tmp, is_caller_secure, flags);
493 	if ((status != false) && (status_tmp != false)) {
494 		security_flag = SECURE_FLAG;
495 	}
496 
497 	pm_arg[0] = (uint32_t)x1;
498 	pm_arg[1] = (uint32_t)(x1 >> 32U);
499 	pm_arg[2] = (uint32_t)x2;
500 	pm_arg[3] = (uint32_t)(x2 >> 32U);
501 	pm_arg[4] = (uint32_t)x3;
502 	(void)(x4);
503 	api_id = smc_fid & FUNCID_NUM_MASK;
504 
505 	ret = eemi_for_compatibility(api_id, pm_arg, handle, security_flag);
506 	if (ret != (uintptr_t)0) {
507 		return ret;
508 	}
509 
510 	ret = eemi_psci_debugfs_handler(api_id, pm_arg, handle, flags);
511 	if (ret !=  (uintptr_t)0) {
512 		return ret;
513 	}
514 
515 	ret = TF_A_specific_handler(api_id, pm_arg, handle, security_flag);
516 	if (ret !=  (uintptr_t)0) {
517 		return ret;
518 	}
519 
520 	ret = eemi_handler(api_id, pm_arg, handle, security_flag);
521 
522 	return ret;
523 }
524