xref: /rk3399_ARM-atf/plat/xilinx/common/pm_service/pm_svc_main.c (revision c3280df1bb95ed09b5d5f91f8977bbe99c6a923b)
/*
 * Copyright (c) 2019-2022, Xilinx, Inc. All rights reserved.
 * Copyright (c) 2022-2023, Advanced Micro Devices, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * Top-level SMC handler for Versal power management calls and
 * IPI setup functions for communication with PMC.
 */

#include <errno.h>
#include <stdbool.h>

#include "../drivers/arm/gic/v3/gicv3_private.h"

#include <common/runtime_svc.h>
#include <drivers/arm/gicv3.h>
#include <lib/psci/psci.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>

#include <plat_private.h>
#include "pm_api_sys.h"
#include "pm_client.h"
#include "pm_ipi.h"
#include "pm_svc_main.h"

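/*
 * MODE sets the Interrupt_Routing_Mode bit (bit 31) of GICD_IROUTER so the
 * IPI IRQ is delivered in 1-of-N mode, i.e. to any participating PE.
 */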
#define MODE				0x80000000U

#define XSCUGIC_SGIR_EL1_INITID_SHIFT    24U
#define INVALID_SGI    0xFFU
#define PM_INIT_SUSPEND_CB	(30U)
#define PM_NOTIFY_CB		(32U)
#define EVENT_CPU_PWRDWN	(4U)
/* 1 sec of wait timeout for secondary core down */
#define PWRDWN_WAIT_TIMEOUT	(1000U)
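/*
 * Accessor for ICC_ASGI1R_EL1 (S3_0_C12_C11_6), the Alias SGI Group 1
 * register, which generates a Group 1 SGI for the other Security state and
 * is used here to signal the non-secure OS.
 */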
DEFINE_RENAME_SYSREG_RW_FUNCS(icc_asgi1r_el1, S3_0_C12_C11_6)

/* pm_up = true - UP, pm_up = false - DOWN */
static bool pm_up;
static uint32_t sgi = (uint32_t)INVALID_SGI;
static bool pwrdwn_req_received;

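/*
 * notify_os() - Raise the OS-registered SGI on the current core.
 *
 * Writes ICC_ASGI1R_EL1 with the registered SGI number in the INTID field
 * (bits [27:24]) and the current core selected in TargetList (bits [15:0]),
 * so the rich OS is interrupted to fetch the pending callback data.
 */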
static void notify_os(void)
{
	uint32_t reg;

	/* TargetList[15:0] is a bitmask with one bit per PE: target this core only */
	reg = ((1U << plat_my_core_pos()) |
	       (sgi << XSCUGIC_SGIR_EL1_INITID_SHIFT));
	write_icc_asgi1r_el1(reg);
}

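/*
 * cpu_pwrdwn_req_handler() - EL3 handler for the CPU power down SGI.
 *
 * Runs on each core that receives CPU_PWR_DOWN_REQ_INTR: it deactivates the
 * SGI and takes the core offline through psci_cpu_off().
 */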
static uint64_t cpu_pwrdwn_req_handler(uint32_t id, uint32_t flags,
				       void *handle, void *cookie)
{
	uint32_t cpu_id = plat_my_core_pos();

	VERBOSE("Powering down CPU %u\n", cpu_id);

	/* Deactivate CPU power down SGI */
	plat_ic_end_of_interrupt(CPU_PWR_DOWN_REQ_INTR);

	return psci_cpu_off();
}

/**
 * raise_pwr_down_interrupt() - Callback to raise the power down SGI.
 * @mpidr: MPIDR of the target CPU.
 *
 * Raise an EL3 SGI on the CPU identified by @mpidr to trigger its power down
 * sequence. Used as the per-CPU callback for psci_stop_other_cores(), which
 * invokes it for every online secondary core.
 */
static void raise_pwr_down_interrupt(u_register_t mpidr)
{
	plat_ic_raise_el3_sgi(CPU_PWR_DOWN_REQ_INTR, mpidr);
}

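/*
 * request_cpu_pwrdwn() - Handle a firmware CPU power down request.
 *
 * Stops all other online cores (psci_stop_other_cores() raises the power
 * down SGI on each of them via raise_pwr_down_interrupt()), then clears and
 * deactivates the IPI interrupt on this core. The caller is expected to
 * power this core down afterwards.
 */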
static void request_cpu_pwrdwn(void)
{
	int32_t ret;

	VERBOSE("CPU power down request received\n");

	/* Send powerdown request to online secondary core(s) */
	ret = psci_stop_other_cores(PWRDWN_WAIT_TIMEOUT, raise_pwr_down_interrupt);
	if (ret != PSCI_E_SUCCESS) {
		ERROR("Failed to power down secondary core(s)\n");
	}

	/* Clear IPI IRQ */
	pm_ipi_irq_clear(primary_proc);

	/* Deactivate IPI IRQ */
	plat_ic_end_of_interrupt(PLAT_VERSAL_IPI_IRQ);
}

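/*
 * ipi_fiq_handler() - EL3 handler for the IPI FIQ raised by the PMC firmware.
 *
 * Fetches the buffered callback payload and dispatches on the callback ID.
 * For PM_NOTIFY_CB with EVENT_CPU_PWRDWN the handshake is two-step: the first
 * notification is only forwarded to the rich OS through the registered SGI;
 * the second one powers down the secondary cores and then this core.
 */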
static uint64_t ipi_fiq_handler(uint32_t id, uint32_t flags, void *handle,
				void *cookie)
{
	uint32_t payload[4] = {0};
	enum pm_ret_status ret;

	VERBOSE("Received IPI FIQ from firmware\n");

	(void)plat_ic_acknowledge_interrupt();

	ret = pm_get_callbackdata(payload, ARRAY_SIZE(payload), 0, 0);
	if (ret != PM_RET_SUCCESS) {
		payload[0] = ret;
	}

	switch (payload[0]) {
	case PM_INIT_SUSPEND_CB:
		if (sgi != INVALID_SGI) {
			notify_os();
		}
		break;
	case PM_NOTIFY_CB:
		if (sgi != INVALID_SGI) {
			if (payload[2] == EVENT_CPU_PWRDWN) {
				if (pwrdwn_req_received) {
					pwrdwn_req_received = false;
					request_cpu_pwrdwn();
					(void)psci_cpu_off();
					break;
				} else {
					pwrdwn_req_received = true;
				}
			}
			notify_os();
		}
		break;
	case PM_RET_ERROR_INVALID_CRC:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid CRC in the payload\n");
		break;

	default:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid IPI payload\n");
		break;
	}

	/* Clear FIQ */
	plat_ic_end_of_interrupt(id);

	return 0;
}

/**
 * pm_register_sgi() - Register the SGI used to notify the OS of PM callbacks.
 * @sgi_num: SGI number to be used for communication.
 * @reset: Reset to invalid SGI when reset=1.
 *
 * Update the SGI number to be used.
 *
 * Return: 0 on success, -EBUSY if an SGI is already registered and
 *         -EINVAL if @sgi_num is not a valid SGI number.
 */
int32_t pm_register_sgi(uint32_t sgi_num, uint32_t reset)
{
	if (reset == 1U) {
		sgi = INVALID_SGI;
		return 0;
	}

	if (sgi != INVALID_SGI) {
		return -EBUSY;
	}

	if (sgi_num >= GICV3_MAX_SGI_TARGETS) {
		return -EINVAL;
	}

	sgi = (uint32_t)sgi_num;
	return 0;
}
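
/*
 * Illustrative sketch only: the rich OS reaches pm_register_sgi() through the
 * TF_A_PM_REGISTER_SGI case in TF_A_specific_handler() below. Assuming the
 * Linux arm_smccc_smc() helper and a hypothetical REGISTER_SGI_FID function
 * ID, registering SGI 15 would look roughly like:
 *
 *   struct arm_smccc_res res;
 *
 *   arm_smccc_smc(REGISTER_SGI_FID, 15, 0, 0, 0, 0, 0, 0, &res);
 *
 * where x1 carries sgi_num in its lower 32 bits and the reset flag in its
 * upper 32 bits, matching the pm_arg[] unpacking in pm_smc_handler().
 */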

/**
 * pm_setup() - PM service setup.
 *
 * Return: On success, the initialization function must return 0.
 *         Any other return value will cause the framework to ignore
 *         the service.
 *
 * Initialization function for Versal power management, setting up
 * communication with the PMC.
 *
 * Called from the sip_svc_setup initialization function with the
 * rt_svc_init signature.
 */
int32_t pm_setup(void)
{
	int32_t ret = 0;

	pm_ipi_init(primary_proc);
	pm_up = true;

	/* Register the SGI handler for the CPU power down request */
	ret = request_intr_type_el3(CPU_PWR_DOWN_REQ_INTR, cpu_pwrdwn_req_handler);
	if (ret != 0) {
		WARN("BL31: registering SGI interrupt failed\n");
	}

	/*
	 * Enable the IPI IRQ and assume the rich OS is ready to handle
	 * callback IRQs now. Even if that assumption is wrong, this call
	 * does not enable the IRQ in the GIC.
	 */
	pm_ipi_irq_enable(primary_proc);

	ret = request_intr_type_el3(PLAT_VERSAL_IPI_IRQ, ipi_fiq_handler);
	if (ret != 0) {
		WARN("BL31: registering IPI interrupt failed\n");
	}

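	/* Route the IPI IRQ in 1-of-N mode so any online PE can take it */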
	gicd_write_irouter(gicv3_driver_data->gicd_base, PLAT_VERSAL_IPI_IRQ, MODE);
	return ret;
}

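/*
 * Return value packing used by the EEMI handlers below: the EEMI status code
 * goes in the lower 32 bits of x0 and the first data word in its upper
 * 32 bits; further data words are packed into x1 the same way, e.g.
 *
 *   SMC_RET2(handle, (uint64_t)ret | ((uint64_t)data[0] << 32U),
 *            (uint64_t)data[1] | ((uint64_t)data[2] << 32U));
 */
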
/**
 * eemi_for_compatibility() - EEMI call handler for deprecated calls.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * Return: If the EEMI API is handled here, a non-zero uintptr_t value
 *         (the result of the SMC_RET macros), else 0.
 *
 * The use of some EEMI APIs needs to change in the Linux driver before they
 * can go through the common EEMI handler in TF-A. Until then, the old
 * implementation of these APIs is kept here to maintain backward
 * compatibility.
 */
static uintptr_t eemi_for_compatibility(uint32_t api_id, uint32_t *pm_arg,
					void *handle, uint32_t security_flag)
{
	enum pm_ret_status ret;

	switch (api_id) {

	case (uint32_t)PM_IOCTL:
	{
		uint32_t value = 0U;

		ret = pm_api_ioctl(pm_arg[0], pm_arg[1], pm_arg[2],
				   pm_arg[3], pm_arg[4],
				   &value, security_flag);
		if (ret == PM_RET_ERROR_NOTSUPPORTED) {
			return (uintptr_t)0;
		}

		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)value) << 32U);
	}

	case (uint32_t)PM_QUERY_DATA:
	{
		uint32_t data[PAYLOAD_ARG_CNT] = { 0 };

		ret = pm_query_data(pm_arg[0], pm_arg[1], pm_arg[2],
				    pm_arg[3], data, security_flag);

		SMC_RET2(handle, (uint64_t)ret | ((uint64_t)data[0] << 32U),
			 (uint64_t)data[1] | ((uint64_t)data[2] << 32U));
	}

	case (uint32_t)PM_FEATURE_CHECK:
	{
		uint32_t result[PAYLOAD_ARG_CNT] = {0U};

		ret = pm_feature_check(pm_arg[0], result, security_flag);
		SMC_RET2(handle, (uint64_t)ret | ((uint64_t)result[0] << 32U),
			 (uint64_t)result[1] | ((uint64_t)result[2] << 32U));
	}

	case PM_LOAD_PDI:
	{
		ret = pm_load_pdi(pm_arg[0], pm_arg[1], pm_arg[2],
				  security_flag);
		SMC_RET1(handle, (uint64_t)ret);
	}

	default:
		return (uintptr_t)0;
	}
}

/**
 * eemi_psci_debugfs_handler() - Handler for EEMI APIs invoked from PSCI.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * These EEMI APIs perform CPU-specific power management tasks and are invoked
 * either from PSCI or from debugfs in the kernel. They require CPU-specific
 * processing (for example enabling or disabling CPU-specific interrupts)
 * before the IPI request is sent to the Platform Management Controller, so
 * they need a separate handler and cannot go through the common EEMI handler.
 *
 * Return: If the EEMI API is handled here, a non-zero uintptr_t value
 *         (the result of the SMC_RET macros), else 0.
 */
static uintptr_t eemi_psci_debugfs_handler(uint32_t api_id, uint32_t *pm_arg,
					   void *handle, uint32_t security_flag)
{
	enum pm_ret_status ret;

	switch (api_id) {

	case (uint32_t)PM_SELF_SUSPEND:
		ret = pm_self_suspend(pm_arg[0], pm_arg[1], pm_arg[2],
				      pm_arg[3], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	case (uint32_t)PM_FORCE_POWERDOWN:
		ret = pm_force_powerdown(pm_arg[0], pm_arg[1], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	case (uint32_t)PM_REQ_SUSPEND:
		ret = pm_req_suspend(pm_arg[0], pm_arg[1], pm_arg[2],
				     pm_arg[3], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	case (uint32_t)PM_ABORT_SUSPEND:
		ret = pm_abort_suspend(pm_arg[0], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	case (uint32_t)PM_SYSTEM_SHUTDOWN:
		ret = pm_system_shutdown(pm_arg[0], pm_arg[1], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	default:
		return (uintptr_t)0;
	}
}

/**
 * TF_A_specific_handler() - SMC handler for TF-A specific functionality.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * These calls perform functionality that does not require an IPI transaction:
 * the request is handled entirely in TF-A and the requested data is returned
 * to the kernel from here.
 *
 * Return: If a TF-A specific API is handled here, a non-zero uintptr_t value
 *         (the result of the SMC_RET macros), else 0.
 */
static uintptr_t TF_A_specific_handler(uint32_t api_id, uint32_t *pm_arg,
				       void *handle, uint32_t security_flag)
{
	switch (api_id) {

	case TF_A_PM_REGISTER_SGI:
	{
		int32_t ret;

		ret = pm_register_sgi(pm_arg[0], pm_arg[1]);
		if (ret != 0) {
			SMC_RET1(handle, (uint32_t)PM_RET_ERROR_ARGS);
		}

		SMC_RET1(handle, (uint32_t)PM_RET_SUCCESS);
	}

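	/*
	 * After the registered SGI fires, the rich OS typically issues
	 * PM_GET_CALLBACK_DATA to fetch the buffered callback payload.
	 */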
	case PM_GET_CALLBACK_DATA:
	{
		uint32_t result[4] = {0};
		enum pm_ret_status ret;

		ret = pm_get_callbackdata(result, ARRAY_SIZE(result), security_flag, 1U);
		if (ret != 0) {
			result[0] = ret;
		}

		SMC_RET2(handle,
			(uint64_t)result[0] | ((uint64_t)result[1] << 32U),
			(uint64_t)result[2] | ((uint64_t)result[3] << 32U));
	}

	case PM_GET_TRUSTZONE_VERSION:
		SMC_RET1(handle, (uint64_t)PM_RET_SUCCESS |
			 ((uint64_t)TZ_VERSION << 32U));

	default:
		return (uintptr_t)0;
	}
}

/**
 * eemi_handler() - Prepare the EEMI payload and perform the IPI transaction.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * EEMI (Embedded Energy Management Interface) is the Xilinx proprietary
 * protocol that allows communication between the power management controller
 * and the different processing clusters.
 *
 * This handler prepares the EEMI payload received from the kernel and
 * performs the IPI transaction.
 *
 * Return: The non-zero uintptr_t result of the SMC_RET macros.
 */
static uintptr_t eemi_handler(uint32_t api_id, uint32_t *pm_arg,
			      void *handle, uint32_t security_flag)
{
	enum pm_ret_status ret;
	uint32_t buf[PAYLOAD_ARG_CNT] = {0};

	ret = pm_handle_eemi_call(security_flag, api_id, pm_arg[0], pm_arg[1],
				  pm_arg[2], pm_arg[3], pm_arg[4],
				  (uint64_t *)buf);
	/*
	 * The two PM_QUERY_DATA IDs that return a clock name or a pinctrl
	 * function name receive 5 words of response from the firmware, while
	 * the Linux driver can currently receive only 4 words from TF-A, so
	 * they have to be handled separately from the other EEMI calls.
	 */
	if (api_id == (uint32_t)PM_QUERY_DATA) {
		if ((pm_arg[0] == XPM_QID_CLOCK_GET_NAME ||
		    pm_arg[0] == XPM_QID_PINCTRL_GET_FUNCTION_NAME) &&
		    ret == PM_RET_SUCCESS) {
			SMC_RET2(handle, (uint64_t)buf[0] | ((uint64_t)buf[1] << 32U),
				(uint64_t)buf[2] | ((uint64_t)buf[3] << 32U));
		}
	}

	SMC_RET2(handle, (uint64_t)ret | ((uint64_t)buf[0] << 32U),
		 (uint64_t)buf[1] | ((uint64_t)buf[2] << 32U));
}

/**
 * pm_smc_handler() - SMC handler for PM-API calls coming from EL1/EL2.
 * @smc_fid: Function Identifier.
 * @x1: SMC64 argument from the kernel (EEMI arguments 0 and 1).
 * @x2: SMC64 argument from the kernel (EEMI arguments 2 and 3).
 * @x3: SMC64 argument from the kernel (EEMI argument 4 in the lower 32 bits).
 * @x4: Unused.
 * @cookie: Unused.
 * @handle: Pointer to caller's context structure.
 * @flags: SMC call flags, used to determine whether the caller is secure.
 *
 * Return: Unused.
 *
 * Checks that smc_fid is a valid and supported PM SMC Function ID from the
 * list of pm_api_ids; otherwise the request is completed with the unknown
 * SMC Function ID.
 *
 * The SMC calls for the PM service are forwarded from the SiP Service SMC
 * handler function with the rt_svc_handle signature.
 */
uint64_t pm_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
			uint64_t x4, const void *cookie, void *handle, uint64_t flags)
{
	uintptr_t ret;
	uint32_t pm_arg[PAYLOAD_ARG_CNT] = {0};
	uint32_t security_flag = NON_SECURE_FLAG;
	uint32_t api_id;
	bool status = false, status_tmp = false;

	/* Handle the case where PM wasn't initialized properly */
	if (pm_up == false) {
		SMC_RET1(handle, SMC_UNK);
	}

	/*
	 * Mark the BIT24 payload (i.e. the first bit of pm_arg[3]) as secure (0)
	 * if the SMC call is secure.
	 *
	 * The redundant macro call makes the code immune to glitches.
	 */
	SECURE_REDUNDANT_CALL(status, status_tmp, is_caller_secure, flags);
	if ((status != false) && (status_tmp != false)) {
		security_flag = SECURE_FLAG;
	}

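	/*
	 * EEMI arguments are packed two 32-bit words per SMC64 register: for a
	 * call with arguments (a0, a1, a2, a3, a4) the kernel passes
	 * x1 = a0 | (a1 << 32), x2 = a2 | (a3 << 32) and x3 = a4.
	 */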
	pm_arg[0] = (uint32_t)x1;
	pm_arg[1] = (uint32_t)(x1 >> 32U);
	pm_arg[2] = (uint32_t)x2;
	pm_arg[3] = (uint32_t)(x2 >> 32U);
	pm_arg[4] = (uint32_t)x3;
	(void)(x4);
	api_id = smc_fid & FUNCID_NUM_MASK;

	ret = eemi_for_compatibility(api_id, pm_arg, handle, security_flag);
	if (ret != (uintptr_t)0) {
		return ret;
	}

	ret = eemi_psci_debugfs_handler(api_id, pm_arg, handle, security_flag);
	if (ret != (uintptr_t)0) {
		return ret;
	}

	ret = TF_A_specific_handler(api_id, pm_arg, handle, security_flag);
	if (ret != (uintptr_t)0) {
		return ret;
	}

	ret = eemi_handler(api_id, pm_arg, handle, security_flag);

	return ret;
}