xref: /rk3399_ARM-atf/plat/xilinx/common/pm_service/pm_svc_main.c (revision b5f120b53182b3819ee14c1c10cce22714a57bf6)
1 /*
2  * Copyright (c) 2019-2022, Xilinx, Inc. All rights reserved.
3  * Copyright (c) 2022-2025, Advanced Micro Devices, Inc. All rights reserved.
4  *
5  * SPDX-License-Identifier: BSD-3-Clause
6  */
7 
8 /*
9  * Top-level SMC handler for Versal power management calls and
10  * IPI setup functions for communication with PMC.
11  */
12 
13 #include <errno.h>
14 #include <stdbool.h>
15 
16 #include "../drivers/arm/gic/v3/gicv3_private.h"
17 
18 #include <common/ep_info.h>
19 #include <common/runtime_svc.h>
20 #include <drivers/arm/gicv3.h>
21 #include <lib/psci/psci.h>
22 #include <plat/arm/common/plat_arm.h>
23 #include <plat/common/platform.h>
24 
25 #include <plat_private.h>
26 #include "pm_api_sys.h"
27 #include "pm_client.h"
28 #include "pm_ipi.h"
29 #include "pm_svc_main.h"
30 
31 #define MODE				0x80000000U
32 
33 #define INVALID_SGI    0xFFU
34 #define PM_INIT_SUSPEND_CB	(30U)
35 #define PM_NOTIFY_CB		(32U)
36 #define EVENT_CPU_PWRDWN	(4U)
37 #define MBOX_SGI_SHARED_IPI	(7U)
38 
39 /**
40  * upper_32_bits - return bits 32-63 of a number
41  * @n: the number we're accessing
42  */
43 #define upper_32_bits(n)	((uint32_t)((n) >> 32U))
44 
45 /**
46  * lower_32_bits - return bits 0-31 of a number
47  * @n: the number we're accessing
48  */
49 #define lower_32_bits(n)	((uint32_t)((n) & 0xffffffffU))
50 
51 /**
 * EXTRACT_ARGS - extracts 32-bit payloads from 64-bit SMC arguments
53  * @pm_arg: array of 32-bit payloads
54  * @x: array of 64-bit SMC arguments
55  */
56 #define EXTRACT_ARGS(pm_arg, x)						\
57 	for (uint32_t i = 0U; i < (PAYLOAD_ARG_CNT - 1U); i++) {	\
58 		if ((i % 2U) != 0U) {					\
59 			pm_arg[i] = lower_32_bits(x[(i / 2U) + 1U]);	\
60 		} else {						\
61 			pm_arg[i] = upper_32_bits(x[i / 2U]);		\
62 		}							\
63 	}
64 
65 /* 1 sec of wait timeout for secondary core down */
66 #define PWRDWN_WAIT_TIMEOUT	(1000U)
67 
68 /* pm_up = true - UP, pm_up = false - DOWN */
69 static bool pm_up;
70 static uint32_t sgi = (uint32_t)INVALID_SGI;
71 static bool pwrdwn_req_received;
72 
73 bool pm_pwrdwn_req_status(void)
74 {
75 	return pwrdwn_req_received;
76 }
77 
78 static void notify_os(void)
79 {
80 	plat_ic_raise_ns_sgi((int)sgi, read_mpidr_el1());
81 }
82 
83 static uint64_t cpu_pwrdwn_req_handler(uint32_t id, uint32_t flags,
84 				       void *handle, void *cookie)
85 {
86 	(void)id;
87 	(void)flags;
88 	(void)handle;
89 	(void)cookie;
90 	uint32_t cpu_id = plat_my_core_pos();
91 
92 	VERBOSE("Powering down CPU %d\n", cpu_id);
93 
94 	/* Deactivate CPU power down SGI */
95 	plat_ic_end_of_interrupt(CPU_PWR_DOWN_REQ_INTR);
96 
97 	return (uint64_t)psci_cpu_off();
98 }
99 
100 /**
101  * raise_pwr_down_interrupt() - Callback function to raise SGI.
102  * @mpidr: MPIDR for the target CPU.
103  *
104  * Raise SGI interrupt to trigger the CPU power down sequence on all the
105  * online secondary cores.
106  */
107 static void raise_pwr_down_interrupt(u_register_t mpidr)
108 {
109 	plat_ic_raise_el3_sgi((int)CPU_PWR_DOWN_REQ_INTR, mpidr);
110 }
111 
112 void request_cpu_pwrdwn(void)
113 {
114 	int ret;
115 
116 	VERBOSE("CPU power down request received\n");
117 
118 	/* Send powerdown request to online secondary core(s) */
119 	ret = psci_stop_other_cores(plat_my_core_pos(), PWRDWN_WAIT_TIMEOUT,
120 				    raise_pwr_down_interrupt);
121 	if (ret != PSCI_E_SUCCESS) {
122 		ERROR("Failed to powerdown secondary core(s)\n");
123 	}
124 
125 	/* Clear IPI IRQ */
126 	pm_ipi_irq_clear(primary_proc);
127 
128 	/* Deactivate IPI IRQ */
129 	plat_ic_end_of_interrupt(PLAT_VERSAL_IPI_IRQ);
130 }
131 
/**
 * ipi_fiq_handler() - EL3 FIQ handler for IPIs raised towards the APU.
 * @id: interrupt ID, used to signal end-of-interrupt back to the GIC.
 * @flags: interrupt flags (unused).
 * @handle: pointer to caller's context (unused).
 * @cookie: opaque cookie (unused).
 *
 * Acknowledges the FIQ, forwards non-PMC agent IPIs to the OS mailbox
 * driver via SGI, and processes PMC callback payloads (suspend/notify/CRC
 * error). The acknowledge/EOI ordering is deliberate; do not reorder.
 *
 * Return: always 0.
 */
static uint64_t ipi_fiq_handler(uint32_t id, uint32_t flags, void *handle,
				void *cookie)
{
	(void)flags;
	(void)handle;
	(void)cookie;
	uint32_t payload[4] = {0};
	enum pm_ret_status ret;
	uint32_t ipi_status, i;

	VERBOSE("Received IPI FIQ from firmware\n");

	console_flush();
	(void)plat_ic_acknowledge_interrupt();

	/* Check status register for each IPI except PMC */
	for (i = IPI_ID_APU; i <= IPI_ID_5; i++) {
		ipi_status = ipi_mb_enquire_status(IPI_ID_APU, i);

		/* If any agent other than PMC has generated IPI FIQ then send SGI to mbox driver */
		if ((ipi_status & IPI_MB_STATUS_RECV_PENDING) != 0U) {
			plat_ic_raise_ns_sgi((int)MBOX_SGI_SHARED_IPI, read_mpidr_el1());
			break;
		}
	}

	/* If PMC has not generated interrupt then end ISR */
	ipi_status = ipi_mb_enquire_status(IPI_ID_APU, IPI_ID_PMC);
	if ((ipi_status & IPI_MB_STATUS_RECV_PENDING) == 0U) {
		plat_ic_end_of_interrupt(id);
		goto exit_label;
	}

	/* Handle PMC case */
	ret = pm_get_callbackdata(payload, ARRAY_SIZE(payload), 0, 0);
	if (ret != PM_RET_SUCCESS) {
		/* Propagate the fetch error into the switch below as a payload. */
		payload[0] = (uint32_t)ret;
	}

	switch (payload[0]) {
	case PM_INIT_SUSPEND_CB:
		/* Suspend callback is only forwarded once the OS registered an SGI. */
		if (sgi != INVALID_SGI) {
			notify_os();
		}
		break;
	case PM_NOTIFY_CB:
		if (sgi != INVALID_SGI) {
			if ((payload[2] == EVENT_CPU_PWRDWN) &&
			    (NODECLASS(payload[1]) == (uint32_t)XPM_NODECLASS_DEVICE)) {
				/*
				 * Two-phase handshake: the first power down event
				 * only arms pwrdwn_req_received and notifies the OS;
				 * the second one actually powers the cores down.
				 */
				if (pwrdwn_req_received) {
					pwrdwn_req_received = false;
					request_cpu_pwrdwn();
					(void)psci_cpu_off();
					break;
				} else {
					pwrdwn_req_received = true;
				}
			}
			notify_os();
		} else {
			/* No OS SGI registered: act on the power down event directly. */
			if ((payload[2] == EVENT_CPU_PWRDWN) &&
			    (NODECLASS(payload[1]) == (uint32_t)XPM_NODECLASS_DEVICE)) {
				request_cpu_pwrdwn();
				(void)psci_cpu_off();
			}
		}
		break;
	case (uint32_t)PM_RET_ERROR_INVALID_CRC:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid CRC in the payload\n");
		break;

	default:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid IPI payload\n");
		break;
	}

	/* Clear FIQ */
	plat_ic_end_of_interrupt(id);

exit_label:
	return 0;
}
216 
217 /**
218  * pm_register_sgi() - PM register the IPI interrupt.
219  * @sgi_num: SGI number to be used for communication.
220  * @reset: Reset to invalid SGI when reset=1.
221  *
222  * Return: On success, the initialization function must return 0.
223  *         Any other return value will cause the framework to ignore
224  *         the service.
225  *
226  * Update the SGI number to be used.
227  *
228  */
229 int32_t pm_register_sgi(uint32_t sgi_num, uint32_t reset)
230 {
231 	int32_t ret = 0;
232 
233 	if (reset == 1U) {
234 		sgi = INVALID_SGI;
235 	} else if (sgi != INVALID_SGI) {
236 		ret = -EBUSY;
237 	} else if (sgi_num >= GICV3_MAX_SGI_TARGETS) {
238 		ret = -EINVAL;
239 	} else {
240 		sgi = (uint32_t)sgi_num;
241 	}
242 
243 	return ret;
244 }
245 
/**
 * pm_setup() - PM service setup.
 *
 * Return: On success, the initialization function must return 0.
 *         Any other return value will cause the framework to ignore
 *         the service.
 *
 * Initialization functions for Versal power management for
 * communication with PMC.
 *
 * Called from sip_svc_setup initialization function with the
 * rt_svc_init signature.
 *
 */
int32_t pm_setup(void)
{
	int32_t ret = 0;

	pm_ipi_init(primary_proc);
	/* Mark the service usable; pm_smc_handler rejects calls until this is set. */
	pm_up = true;
	pwrdwn_req_received = false;

	/* register SGI handler for CPU power down request */
	ret = request_intr_type_el3(CPU_PWR_DOWN_REQ_INTR, cpu_pwrdwn_req_handler);
	if (ret != 0) {
		WARN("BL31: registering SGI interrupt failed\n");
	}

	/*
	 * Enable IPI IRQ
	 * assume the rich OS is OK to handle callback IRQs now.
	 * Even if we were wrong, it would not enable the IRQ in
	 * the GIC.
	 */
	pm_ipi_irq_enable(primary_proc);

	ret = request_intr_type_el3(PLAT_VERSAL_IPI_IRQ, ipi_fiq_handler);
	if (ret != 0) {
		WARN("BL31: registering IPI interrupt failed\n");
	}

	/* MODE (bit 31) selects 1-of-N routing per the GICv3 IROUTER layout. */
	gicd_write_irouter(gicv3_driver_data->gicd_base, PLAT_VERSAL_IPI_IRQ, MODE);

	/* Register for idle callback during force power down/restart */
	ret = (int32_t)pm_register_notifier(primary_proc->node_id, EVENT_CPU_PWRDWN,
					    0x0U, 0x1U, SECURE);
	if (ret != 0) {
		/* Non-zero ret here makes the framework ignore the PM service. */
		WARN("BL31: registering idle callback for restart/force power down failed\n");
	}

	return ret;
}
298 
299 /**
300  * eemi_for_compatibility() - EEMI calls handler for deprecated calls.
301  * @api_id: identifier for the API being called.
302  * @pm_arg: pointer to the argument data for the API call.
303  * @handle: Pointer to caller's context structure.
304  * @security_flag: SECURE or NON_SECURE.
305  *
306  * Return: If EEMI API found then, uintptr_t type address, else 0.
307  *
308  * Some EEMI API's use case needs to be changed in Linux driver, so they
309  * can take advantage of common EEMI handler in TF-A. As of now the old
310  * implementation of these APIs are required to maintain backward compatibility
311  * until their use case in linux driver changes.
312  *
313  */
314 static uintptr_t eemi_for_compatibility(uint32_t api_id, const uint32_t *pm_arg,
315 					void *handle, uint32_t security_flag)
316 {
317 	enum pm_ret_status ret;
318 
319 	switch (api_id) {
320 
321 	case (uint32_t)PM_FEATURE_CHECK:
322 	{
323 		uint32_t result[RET_PAYLOAD_ARG_CNT] = {0U};
324 
325 		ret = pm_feature_check(pm_arg[0], result, security_flag);
326 		SMC_RET2(handle, (uint64_t)ret | ((uint64_t)result[0] << 32U),
327 			 (uint64_t)result[1] | ((uint64_t)result[2] << 32U));
328 	}
329 
330 	case PM_LOAD_PDI:
331 	{
332 		ret = pm_load_pdi(pm_arg[0], pm_arg[1], pm_arg[2],
333 				  security_flag);
334 		SMC_RET1(handle, (uint64_t)ret);
335 	}
336 
337 	default:
338 		return (uintptr_t)0;
339 	}
340 }
341 
342 /**
343  * eemi_psci_debugfs_handler() - EEMI API invoked from PSCI.
344  * @api_id: identifier for the API being called.
345  * @pm_arg: pointer to the argument data for the API call.
346  * @handle: Pointer to caller's context structure.
347  * @security_flag: SECURE or NON_SECURE.
348  *
349  * These EEMI APIs performs CPU specific power management tasks.
350  * These EEMI APIs are invoked either from PSCI or from debugfs in kernel.
351  * These calls require CPU specific processing before sending IPI request to
352  * Platform Management Controller. For example enable/disable CPU specific
353  * interrupts. This requires separate handler for these calls and may not be
354  * handled using common eemi handler.
355  *
356  * Return: If EEMI API found then, uintptr_t type address, else 0.
357  *
358  */
359 static uintptr_t eemi_psci_debugfs_handler(uint32_t api_id, const uint32_t *pm_arg,
360 					   void *handle, uint32_t security_flag)
361 {
362 	enum pm_ret_status ret;
363 
364 	switch (api_id) {
365 
366 	case (uint32_t)PM_SELF_SUSPEND:
367 		ret = pm_self_suspend(pm_arg[0], pm_arg[1], pm_arg[2],
368 				      pm_arg[3], security_flag);
369 		SMC_RET1(handle, (u_register_t)ret);
370 
371 	case (uint32_t)PM_FORCE_POWERDOWN:
372 		ret = pm_force_powerdown(pm_arg[0], (uint8_t)pm_arg[1], security_flag);
373 		SMC_RET1(handle, (u_register_t)ret);
374 
375 	case (uint32_t)PM_ABORT_SUSPEND:
376 		ret = pm_abort_suspend((enum pm_abort_reason)pm_arg[0], security_flag);
377 		SMC_RET1(handle, (u_register_t)ret);
378 
379 	case (uint32_t)PM_SYSTEM_SHUTDOWN:
380 		ret = pm_system_shutdown(pm_arg[0], pm_arg[1], security_flag);
381 		SMC_RET1(handle, (u_register_t)ret);
382 
383 	default:
384 		return (uintptr_t)0;
385 	}
386 }
387 
388 /**
389  * tfa_clear_pm_state() - Reset TF-A-specific PM state.
390  *
391  * This function resets TF-A-specific state that may have been modified,
392  * such as during a kexec-based kernel reload. It resets the SGI number
393  * and the shutdown scope to its default value.
394  */
395 static enum pm_ret_status tfa_clear_pm_state(void)
396 {
397 	/* Reset SGI number to default value(-1). */
398 	sgi = (uint32_t)INVALID_SGI;
399 
400 	/* Reset the shutdown scope to its default value(system). */
401 	return pm_system_shutdown(XPM_SHUTDOWN_TYPE_SETSCOPE_ONLY, XPM_SHUTDOWN_SUBTYPE_RST_SYSTEM,
402 				  0U);
403 }
404 
405 /**
406  * TF_A_specific_handler() - SMC handler for TF-A specific functionality.
407  * @api_id: identifier for the API being called.
408  * @pm_arg: pointer to the argument data for the API call.
409  * @handle: Pointer to caller's context structure.
410  * @security_flag: SECURE or NON_SECURE.
411  *
412  * These EEMI calls performs functionality that does not require
413  * IPI transaction. The handler ends in TF-A and returns requested data to
414  * kernel from TF-A.
415  *
416  * Return: If TF-A specific API found then, uintptr_t type address, else 0
417  *
418  */
419 static uintptr_t TF_A_specific_handler(uint32_t api_id, const uint32_t *pm_arg,
420 				       void *handle, uint32_t security_flag)
421 {
422 	switch (api_id) {
423 
424 	case TF_A_FEATURE_CHECK:
425 	{
426 		enum pm_ret_status ret;
427 		uint32_t result[PAYLOAD_ARG_CNT] = {0U};
428 
429 		ret = tfa_api_feature_check(pm_arg[0], result);
430 		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)result[0] << 32U));
431 	}
432 
433 	case TF_A_PM_REGISTER_SGI:
434 	{
435 		int32_t ret;
436 
437 		ret = pm_register_sgi(pm_arg[0], pm_arg[1]);
438 		if (ret != 0) {
439 			SMC_RET1(handle, (uint32_t)PM_RET_ERROR_ARGS);
440 		}
441 
442 		SMC_RET1(handle, (uint32_t)PM_RET_SUCCESS);
443 	}
444 
445 	case PM_GET_CALLBACK_DATA:
446 	{
447 		uint32_t result[4] = {0};
448 		enum pm_ret_status ret;
449 
450 		ret = pm_get_callbackdata(result, ARRAY_SIZE(result), security_flag, 1U);
451 		if (ret != PM_RET_SUCCESS) {
452 			result[0] = (uint32_t)ret;
453 		}
454 
455 		SMC_RET2(handle,
456 			(uint64_t)result[0] | ((uint64_t)result[1] << 32U),
457 			(uint64_t)result[2] | ((uint64_t)result[3] << 32U));
458 	}
459 
460 	case PM_GET_TRUSTZONE_VERSION:
461 		SMC_RET1(handle, (uint64_t)PM_RET_SUCCESS |
462 			 ((uint64_t)TZ_VERSION << 32U));
463 
464 	case TF_A_CLEAR_PM_STATE:
465 	{
466 		enum pm_ret_status ret;
467 
468 		ret = tfa_clear_pm_state();
469 
470 		SMC_RET1(handle, (uint64_t)ret);
471 	}
472 
473 	default:
474 		return (uintptr_t)0;
475 	}
476 }
477 
478 /**
479  * eemi_handler() - Prepare EEMI payload and perform IPI transaction.
480  * @api_id: identifier for the API being called.
481  * @pm_arg: pointer to the argument data for the API call.
482  * @handle: Pointer to caller's context structure.
483  * @security_flag: SECURE or NON_SECURE.
484  *
485  * EEMI - Embedded Energy Management Interface is Xilinx proprietary protocol
486  * to allow communication between power management controller and different
487  * processing clusters.
488  *
489  * This handler prepares EEMI protocol payload received from kernel and performs
490  * IPI transaction.
491  *
492  * Return: If EEMI API found then, uintptr_t type address, else 0
493  *
494  */
495 static uintptr_t eemi_handler(uint32_t api_id, const uint32_t *pm_arg,
496 			      void *handle, uint32_t security_flag)
497 {
498 	enum pm_ret_status ret;
499 	uint32_t buf[RET_PAYLOAD_ARG_CNT] = {0};
500 
501 	ret = pm_handle_eemi_call(security_flag, api_id, pm_arg[0], pm_arg[1],
502 				  pm_arg[2], pm_arg[3], pm_arg[4], buf);
503 	/*
504 	 * Two IOCTLs, to get clock name and pinctrl name of pm_query_data API
505 	 * receives 5 words of respoonse from firmware. Currently linux driver can
506 	 * receive only 4 words from TF-A. So, this needs to be handled separately
507 	 * than other eemi calls.
508 	 */
509 	if (api_id == (uint32_t)PM_QUERY_DATA) {
510 		if (((pm_arg[0] == (uint32_t)XPM_QID_CLOCK_GET_NAME) ||
511 		    (pm_arg[0] == (uint32_t)XPM_QID_PINCTRL_GET_FUNCTION_NAME)) &&
512 		    (ret == PM_RET_SUCCESS)) {
513 			SMC_RET2(handle, (uint64_t)buf[0] | ((uint64_t)buf[1] << 32U),
514 				(uint64_t)buf[2] | ((uint64_t)buf[3] << 32U));
515 		}
516 	}
517 
518 	SMC_RET2(handle, (uint64_t)ret | ((uint64_t)buf[0] << 32U),
519 		 (uint64_t)buf[1] | ((uint64_t)buf[2] << 32U));
520 }
521 
522 /**
523  * eemi_api_handler() - Prepare EEMI payload and perform IPI transaction.
524  * @api_id: identifier for the API being called.
525  * @pm_arg: pointer to the argument data for the API call.
526  * @handle: Pointer to caller's context structure.
527  * @security_flag: SECURE or NON_SECURE.
528  *
529  * EEMI - Embedded Energy Management Interface is AMD-Xilinx proprietary
530  * protocol to allow communication between power management controller and
531  * different processing clusters.
532  *
533  * This handler prepares EEMI protocol payload received from kernel and performs
534  * IPI transaction.
535  *
536  * Return: If EEMI API found then, uintptr_t type address, else 0
537  */
538 static uintptr_t eemi_api_handler(uint32_t api_id, const uint32_t *pm_arg,
539 				  void *handle, uint32_t security_flag)
540 {
541 	enum pm_ret_status ret;
542 	uint32_t buf[RET_PAYLOAD_ARG_CNT] = {0U};
543 	uint32_t payload[PAYLOAD_ARG_CNT] = {0U};
544 	uint32_t module_id;
545 
546 	module_id = (api_id & MODULE_ID_MASK) >> 8U;
547 
548 	PM_PACK_PAYLOAD7(payload, module_id, security_flag, api_id,
549 			 pm_arg[0], pm_arg[1], pm_arg[2], pm_arg[3],
550 			 pm_arg[4], pm_arg[5]);
551 
552 	ret = pm_ipi_send_sync(primary_proc, payload, (uint32_t *)buf,
553 			       RET_PAYLOAD_ARG_CNT);
554 
555 	SMC_RET4(handle, (uint64_t)ret | ((uint64_t)buf[0] << 32U),
556 		 (uint64_t)buf[1] | ((uint64_t)buf[2] << 32U),
557 		 (uint64_t)buf[3] | ((uint64_t)buf[4] << 32U),
558 		 (uint64_t)buf[5]);
559 }
560 
/**
 * pm_smc_handler() - SMC handler for PM-API calls coming from EL1/EL2.
 * @smc_fid: Function Identifier.
 * @x1: SMC64 Arguments from kernel.
 * @x2: SMC64 Arguments from kernel.
 * @x3: SMC64 Arguments from kernel (upper 32-bits).
 * @x4: Unused.
 * @cookie: Unused.
 * @handle: Pointer to caller's context structure.
 * @flags: SECURE or NON_SECURE.
 *
 * Return: Unused.
 *
 * Determines that smc_fid is valid and supported PM SMC Function ID from the
 * list of pm_api_ids, otherwise completes the request with
 * the unknown SMC Function ID.
 *
 * The SMC calls for PM service are forwarded from SIP Service SMC handler
 * function with rt_svc_handle signature.
 *
 */
uint64_t pm_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
			uint64_t x4, const void *cookie, void *handle, uint64_t flags)
{
	(void)cookie;
	uintptr_t ret;
	uint32_t pm_arg[PAYLOAD_ARG_CNT] = {0};
	uint32_t security_flag = NON_SECURE;
	uint32_t api_id;
	bool status = false, status_tmp = false;
	const uint64_t x[4] = {x1, x2, x3, x4};

	/* Handle case where PM wasn't initialized properly */
	if (pm_up == false) {
		SMC_RET1(handle, SMC_UNK);
	}

	/*
	 * Mark BIT24 payload (i.e 1st bit of pm_arg[3] ) as secure (0)
	 * if smc called is secure
	 *
	 * Add redundant macro call to immune the code from glitches
	 */
	SECURE_REDUNDANT_CALL(status, status_tmp, is_caller_secure, flags);
	/* Both redundant results must agree before the call is treated as secure. */
	if ((status != false) && (status_tmp != false)) {
		security_flag = SECURE;
	}

	/* Pass-through FIDs carry the full EEMI API id in the low word of x1. */
	if ((smc_fid & FUNCID_NUM_MASK) == PASS_THROUGH_FW_CMD_ID) {
		api_id = lower_32_bits(x[0]);

		/* Unpack the remaining 32-bit payload words from the 64-bit args. */
		EXTRACT_ARGS(pm_arg, x);

		return eemi_api_handler(api_id, pm_arg, handle, security_flag);
	}

	/* Legacy layout: two 32-bit payload words per 64-bit SMC argument. */
	pm_arg[0] = (uint32_t)x1;
	pm_arg[1] = (uint32_t)(x1 >> 32U);
	pm_arg[2] = (uint32_t)x2;
	pm_arg[3] = (uint32_t)(x2 >> 32U);
	pm_arg[4] = (uint32_t)x3;
	(void)(x4);
	api_id = smc_fid & FUNCID_NUM_MASK;

	/* Try each handler in order; a zero return means "not my API". */
	ret = eemi_for_compatibility(api_id, pm_arg, handle, security_flag);
	if (ret != (uintptr_t)0) {
		return ret;
	}

	/* NOTE(review): passes raw flags rather than the derived security_flag — confirm intentional. */
	ret = eemi_psci_debugfs_handler(api_id, pm_arg, handle,
					(uint32_t)flags);
	if (ret !=  (uintptr_t)0) {
		return ret;
	}

	ret = TF_A_specific_handler(api_id, pm_arg, handle, security_flag);
	if (ret !=  (uintptr_t)0) {
		return ret;
	}

	/* Fall through to the generic EEMI IPI handler. */
	ret = eemi_handler(api_id, pm_arg, handle, security_flag);

	return ret;
}
645