xref: /rk3399_ARM-atf/plat/xilinx/common/pm_service/pm_svc_main.c (revision be3abed7cb3e84332f7fe7cf016ab9671e377cad)
1 /*
2  * Copyright (c) 2019-2022, Xilinx, Inc. All rights reserved.
3  * Copyright (c) 2022-2025, Advanced Micro Devices, Inc. All rights reserved.
4  *
5  * SPDX-License-Identifier: BSD-3-Clause
6  */
7 
8 /*
9  * Top-level SMC handler for Versal power management calls and
10  * IPI setup functions for communication with PMC.
11  */
12 
13 #include <errno.h>
14 #include <stdbool.h>
15 
16 #include "../drivers/arm/gic/v3/gicv3_private.h"
17 
18 #include <common/runtime_svc.h>
19 #include <drivers/arm/gicv3.h>
20 #include <lib/psci/psci.h>
21 #include <plat/arm/common/plat_arm.h>
22 #include <plat/common/platform.h>
23 
24 #include <plat_private.h>
25 #include "pm_api_sys.h"
26 #include "pm_client.h"
27 #include "pm_ipi.h"
28 #include "pm_svc_main.h"
29 
/*
 * Value written to GICD_IROUTER for the IPI IRQ in pm_setup(); bit 31 is
 * the Interrupt_Routing_Mode bit -- presumably selecting 1-of-N routing so
 * any participating PE can take the IRQ. TODO(review): confirm against the
 * GICv3 spec and the intended affinity routing for PLAT_VERSAL_IPI_IRQ.
 */
#define MODE				0x80000000U

/* Sentinel meaning "no SGI registered by the OS yet" (real SGI IDs are small). */
#define INVALID_SGI    0xFFU
/* Callback IDs delivered by the PMC firmware in payload[0] of an IPI FIQ. */
#define PM_INIT_SUSPEND_CB	(30U)
#define PM_NOTIFY_CB		(32U)
/* Event ID (payload[2]) signalling a CPU power-down request from firmware. */
#define EVENT_CPU_PWRDWN	(4U)
/* Non-secure SGI used to hand non-PMC mailbox IPIs to the OS mailbox driver. */
#define MBOX_SGI_SHARED_IPI	(7U)

/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 */
#define upper_32_bits(n)	((uint32_t)((n) >> 32U))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n)	((uint32_t)((n) & 0xffffffffU))

/**
 * EXTRACT_ARGS - extracts 32-bit payloads from 64-bit SMC arguments
 * @pm_arg: array of 32-bit payloads
 * @x: array of 64-bit SMC arguments
 *
 * The packing alternates: pm_arg[0] = hi(x[0]), pm_arg[1] = lo(x[1]),
 * pm_arg[2] = hi(x[1]), ... (lo(x[0]) is consumed separately as the API ID
 * by the pass-through path in pm_smc_handler()).
 */
#define EXTRACT_ARGS(pm_arg, x)						\
	for (uint32_t i = 0U; i < (PAYLOAD_ARG_CNT - 1U); i++) {	\
		if ((i % 2U) != 0U) {					\
			pm_arg[i] = lower_32_bits(x[(i / 2U) + 1U]);	\
		} else {						\
			pm_arg[i] = upper_32_bits(x[i / 2U]);		\
		}							\
	}

/* 1 sec of wait timeout for secondary core down */
#define PWRDWN_WAIT_TIMEOUT	(1000U)

/* pm_up = true - UP, pm_up = false - DOWN */
static bool pm_up;
/* SGI number registered by the OS for callback delivery (INVALID_SGI if none). */
static uint32_t sgi = (uint32_t)INVALID_SGI;
/* Set on the first EVENT_CPU_PWRDWN notification; cleared when serviced. */
static bool pwrdwn_req_received;
71 
72 bool pm_pwrdwn_req_status(void)
73 {
74 	return pwrdwn_req_received;
75 }
76 
77 static void notify_os(void)
78 {
79 	plat_ic_raise_ns_sgi((int)sgi, read_mpidr_el1());
80 }
81 
82 static uint64_t cpu_pwrdwn_req_handler(uint32_t id, uint32_t flags,
83 				       void *handle, void *cookie)
84 {
85 	(void)id;
86 	(void)flags;
87 	(void)handle;
88 	(void)cookie;
89 	uint32_t cpu_id = plat_my_core_pos();
90 
91 	VERBOSE("Powering down CPU %d\n", cpu_id);
92 
93 	/* Deactivate CPU power down SGI */
94 	plat_ic_end_of_interrupt(CPU_PWR_DOWN_REQ_INTR);
95 
96 	return (uint64_t)psci_cpu_off();
97 }
98 
99 /**
100  * raise_pwr_down_interrupt() - Callback function to raise SGI.
101  * @mpidr: MPIDR for the target CPU.
102  *
103  * Raise SGI interrupt to trigger the CPU power down sequence on all the
104  * online secondary cores.
105  */
106 static void raise_pwr_down_interrupt(u_register_t mpidr)
107 {
108 	plat_ic_raise_el3_sgi((int)CPU_PWR_DOWN_REQ_INTR, mpidr);
109 }
110 
111 void request_cpu_pwrdwn(void)
112 {
113 	int ret;
114 
115 	VERBOSE("CPU power down request received\n");
116 
117 	/* Send powerdown request to online secondary core(s) */
118 	ret = psci_stop_other_cores(plat_my_core_pos(), PWRDWN_WAIT_TIMEOUT,
119 				    raise_pwr_down_interrupt);
120 	if (ret != PSCI_E_SUCCESS) {
121 		ERROR("Failed to powerdown secondary core(s)\n");
122 	}
123 
124 	/* Clear IPI IRQ */
125 	pm_ipi_irq_clear(primary_proc);
126 
127 	/* Deactivate IPI IRQ */
128 	plat_ic_end_of_interrupt(PLAT_VERSAL_IPI_IRQ);
129 }
130 
/**
 * ipi_fiq_handler() - EL3 FIQ handler for IPI interrupts from firmware.
 * @id: interrupt ID to deactivate at end-of-interrupt.
 * @flags: interrupt flags (unused).
 * @handle: caller context (unused).
 * @cookie: opaque cookie (unused).
 *
 * Dispatches incoming IPIs: non-PMC mailbox agents are handed to the OS
 * mailbox driver via an SGI; PMC callbacks are decoded from the payload
 * and either forwarded to the OS (registered SGI) or serviced here
 * (CPU power-down). Statement order is significant: the interrupt is
 * acknowledged up front and end-of-interrupt is issued on every exit path.
 *
 * Return: always 0.
 */
static uint64_t ipi_fiq_handler(uint32_t id, uint32_t flags, void *handle,
				void *cookie)
{
	(void)flags;
	(void)handle;
	(void)cookie;
	uint32_t payload[4] = {0};
	enum pm_ret_status ret;
	uint32_t ipi_status, i;

	VERBOSE("Received IPI FIQ from firmware\n");

	console_flush();
	(void)plat_ic_acknowledge_interrupt();

	/* Check status register for each IPI except PMC */
	for (i = IPI_ID_APU; i <= IPI_ID_5; i++) {
		ipi_status = ipi_mb_enquire_status(IPI_ID_APU, i);

		/* If any agent other than PMC has generated IPI FIQ then send SGI to mbox driver */
		if ((ipi_status & IPI_MB_STATUS_RECV_PENDING) != 0U) {
			plat_ic_raise_ns_sgi((int)MBOX_SGI_SHARED_IPI, read_mpidr_el1());
			break;
		}
	}

	/* If PMC has not generated interrupt then end ISR */
	ipi_status = ipi_mb_enquire_status(IPI_ID_APU, IPI_ID_PMC);
	if ((ipi_status & IPI_MB_STATUS_RECV_PENDING) == 0U) {
		plat_ic_end_of_interrupt(id);
		goto exit_label;
	}

	/* Handle PMC case */
	ret = pm_get_callbackdata(payload, ARRAY_SIZE(payload), 0, 0);
	if (ret != PM_RET_SUCCESS) {
		/* Propagate the error code so the switch below reports it. */
		payload[0] = (uint32_t)ret;
	}

	switch (payload[0]) {
	case PM_INIT_SUSPEND_CB:
		/* Suspend request: only meaningful if the OS registered an SGI. */
		if (sgi != INVALID_SGI) {
			notify_os();
		}
		break;
	case PM_NOTIFY_CB:
		if (sgi != INVALID_SGI) {
			if ((payload[2] == EVENT_CPU_PWRDWN) &&
			    (NODECLASS(payload[1]) == (uint32_t)XPM_NODECLASS_DEVICE)) {
				/*
				 * Two-step handshake: the first power-down
				 * notification only sets the flag and informs
				 * the OS; the second actually powers down the
				 * secondaries and this core.
				 */
				if (pwrdwn_req_received) {
					pwrdwn_req_received = false;
					request_cpu_pwrdwn();
					(void)psci_cpu_off();
					break;
				} else {
					pwrdwn_req_received = true;
				}
			}
			notify_os();
		} else {
			/* No OS SGI registered: service power-down directly. */
			if ((payload[2] == EVENT_CPU_PWRDWN) &&
			    (NODECLASS(payload[1]) == (uint32_t)XPM_NODECLASS_DEVICE)) {
				request_cpu_pwrdwn();
				(void)psci_cpu_off();
			}
		}
		break;
	case (uint32_t)PM_RET_ERROR_INVALID_CRC:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid CRC in the payload\n");
		break;

	default:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid IPI payload\n");
		break;
	}

	/* Clear FIQ */
	plat_ic_end_of_interrupt(id);

exit_label:
	return 0;
}
215 
216 /**
217  * pm_register_sgi() - PM register the IPI interrupt.
218  * @sgi_num: SGI number to be used for communication.
219  * @reset: Reset to invalid SGI when reset=1.
220  *
221  * Return: On success, the initialization function must return 0.
222  *         Any other return value will cause the framework to ignore
223  *         the service.
224  *
225  * Update the SGI number to be used.
226  *
227  */
228 int32_t pm_register_sgi(uint32_t sgi_num, uint32_t reset)
229 {
230 	int32_t ret = 0;
231 
232 	if (reset == 1U) {
233 		sgi = INVALID_SGI;
234 	} else if (sgi != INVALID_SGI) {
235 		ret = -EBUSY;
236 	} else if (sgi_num >= GICV3_MAX_SGI_TARGETS) {
237 		ret = -EINVAL;
238 	} else {
239 		sgi = (uint32_t)sgi_num;
240 	}
241 
242 	return ret;
243 }
244 
245 /**
246  * pm_setup() - PM service setup.
247  *
248  * Return: On success, the initialization function must return 0.
249  *         Any other return value will cause the framework to ignore
250  *         the service.
251  *
252  * Initialization functions for Versal power management for
253  * communicaton with PMC.
254  *
255  * Called from sip_svc_setup initialization function with the
256  * rt_svc_init signature.
257  *
258  */
259 int32_t pm_setup(void)
260 {
261 	int32_t ret = 0;
262 
263 	pm_ipi_init(primary_proc);
264 	pm_up = true;
265 	pwrdwn_req_received = false;
266 
267 	/* register SGI handler for CPU power down request */
268 	ret = request_intr_type_el3(CPU_PWR_DOWN_REQ_INTR, cpu_pwrdwn_req_handler);
269 	if (ret != 0) {
270 		WARN("BL31: registering SGI interrupt failed\n");
271 	}
272 
273 	/*
274 	 * Enable IPI IRQ
275 	 * assume the rich OS is OK to handle callback IRQs now.
276 	 * Even if we were wrong, it would not enable the IRQ in
277 	 * the GIC.
278 	 */
279 	pm_ipi_irq_enable(primary_proc);
280 
281 	ret = request_intr_type_el3(PLAT_VERSAL_IPI_IRQ, ipi_fiq_handler);
282 	if (ret != 0) {
283 		WARN("BL31: registering IPI interrupt failed\n");
284 	}
285 
286 	gicd_write_irouter(gicv3_driver_data->gicd_base, PLAT_VERSAL_IPI_IRQ, MODE);
287 
288 	/* Register for idle callback during force power down/restart */
289 	ret = (int32_t)pm_register_notifier(primary_proc->node_id, EVENT_CPU_PWRDWN,
290 				   0x0U, 0x1U, SECURE_FLAG);
291 	if (ret != 0) {
292 		WARN("BL31: registering idle callback for restart/force power down failed\n");
293 	}
294 
295 	return ret;
296 }
297 
298 /**
299  * eemi_for_compatibility() - EEMI calls handler for deprecated calls.
300  * @api_id: identifier for the API being called.
301  * @pm_arg: pointer to the argument data for the API call.
302  * @handle: Pointer to caller's context structure.
303  * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
304  *
305  * Return: If EEMI API found then, uintptr_t type address, else 0.
306  *
307  * Some EEMI API's use case needs to be changed in Linux driver, so they
308  * can take advantage of common EEMI handler in TF-A. As of now the old
309  * implementation of these APIs are required to maintain backward compatibility
310  * until their use case in linux driver changes.
311  *
312  */
313 static uintptr_t eemi_for_compatibility(uint32_t api_id, const uint32_t *pm_arg,
314 					void *handle, uint32_t security_flag)
315 {
316 	enum pm_ret_status ret;
317 
318 	switch (api_id) {
319 
320 	case (uint32_t)PM_FEATURE_CHECK:
321 	{
322 		uint32_t result[RET_PAYLOAD_ARG_CNT] = {0U};
323 
324 		ret = pm_feature_check(pm_arg[0], result, security_flag);
325 		SMC_RET2(handle, (uint64_t)ret | ((uint64_t)result[0] << 32U),
326 			 (uint64_t)result[1] | ((uint64_t)result[2] << 32U));
327 	}
328 
329 	case PM_LOAD_PDI:
330 	{
331 		ret = pm_load_pdi(pm_arg[0], pm_arg[1], pm_arg[2],
332 				  security_flag);
333 		SMC_RET1(handle, (uint64_t)ret);
334 	}
335 
336 	default:
337 		return (uintptr_t)0;
338 	}
339 }
340 
341 /**
342  * eemi_psci_debugfs_handler() - EEMI API invoked from PSCI.
343  * @api_id: identifier for the API being called.
344  * @pm_arg: pointer to the argument data for the API call.
345  * @handle: Pointer to caller's context structure.
346  * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
347  *
348  * These EEMI APIs performs CPU specific power management tasks.
349  * These EEMI APIs are invoked either from PSCI or from debugfs in kernel.
350  * These calls require CPU specific processing before sending IPI request to
351  * Platform Management Controller. For example enable/disable CPU specific
352  * interrupts. This requires separate handler for these calls and may not be
353  * handled using common eemi handler.
354  *
355  * Return: If EEMI API found then, uintptr_t type address, else 0.
356  *
357  */
358 static uintptr_t eemi_psci_debugfs_handler(uint32_t api_id, const uint32_t *pm_arg,
359 					   void *handle, uint32_t security_flag)
360 {
361 	enum pm_ret_status ret;
362 
363 	switch (api_id) {
364 
365 	case (uint32_t)PM_SELF_SUSPEND:
366 		ret = pm_self_suspend(pm_arg[0], pm_arg[1], pm_arg[2],
367 				      pm_arg[3], security_flag);
368 		SMC_RET1(handle, (u_register_t)ret);
369 
370 	case (uint32_t)PM_FORCE_POWERDOWN:
371 		ret = pm_force_powerdown(pm_arg[0], (uint8_t)pm_arg[1], security_flag);
372 		SMC_RET1(handle, (u_register_t)ret);
373 
374 	case (uint32_t)PM_REQ_SUSPEND:
375 		ret = pm_req_suspend(pm_arg[0], (uint8_t)pm_arg[1], pm_arg[2],
376 				     pm_arg[3], security_flag);
377 		SMC_RET1(handle, (u_register_t)ret);
378 
379 	case (uint32_t)PM_ABORT_SUSPEND:
380 		ret = pm_abort_suspend((enum pm_abort_reason)pm_arg[0], security_flag);
381 		SMC_RET1(handle, (u_register_t)ret);
382 
383 	case (uint32_t)PM_SYSTEM_SHUTDOWN:
384 		ret = pm_system_shutdown(pm_arg[0], pm_arg[1], security_flag);
385 		SMC_RET1(handle, (u_register_t)ret);
386 
387 	default:
388 		return (uintptr_t)0;
389 	}
390 }
391 
392 /**
393  * tfa_clear_pm_state() - Reset TF-A-specific PM state.
394  *
395  * This function resets TF-A-specific state that may have been modified,
396  * such as during a kexec-based kernel reload. It resets the SGI number
397  * and the shutdown scope to its default value.
398  */
399 static enum pm_ret_status tfa_clear_pm_state(void)
400 {
401 	/* Reset SGI number to default value(-1). */
402 	sgi = (uint32_t)INVALID_SGI;
403 
404 	/* Reset the shutdown scope to its default value(system). */
405 	return pm_system_shutdown(XPM_SHUTDOWN_TYPE_SETSCOPE_ONLY, XPM_SHUTDOWN_SUBTYPE_RST_SYSTEM,
406 				  0U);
407 }
408 
409 /**
410  * TF_A_specific_handler() - SMC handler for TF-A specific functionality.
411  * @api_id: identifier for the API being called.
412  * @pm_arg: pointer to the argument data for the API call.
413  * @handle: Pointer to caller's context structure.
414  * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
415  *
416  * These EEMI calls performs functionality that does not require
417  * IPI transaction. The handler ends in TF-A and returns requested data to
418  * kernel from TF-A.
419  *
420  * Return: If TF-A specific API found then, uintptr_t type address, else 0
421  *
422  */
423 static uintptr_t TF_A_specific_handler(uint32_t api_id, const uint32_t *pm_arg,
424 				       void *handle, uint32_t security_flag)
425 {
426 	switch (api_id) {
427 
428 	case TF_A_FEATURE_CHECK:
429 	{
430 		enum pm_ret_status ret;
431 		uint32_t result[PAYLOAD_ARG_CNT] = {0U};
432 
433 		ret = eemi_feature_check(pm_arg[0], result);
434 		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)result[0] << 32U));
435 	}
436 
437 	case TF_A_PM_REGISTER_SGI:
438 	{
439 		int32_t ret;
440 
441 		ret = pm_register_sgi(pm_arg[0], pm_arg[1]);
442 		if (ret != 0) {
443 			SMC_RET1(handle, (uint32_t)PM_RET_ERROR_ARGS);
444 		}
445 
446 		SMC_RET1(handle, (uint32_t)PM_RET_SUCCESS);
447 	}
448 
449 	case PM_GET_CALLBACK_DATA:
450 	{
451 		uint32_t result[4] = {0};
452 		enum pm_ret_status ret;
453 
454 		ret = pm_get_callbackdata(result, ARRAY_SIZE(result), security_flag, 1U);
455 		if (ret != PM_RET_SUCCESS) {
456 			result[0] = (uint32_t)ret;
457 		}
458 
459 		SMC_RET2(handle,
460 			(uint64_t)result[0] | ((uint64_t)result[1] << 32U),
461 			(uint64_t)result[2] | ((uint64_t)result[3] << 32U));
462 	}
463 
464 	case PM_GET_TRUSTZONE_VERSION:
465 		SMC_RET1(handle, (uint64_t)PM_RET_SUCCESS |
466 			 ((uint64_t)TZ_VERSION << 32U));
467 
468 	case TF_A_CLEAR_PM_STATE:
469 	{
470 		enum pm_ret_status ret;
471 
472 		ret = tfa_clear_pm_state();
473 
474 		SMC_RET1(handle, (uint64_t)ret);
475 	}
476 
477 	default:
478 		return (uintptr_t)0;
479 	}
480 }
481 
482 /**
483  * eemi_handler() - Prepare EEMI payload and perform IPI transaction.
484  * @api_id: identifier for the API being called.
485  * @pm_arg: pointer to the argument data for the API call.
486  * @handle: Pointer to caller's context structure.
487  * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
488  *
489  * EEMI - Embedded Energy Management Interface is Xilinx proprietary protocol
490  * to allow communication between power management controller and different
491  * processing clusters.
492  *
493  * This handler prepares EEMI protocol payload received from kernel and performs
494  * IPI transaction.
495  *
496  * Return: If EEMI API found then, uintptr_t type address, else 0
497  *
498  */
499 static uintptr_t eemi_handler(uint32_t api_id, const uint32_t *pm_arg,
500 			      void *handle, uint32_t security_flag)
501 {
502 	enum pm_ret_status ret;
503 	uint32_t buf[RET_PAYLOAD_ARG_CNT] = {0};
504 
505 	ret = pm_handle_eemi_call(security_flag, api_id, pm_arg[0], pm_arg[1],
506 				  pm_arg[2], pm_arg[3], pm_arg[4], buf);
507 	/*
508 	 * Two IOCTLs, to get clock name and pinctrl name of pm_query_data API
509 	 * receives 5 words of respoonse from firmware. Currently linux driver can
510 	 * receive only 4 words from TF-A. So, this needs to be handled separately
511 	 * than other eemi calls.
512 	 */
513 	if (api_id == (uint32_t)PM_QUERY_DATA) {
514 		if (((pm_arg[0] == (uint32_t)XPM_QID_CLOCK_GET_NAME) ||
515 		    (pm_arg[0] == (uint32_t)XPM_QID_PINCTRL_GET_FUNCTION_NAME)) &&
516 		    (ret == PM_RET_SUCCESS)) {
517 			SMC_RET2(handle, (uint64_t)buf[0] | ((uint64_t)buf[1] << 32U),
518 				(uint64_t)buf[2] | ((uint64_t)buf[3] << 32U));
519 		}
520 	}
521 
522 	SMC_RET2(handle, (uint64_t)ret | ((uint64_t)buf[0] << 32U),
523 		 (uint64_t)buf[1] | ((uint64_t)buf[2] << 32U));
524 }
525 
526 /**
527  * eemi_api_handler() - Prepare EEMI payload and perform IPI transaction.
528  * @api_id: identifier for the API being called.
529  * @pm_arg: pointer to the argument data for the API call.
530  * @handle: Pointer to caller's context structure.
531  * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
532  *
533  * EEMI - Embedded Energy Management Interface is AMD-Xilinx proprietary
534  * protocol to allow communication between power management controller and
535  * different processing clusters.
536  *
537  * This handler prepares EEMI protocol payload received from kernel and performs
538  * IPI transaction.
539  *
540  * Return: If EEMI API found then, uintptr_t type address, else 0
541  */
542 static uintptr_t eemi_api_handler(uint32_t api_id, const uint32_t *pm_arg,
543 				  void *handle, uint32_t security_flag)
544 {
545 	enum pm_ret_status ret;
546 	uint32_t buf[RET_PAYLOAD_ARG_CNT] = {0U};
547 	uint32_t payload[PAYLOAD_ARG_CNT] = {0U};
548 	uint32_t module_id;
549 
550 	module_id = (api_id & MODULE_ID_MASK) >> 8U;
551 
552 	PM_PACK_PAYLOAD7(payload, module_id, security_flag, api_id,
553 			 pm_arg[0], pm_arg[1], pm_arg[2], pm_arg[3],
554 			 pm_arg[4], pm_arg[5]);
555 
556 	ret = pm_ipi_send_sync(primary_proc, payload, (uint32_t *)buf,
557 			       RET_PAYLOAD_ARG_CNT);
558 
559 	SMC_RET4(handle, (uint64_t)ret | ((uint64_t)buf[0] << 32U),
560 		 (uint64_t)buf[1] | ((uint64_t)buf[2] << 32U),
561 		 (uint64_t)buf[3] | ((uint64_t)buf[4] << 32U),
562 		 (uint64_t)buf[5]);
563 }
564 
/**
 * pm_smc_handler() - SMC handler for PM-API calls coming from EL1/EL2.
 * @smc_fid: Function Identifier.
 * @x1: SMC64 Arguments from kernel.
 * @x2: SMC64 Arguments from kernel.
 * @x3: SMC64 Arguments from kernel (upper 32-bits).
 * @x4: Unused.
 * @cookie: Unused.
 * @handle: Pointer to caller's context structure.
 * @flags: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * Return: Unused.
 *
 * Determines that smc_fid is valid and supported PM SMC Function ID from the
 * list of pm_api_ids, otherwise completes the request with
 * the unknown SMC Function ID. The call is tried against each handler in
 * turn (compatibility, PSCI/debugfs, TF-A-local, common EEMI); the first
 * one returning non-zero has completed the SMC.
 *
 * The SMC calls for PM service are forwarded from SIP Service SMC handler
 * function with rt_svc_handle signature.
 *
 */
uint64_t pm_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
			uint64_t x4, const void *cookie, void *handle, uint64_t flags)
{
	(void)cookie;
	uintptr_t ret;
	uint32_t pm_arg[PAYLOAD_ARG_CNT] = {0};
	uint32_t security_flag = NON_SECURE_FLAG;
	uint32_t api_id;
	bool status = false, status_tmp = false;
	const uint64_t x[4] = {x1, x2, x3, x4};

	/* Handle case where PM wasn't initialized properly */
	if (pm_up == false) {
		SMC_RET1(handle, SMC_UNK);
	}

	/*
	 * Mark BIT24 payload (i.e 1st bit of pm_arg[3] ) as secure (0)
	 * if smc called is secure
	 *
	 * Add redundant macro call to immune the code from glitches
	 */
	SECURE_REDUNDANT_CALL(status, status_tmp, is_caller_secure, flags);
	if ((status != false) && (status_tmp != false)) {
		security_flag = SECURE_FLAG;
	}

	/* Pass-through path: full EEMI payload packed across x1..x4. */
	if ((smc_fid & FUNCID_NUM_MASK) == PASS_THROUGH_FW_CMD_ID) {
		api_id = lower_32_bits(x[0]);

		EXTRACT_ARGS(pm_arg, x);

		return eemi_api_handler(api_id, pm_arg, handle, security_flag);
	}

	/* Legacy path: up to five 32-bit arguments packed into x1..x3. */
	pm_arg[0] = (uint32_t)x1;
	pm_arg[1] = (uint32_t)(x1 >> 32U);
	pm_arg[2] = (uint32_t)x2;
	pm_arg[3] = (uint32_t)(x2 >> 32U);
	pm_arg[4] = (uint32_t)x3;
	(void)(x4);
	api_id = smc_fid & FUNCID_NUM_MASK;

	ret = eemi_for_compatibility(api_id, pm_arg, handle, security_flag);
	if (ret != (uintptr_t)0) {
		return ret;
	}

	/*
	 * NOTE(review): this handler receives the raw SMC flags rather than
	 * the derived security_flag -- presumably intentional for the
	 * PSCI/debugfs path; confirm against pm_api_sys expectations.
	 */
	ret = eemi_psci_debugfs_handler(api_id, pm_arg, handle,
					(uint32_t)flags);
	if (ret !=  (uintptr_t)0) {
		return ret;
	}

	ret = TF_A_specific_handler(api_id, pm_arg, handle, security_flag);
	if (ret !=  (uintptr_t)0) {
		return ret;
	}

	/* Everything else goes to the common EEMI/IPI path. */
	ret = eemi_handler(api_id, pm_arg, handle, security_flag);

	return ret;
}
649