1 /*
2 * Copyright (c) 2019-2022, Xilinx, Inc. All rights reserved.
3 * Copyright (c) 2022-2025, Advanced Micro Devices, Inc. All rights reserved.
4 *
5 * SPDX-License-Identifier: BSD-3-Clause
6 */
7
8 /*
9 * Top-level SMC handler for Versal power management calls and
10 * IPI setup functions for communication with PMC.
11 */
12
13 #include <errno.h>
14 #include <stdbool.h>
15
16 #include "../drivers/arm/gic/v3/gicv3_private.h"
17
18 #include <common/ep_info.h>
19 #include <common/runtime_svc.h>
20 #include <drivers/arm/gicv3.h>
21 #include <lib/psci/psci.h>
22 #include <plat/arm/common/plat_arm.h>
23 #include <plat/common/platform.h>
24
25 #include <plat_private.h>
26 #include "pm_api_sys.h"
27 #include "pm_client.h"
28 #include "pm_ipi.h"
29 #include "pm_svc_main.h"
30
#define MODE				0x80000000U

#define INVALID_SGI			0xFFU
#define PM_INIT_SUSPEND_CB		(30U)
#define PM_NOTIFY_CB			(32U)
#define EVENT_CPU_PWRDWN		(4U)
#define MBOX_SGI_SHARED_IPI		(7U)

/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 */
#define upper_32_bits(n)	((uint32_t)((n) >> 32U))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n)	((uint32_t)((n) & 0xffffffffU))

/**
 * EXTRACT_ARGS - extracts 32-bit payloads from 64-bit SMC arguments
 * @pm_arg: array of 32-bit payloads
 * @x: array of 64-bit SMC arguments
 *
 * Wrapped in do { } while (false) so the macro expands to a single
 * statement and is safe inside unbraced if/else bodies (CERT PRE10-C).
 */
#define EXTRACT_ARGS(pm_arg, x)						\
	do {								\
		for (uint32_t i = 0U; i < (PAYLOAD_ARG_CNT - 1U); i++) { \
			if ((i % 2U) != 0U) {				\
				pm_arg[i] = lower_32_bits(x[(i / 2U) + 1U]); \
			} else {					\
				pm_arg[i] = upper_32_bits(x[i / 2U]);	\
			}						\
		}							\
	} while (false)

/* 1 sec of wait timeout for secondary core down */
#define PWRDWN_WAIT_TIMEOUT	(1000U)
67
/* pm_up = true - UP, pm_up = false - DOWN */
static bool pm_up;
/* SGI number the rich OS registered for PM callbacks; INVALID_SGI = none */
static uint32_t sgi = (uint32_t)INVALID_SGI;
/* Armed on the first CPU power-down notification; acted upon on the second */
static bool pwrdwn_req_received;
72
/**
 * pm_pwrdwn_req_status() - Query whether a CPU power-down request is pending.
 *
 * Return: true if a power-down notification has been received and is
 *         awaiting handling, false otherwise.
 */
bool pm_pwrdwn_req_status(void)
{
	return pwrdwn_req_received;
}
77
/*
 * notify_os() - Raise the registered non-secure SGI on the current CPU so
 * the rich OS can collect the pending PM callback data.
 */
static void notify_os(void)
{
	plat_ic_raise_ns_sgi((int)sgi, read_mpidr_el1());
}
82
cpu_pwrdwn_req_handler(uint32_t id,uint32_t flags,void * handle,void * cookie)83 static uint64_t cpu_pwrdwn_req_handler(uint32_t id, uint32_t flags,
84 void *handle, void *cookie)
85 {
86 (void)id;
87 (void)flags;
88 (void)handle;
89 (void)cookie;
90 uint32_t cpu_id = plat_my_core_pos();
91
92 VERBOSE("Powering down CPU %d\n", cpu_id);
93
94 /* Deactivate CPU power down SGI */
95 plat_ic_end_of_interrupt(CPU_PWR_DOWN_REQ_INTR);
96
97 return (uint64_t)psci_cpu_off();
98 }
99
/**
 * raise_pwr_down_interrupt() - Callback function to raise SGI.
 * @mpidr: MPIDR for the target CPU.
 *
 * Raise SGI interrupt to trigger the CPU power down sequence on all the
 * online secondary cores.
 */
static void raise_pwr_down_interrupt(u_register_t mpidr)
{
	/* EL3 SGI, so the request lands in cpu_pwrdwn_req_handler() */
	plat_ic_raise_el3_sgi((int)CPU_PWR_DOWN_REQ_INTR, mpidr);
}
111
request_cpu_pwrdwn(void)112 void request_cpu_pwrdwn(void)
113 {
114 int ret;
115
116 VERBOSE("CPU power down request received\n");
117
118 /* Send powerdown request to online secondary core(s) */
119 ret = psci_stop_other_cores(plat_my_core_pos(), PWRDWN_WAIT_TIMEOUT,
120 raise_pwr_down_interrupt);
121 if (ret != PSCI_E_SUCCESS) {
122 ERROR("Failed to powerdown secondary core(s)\n");
123 }
124
125 /* Clear IPI IRQ */
126 pm_ipi_irq_clear(primary_proc);
127
128 /* Deactivate IPI IRQ */
129 plat_ic_end_of_interrupt(PLAT_VERSAL_IPI_IRQ);
130 }
131
/**
 * ipi_fiq_handler() - EL3 FIQ handler for the shared IPI interrupt.
 * @id: Interrupt ID of the IPI FIQ.
 * @flags: Interrupt flags (unused).
 * @handle: Pointer to caller's context structure (unused).
 * @cookie: Opaque handler cookie (unused).
 *
 * Demultiplexes the shared IPI FIQ: IPIs raised by agents other than the
 * PMC are forwarded to the non-secure mailbox driver via SGI; a pending
 * PMC IPI is consumed here and its callback payload dispatched.
 *
 * Return: Always 0 (required by the interrupt handler signature).
 */
static uint64_t ipi_fiq_handler(uint32_t id, uint32_t flags, void *handle,
				void *cookie)
{
	(void)flags;
	(void)handle;
	(void)cookie;
	uint32_t payload[4] = {0};
	enum pm_ret_status ret;
	uint32_t ipi_status, i;

	VERBOSE("Received IPI FIQ from firmware\n");

	console_flush();
	(void)plat_ic_acknowledge_interrupt();

	/* Check status register for each IPI except PMC */
	for (i = IPI_ID_APU; i <= IPI_ID_5; i++) {
		ipi_status = ipi_mb_enquire_status(IPI_ID_APU, i);

		/* If any agent other than PMC has generated IPI FIQ then send SGI to mbox driver */
		if ((ipi_status & IPI_MB_STATUS_RECV_PENDING) != 0U) {
			plat_ic_raise_ns_sgi((int)MBOX_SGI_SHARED_IPI, read_mpidr_el1());
			break;
		}
	}

	/* If PMC has not generated interrupt then end ISR */
	ipi_status = ipi_mb_enquire_status(IPI_ID_APU, IPI_ID_PMC);
	if ((ipi_status & IPI_MB_STATUS_RECV_PENDING) == 0U) {
		plat_ic_end_of_interrupt(id);
		goto exit_label;
	}

	/* Handle PMC case */
	ret = pm_get_callbackdata(payload, ARRAY_SIZE(payload), 0, 0);
	if (ret != PM_RET_SUCCESS) {
		/* Surface the error code through payload[0] so the switch reports it */
		payload[0] = (uint32_t)ret;
	}

	switch (payload[0]) {
	case PM_INIT_SUSPEND_CB:
		/* Suspend request: forward to the OS only if it registered an SGI */
		if (sgi != INVALID_SGI) {
			notify_os();
		}
		break;
	case PM_NOTIFY_CB:
		if (sgi != INVALID_SGI) {
			if ((payload[2] == EVENT_CPU_PWRDWN) &&
			    (NODECLASS(payload[1]) == (uint32_t)XPM_NODECLASS_DEVICE)) {
				/*
				 * Two-phase power down: the first notification
				 * only arms pwrdwn_req_received and notifies the
				 * OS (presumably so it can prepare -- confirm);
				 * the second one performs the secondary-core
				 * shutdown and powers this core off.
				 */
				if (pwrdwn_req_received) {
					pwrdwn_req_received = false;
					request_cpu_pwrdwn();
					(void)psci_cpu_off();
					break;
				} else {
					pwrdwn_req_received = true;
				}
			}
			notify_os();
		} else {
			/* No OS SGI registered: act on the power-down event directly */
			if ((payload[2] == EVENT_CPU_PWRDWN) &&
			    (NODECLASS(payload[1]) == (uint32_t)XPM_NODECLASS_DEVICE)) {
				request_cpu_pwrdwn();
				(void)psci_cpu_off();
			}
		}
		break;
	case (uint32_t)PM_RET_ERROR_INVALID_CRC:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid CRC in the payload\n");
		break;

	default:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid IPI payload\n");
		break;
	}

	/* Clear FIQ */
	plat_ic_end_of_interrupt(id);

exit_label:
	return 0;
}
216
217 /**
218 * pm_register_sgi() - PM register the IPI interrupt.
219 * @sgi_num: SGI number to be used for communication.
220 * @reset: Reset to invalid SGI when reset=1.
221 *
222 * Return: On success, the initialization function must return 0.
223 * Any other return value will cause the framework to ignore
224 * the service.
225 *
226 * Update the SGI number to be used.
227 *
228 */
pm_register_sgi(uint32_t sgi_num,uint32_t reset)229 int32_t pm_register_sgi(uint32_t sgi_num, uint32_t reset)
230 {
231 int32_t ret = 0;
232
233 if (reset == 1U) {
234 sgi = INVALID_SGI;
235 } else if (sgi != INVALID_SGI) {
236 ret = -EBUSY;
237 } else if (sgi_num >= GICV3_MAX_SGI_TARGETS) {
238 ret = -EINVAL;
239 } else {
240 sgi = (uint32_t)sgi_num;
241 }
242
243 return ret;
244 }
245
/**
 * pm_setup() - PM service setup.
 *
 * Return: On success, the initialization function must return 0.
 *         Any other return value will cause the framework to ignore
 *         the service.
 *
 * Initialization functions for Versal power management for
 * communication with PMC.
 *
 * Called from sip_svc_setup initialization function with the
 * rt_svc_init signature.
 *
 */
int32_t pm_setup(void)
{
	int32_t ret = 0;

	pm_ipi_init(primary_proc);
	pm_up = true;
	pwrdwn_req_received = false;

	/* register SGI handler for CPU power down request */
	ret = request_intr_type_el3(CPU_PWR_DOWN_REQ_INTR, cpu_pwrdwn_req_handler);
	if (ret != 0) {
		/* Non-fatal: continue setup so the basic PM service still works */
		WARN("BL31: registering SGI interrupt failed\n");
	}

	/*
	 * Enable IPI IRQ
	 * assume the rich OS is OK to handle callback IRQs now.
	 * Even if we were wrong, it would not enable the IRQ in
	 * the GIC.
	 */
	pm_ipi_irq_enable(primary_proc);

	ret = request_intr_type_el3(PLAT_VERSAL_IPI_IRQ, ipi_fiq_handler);
	if (ret != 0) {
		WARN("BL31: registering IPI interrupt failed\n");
	}

	/*
	 * Route the IPI interrupt via GICD_IROUTER; MODE sets bit 31
	 * (presumably the Interrupt_Routing_Mode / 1-of-N bit -- confirm
	 * against the GICv3 architecture spec).
	 */
	gicd_write_irouter(gicv3_driver_data->gicd_base, PLAT_VERSAL_IPI_IRQ, MODE);

	/* Register for idle callback during force power down/restart */
	ret = (int32_t)pm_register_notifier(primary_proc->node_id, EVENT_CPU_PWRDWN,
					    0x0U, 0x1U, SECURE);
	if (ret != 0) {
		WARN("BL31: registering idle callback for restart/force power down failed\n");
	}

	return ret;
}
298
/**
 * eemi_for_compatibility() - EEMI calls handler for deprecated calls.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE or NON_SECURE.
 *
 * Return: If EEMI API found then, uintptr_t type address, else 0.
 *
 * Some EEMI API's use case needs to be changed in Linux driver, so they
 * can take advantage of common EEMI handler in TF-A. As of now the old
 * implementation of these APIs are required to maintain backward compatibility
 * until their use case in linux driver changes.
 *
 */
static uintptr_t eemi_for_compatibility(uint32_t api_id, const uint32_t *pm_arg,
					void *handle, uint32_t security_flag)
{
	enum pm_ret_status ret;

	switch (api_id) {

	case (uint32_t)PM_FEATURE_CHECK:
	{
		uint32_t result[RET_PAYLOAD_ARG_CNT] = {0U};

		ret = pm_feature_check(pm_arg[0], result, security_flag);
		/* Pack status + result words into the two 64-bit return registers */
		SMC_RET2(handle, (uint64_t)ret | ((uint64_t)result[0] << 32U),
			 (uint64_t)result[1] | ((uint64_t)result[2] << 32U));
	}

	case PM_LOAD_PDI:
	{
		ret = pm_load_pdi(pm_arg[0], pm_arg[1], pm_arg[2],
				  security_flag);
		SMC_RET1(handle, (uint64_t)ret);
	}

	default:
		/* Not a legacy API: let the caller try the other handlers */
		return (uintptr_t)0;
	}
}
341
342 /**
343 * eemi_psci_debugfs_handler() - EEMI API invoked from PSCI.
344 * @api_id: identifier for the API being called.
345 * @pm_arg: pointer to the argument data for the API call.
346 * @handle: Pointer to caller's context structure.
347 * @security_flag: SECURE or NON_SECURE.
348 *
349 * These EEMI APIs performs CPU specific power management tasks.
350 * These EEMI APIs are invoked either from PSCI or from debugfs in kernel.
351 * These calls require CPU specific processing before sending IPI request to
352 * Platform Management Controller. For example enable/disable CPU specific
353 * interrupts. This requires separate handler for these calls and may not be
354 * handled using common eemi handler.
355 *
356 * Return: If EEMI API found then, uintptr_t type address, else 0.
357 *
358 */
eemi_psci_debugfs_handler(uint32_t api_id,const uint32_t * pm_arg,void * handle,uint32_t security_flag)359 static uintptr_t eemi_psci_debugfs_handler(uint32_t api_id, const uint32_t *pm_arg,
360 void *handle, uint32_t security_flag)
361 {
362 enum pm_ret_status ret;
363
364 switch (api_id) {
365
366 case (uint32_t)PM_SELF_SUSPEND:
367 ret = pm_self_suspend(pm_arg[0], pm_arg[1], pm_arg[2],
368 pm_arg[3], security_flag);
369 SMC_RET1(handle, (u_register_t)ret);
370
371 case (uint32_t)PM_FORCE_POWERDOWN:
372 ret = pm_force_powerdown(pm_arg[0], (uint8_t)pm_arg[1], security_flag);
373 SMC_RET1(handle, (u_register_t)ret);
374
375 case (uint32_t)PM_SYSTEM_SHUTDOWN:
376 ret = pm_system_shutdown(pm_arg[0], pm_arg[1], security_flag);
377 SMC_RET1(handle, (u_register_t)ret);
378
379 default:
380 return (uintptr_t)0;
381 }
382 }
383
384 /**
385 * tfa_clear_pm_state() - Reset TF-A-specific PM state.
386 *
387 * This function resets TF-A-specific state that may have been modified,
388 * such as during a kexec-based kernel reload. It resets the SGI number
389 * and the shutdown scope to its default value.
390 */
tfa_clear_pm_state(void)391 static enum pm_ret_status tfa_clear_pm_state(void)
392 {
393 /* Reset SGI number to default value(-1). */
394 sgi = (uint32_t)INVALID_SGI;
395
396 /* Reset the shutdown scope to its default value(system). */
397 return pm_system_shutdown(XPM_SHUTDOWN_TYPE_SETSCOPE_ONLY, XPM_SHUTDOWN_SUBTYPE_RST_SYSTEM,
398 0U);
399 }
400
/**
 * TF_A_specific_handler() - SMC handler for TF-A specific functionality.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE or NON_SECURE.
 *
 * These EEMI calls performs functionality that does not require
 * IPI transaction. The handler ends in TF-A and returns requested data to
 * kernel from TF-A.
 *
 * Return: If TF-A specific API found then, uintptr_t type address, else 0
 *
 */
static uintptr_t TF_A_specific_handler(uint32_t api_id, const uint32_t *pm_arg,
				       void *handle, uint32_t security_flag)
{
	switch (api_id) {

	case TF_A_FEATURE_CHECK:
	{
		enum pm_ret_status ret;
		uint32_t result[PAYLOAD_ARG_CNT] = {0U};

		/* Feature query answered locally, no IPI to the PMC */
		ret = tfa_api_feature_check(pm_arg[0], result);
		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)result[0] << 32U));
	}

	case TF_A_PM_REGISTER_SGI:
	{
		int32_t ret;

		/* Any registration failure is reported as a generic args error */
		ret = pm_register_sgi(pm_arg[0], pm_arg[1]);
		if (ret != 0) {
			SMC_RET1(handle, (uint32_t)PM_RET_ERROR_ARGS);
		}

		SMC_RET1(handle, (uint32_t)PM_RET_SUCCESS);
	}

	case PM_GET_CALLBACK_DATA:
	{
		uint32_t result[4] = {0};
		enum pm_ret_status ret;

		ret = pm_get_callbackdata(result, ARRAY_SIZE(result), security_flag, 1U);
		if (ret != PM_RET_SUCCESS) {
			/* Surface the error code in the first result word */
			result[0] = (uint32_t)ret;
		}

		SMC_RET2(handle,
			 (uint64_t)result[0] | ((uint64_t)result[1] << 32U),
			 (uint64_t)result[2] | ((uint64_t)result[3] << 32U));
	}

	case PM_GET_TRUSTZONE_VERSION:
		SMC_RET1(handle, (uint64_t)PM_RET_SUCCESS |
			 ((uint64_t)TZ_VERSION << 32U));

	case TF_A_CLEAR_PM_STATE:
	{
		enum pm_ret_status ret;

		ret = tfa_clear_pm_state();

		SMC_RET1(handle, (uint64_t)ret);
	}

	default:
		/* Not a TF-A-specific API: defer to the generic EEMI handler */
		return (uintptr_t)0;
	}
}
473
/**
 * eemi_handler() - Prepare EEMI payload and perform IPI transaction.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE or NON_SECURE.
 *
 * EEMI - Embedded Energy Management Interface is Xilinx proprietary protocol
 * to allow communication between power management controller and different
 * processing clusters.
 *
 * This handler prepares EEMI protocol payload received from kernel and performs
 * IPI transaction.
 *
 * Return: If EEMI API found then, uintptr_t type address, else 0
 *
 */
static uintptr_t eemi_handler(uint32_t api_id, const uint32_t *pm_arg,
			      void *handle, uint32_t security_flag)
{
	enum pm_ret_status ret;
	uint32_t buf[RET_PAYLOAD_ARG_CNT] = {0};

	ret = pm_handle_eemi_call(security_flag, api_id, pm_arg[0], pm_arg[1],
				  pm_arg[2], pm_arg[3], pm_arg[4], buf);
	/*
	 * Two IOCTLs, to get clock name and pinctrl name of pm_query_data API
	 * receives 5 words of response from firmware. Currently linux driver can
	 * receive only 4 words from TF-A. So, this needs to be handled separately
	 * than other eemi calls.
	 */
	if (api_id == (uint32_t)PM_QUERY_DATA) {
		if (((pm_arg[0] == (uint32_t)XPM_QID_CLOCK_GET_NAME) ||
		     (pm_arg[0] == (uint32_t)XPM_QID_PINCTRL_GET_FUNCTION_NAME)) &&
		    (ret == PM_RET_SUCCESS)) {
			/* Name queries: return the four name words without the status word */
			SMC_RET2(handle, (uint64_t)buf[0] | ((uint64_t)buf[1] << 32U),
				 (uint64_t)buf[2] | ((uint64_t)buf[3] << 32U));
		}
	}

	/* Normal path: status in the low word, response words packed after it */
	SMC_RET2(handle, (uint64_t)ret | ((uint64_t)buf[0] << 32U),
		 (uint64_t)buf[1] | ((uint64_t)buf[2] << 32U));
}
517
518 /**
519 * eemi_api_handler() - Prepare EEMI payload and perform IPI transaction.
520 * @api_id: identifier for the API being called.
521 * @pm_arg: pointer to the argument data for the API call.
522 * @handle: Pointer to caller's context structure.
523 * @security_flag: SECURE or NON_SECURE.
524 *
525 * EEMI - Embedded Energy Management Interface is AMD-Xilinx proprietary
526 * protocol to allow communication between power management controller and
527 * different processing clusters.
528 *
529 * This handler prepares EEMI protocol payload received from kernel and performs
530 * IPI transaction.
531 *
532 * Return: If EEMI API found then, uintptr_t type address, else 0
533 */
eemi_api_handler(uint32_t api_id,const uint32_t * pm_arg,void * handle,uint32_t security_flag)534 static uintptr_t eemi_api_handler(uint32_t api_id, const uint32_t *pm_arg,
535 void *handle, uint32_t security_flag)
536 {
537 enum pm_ret_status ret;
538 uint32_t buf[RET_PAYLOAD_ARG_CNT] = {0U};
539 uint32_t payload[PAYLOAD_ARG_CNT] = {0U};
540 uint32_t module_id;
541
542 module_id = (api_id & MODULE_ID_MASK) >> 8U;
543
544 PM_PACK_PAYLOAD7(payload, module_id, security_flag, api_id,
545 pm_arg[0], pm_arg[1], pm_arg[2], pm_arg[3],
546 pm_arg[4], pm_arg[5]);
547
548 ret = pm_ipi_send_sync(primary_proc, payload, (uint32_t *)buf,
549 RET_PAYLOAD_ARG_CNT);
550
551 SMC_RET4(handle, (uint64_t)ret | ((uint64_t)buf[0] << 32U),
552 (uint64_t)buf[1] | ((uint64_t)buf[2] << 32U),
553 (uint64_t)buf[3] | ((uint64_t)buf[4] << 32U),
554 (uint64_t)buf[5]);
555 }
556
/**
 * pm_smc_handler() - SMC handler for PM-API calls coming from EL1/EL2.
 * @smc_fid: Function Identifier.
 * @x1: SMC64 Arguments from kernel.
 * @x2: SMC64 Arguments from kernel.
 * @x3: SMC64 Arguments from kernel (upper 32-bits).
 * @x4: Unused.
 * @cookie: Unused.
 * @handle: Pointer to caller's context structure.
 * @flags: SECURE or NON_SECURE.
 *
 * Return: Unused.
 *
 * Determines that smc_fid is valid and supported PM SMC Function ID from the
 * list of pm_api_ids, otherwise completes the request with
 * the unknown SMC Function ID.
 *
 * The SMC calls for PM service are forwarded from SIP Service SMC handler
 * function with rt_svc_handle signature.
 *
 */
uint64_t pm_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
			uint64_t x4, const void *cookie, void *handle, uint64_t flags)
{
	(void)cookie;
	uintptr_t ret;
	uint32_t pm_arg[PAYLOAD_ARG_CNT] = {0};
	uint32_t security_flag = NON_SECURE;
	uint32_t api_id;
	bool status = false, status_tmp = false;
	const uint64_t x[4] = {x1, x2, x3, x4};

	/* Handle case where PM wasn't initialized properly */
	if (pm_up == false) {
		SMC_RET1(handle, SMC_UNK);
	}

	/*
	 * Mark BIT24 payload (i.e 1st bit of pm_arg[3] ) as secure (0)
	 * if smc called is secure
	 *
	 * Add redundant macro call to immune the code from glitches
	 */
	SECURE_REDUNDANT_CALL(status, status_tmp, is_caller_secure, flags);
	if ((status != false) && (status_tmp != false)) {
		security_flag = SECURE;
	}

	/* Pass-through commands carry the EEMI API ID in x1's low 32 bits */
	if ((smc_fid & FUNCID_NUM_MASK) == PASS_THROUGH_FW_CMD_ID) {
		api_id = lower_32_bits(x[0]);

		EXTRACT_ARGS(pm_arg, x);

		return eemi_api_handler(api_id, pm_arg, handle, security_flag);
	}

	/* Unpack the 64-bit SMC arguments into 32-bit EEMI payload words */
	pm_arg[0] = (uint32_t)x1;
	pm_arg[1] = (uint32_t)(x1 >> 32U);
	pm_arg[2] = (uint32_t)x2;
	pm_arg[3] = (uint32_t)(x2 >> 32U);
	pm_arg[4] = (uint32_t)x3;
	(void)(x4);
	api_id = smc_fid & FUNCID_NUM_MASK;

	/* Try each handler in turn; a zero return means "not my API" */
	ret = eemi_for_compatibility(api_id, pm_arg, handle, security_flag);
	if (ret != (uintptr_t)0) {
		return ret;
	}

	/*
	 * NOTE(review): raw SMC flags are passed here instead of the derived
	 * security_flag used by the other handlers -- confirm intentional.
	 */
	ret = eemi_psci_debugfs_handler(api_id, pm_arg, handle,
					(uint32_t)flags);
	if (ret != (uintptr_t)0) {
		return ret;
	}

	ret = TF_A_specific_handler(api_id, pm_arg, handle, security_flag);
	if (ret != (uintptr_t)0) {
		return ret;
	}

	/* Fall back to the generic EEMI IPI path */
	ret = eemi_handler(api_id, pm_arg, handle, security_flag);

	return ret;
}
641