xref: /rk3399_ARM-atf/plat/xilinx/common/pm_service/pm_svc_main.c (revision 06f3c7058c42a9f1a9f7df75ea2de71a000855e8)
/*
 * Copyright (c) 2019-2022, Xilinx, Inc. All rights reserved.
 * Copyright (c) 2022-2025, Advanced Micro Devices, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * Top-level SMC handler for Versal power management calls and
 * IPI setup functions for communication with PMC.
 */

#include <errno.h>
#include <stdbool.h>

#include "../drivers/arm/gic/v3/gicv3_private.h"

#include <common/runtime_svc.h>
#include <drivers/arm/gicv3.h>
#include <lib/psci/psci.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>

#include <plat_private.h>
#include "pm_api_sys.h"
#include "pm_client.h"
#include "pm_ipi.h"
#include "pm_svc_main.h"

#define MODE			0x80000000U

#define INVALID_SGI		0xFFU
#define PM_INIT_SUSPEND_CB	(30U)
#define PM_NOTIFY_CB		(32U)
#define EVENT_CPU_PWRDWN	(4U)
#define MBOX_SGI_SHARED_IPI	(7U)
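
/*
 * PM_INIT_SUSPEND_CB and PM_NOTIFY_CB are the callback IDs delivered by the
 * firmware in the first word of the IPI callback payload, EVENT_CPU_PWRDWN is
 * the notifier event checked for CPU power-down requests, and
 * MBOX_SGI_SHARED_IPI is the non-secure SGI used to hand non-PMC IPIs over to
 * the mailbox driver.
 */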

/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 */
#define upper_32_bits(n)	((uint32_t)((n) >> 32U))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n)	((uint32_t)((n) & 0xffffffffU))

/**
 * EXTRACT_ARGS - extracts 32-bit payloads from 64-bit SMC arguments
 * @pm_arg: array of 32-bit payloads
 * @x: array of 64-bit SMC arguments
 */
#define EXTRACT_ARGS(pm_arg, x)						\
	for (uint32_t i = 0U; i < (PAYLOAD_ARG_CNT - 1U); i++) {	\
		if ((i % 2U) != 0U) {					\
			pm_arg[i] = lower_32_bits(x[(i / 2U) + 1U]);	\
		} else {						\
			pm_arg[i] = upper_32_bits(x[i / 2U]);		\
		}							\
	}
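
/*
 * Illustrative expansion (a sketch, assuming PAYLOAD_ARG_CNT is 7 as defined
 * for this platform) with x[] = {x1, x2, x3, x4}:
 *   pm_arg[0] = upper_32_bits(x1);  pm_arg[1] = lower_32_bits(x2);
 *   pm_arg[2] = upper_32_bits(x2);  pm_arg[3] = lower_32_bits(x3);
 *   pm_arg[4] = upper_32_bits(x3);  pm_arg[5] = lower_32_bits(x4);
 * The API ID itself travels in the lower 32 bits of x1 and is extracted
 * separately by the caller.
 */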

/* 1 sec wait timeout for secondary core(s) to power down */
#define PWRDWN_WAIT_TIMEOUT	(1000U)

/* pm_up == true: PM service is up, pm_up == false: PM service is down */
static bool pm_up;
static uint32_t sgi = (uint32_t)INVALID_SGI;
bool pwrdwn_req_received;

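/* Raise the registered non-secure SGI on the current core to notify the rich OS. */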
static void notify_os(void)
{
	plat_ic_raise_ns_sgi((int)sgi, read_mpidr_el1());
}

static uint64_t cpu_pwrdwn_req_handler(uint32_t id, uint32_t flags,
				       void *handle, void *cookie)
{
	(void)id;
	(void)flags;
	(void)handle;
	(void)cookie;
	uint32_t cpu_id = plat_my_core_pos();

	VERBOSE("Powering down CPU %u\n", cpu_id);

	/* Deactivate CPU power down SGI */
	plat_ic_end_of_interrupt(CPU_PWR_DOWN_REQ_INTR);

	return (uint64_t)psci_cpu_off();
}

/**
 * raise_pwr_down_interrupt() - Callback function to raise SGI.
 * @mpidr: MPIDR for the target CPU.
 *
 * Raise SGI interrupt to trigger the CPU power down sequence on all the
 * online secondary cores.
 */
static void raise_pwr_down_interrupt(u_register_t mpidr)
{
	plat_ic_raise_el3_sgi((int)CPU_PWR_DOWN_REQ_INTR, mpidr);
}

void request_cpu_pwrdwn(void)
{
	int ret;

	VERBOSE("CPU power down request received\n");

	/* Send powerdown request to online secondary core(s) */
	ret = psci_stop_other_cores(plat_my_core_pos(), PWRDWN_WAIT_TIMEOUT,
				    raise_pwr_down_interrupt);
	if (ret != PSCI_E_SUCCESS) {
		ERROR("Failed to powerdown secondary core(s)\n");
	}

	/* Clear IPI IRQ */
	pm_ipi_irq_clear(primary_proc);

	/* Deactivate IPI IRQ */
	plat_ic_end_of_interrupt(PLAT_VERSAL_IPI_IRQ);
}

static uint64_t ipi_fiq_handler(uint32_t id, uint32_t flags, void *handle,
				void *cookie)
{
	(void)flags;
	(void)handle;
	(void)cookie;
	uint32_t payload[4] = {0};
	enum pm_ret_status ret;
	uint32_t ipi_status, i;

	VERBOSE("Received IPI FIQ from firmware\n");

	console_flush();
	(void)plat_ic_acknowledge_interrupt();

	/* Check status register for each IPI except PMC */
	for (i = IPI_ID_APU; i <= IPI_ID_5; i++) {
		ipi_status = ipi_mb_enquire_status(IPI_ID_APU, i);

		/*
		 * If any agent other than the PMC has generated an IPI FIQ,
		 * send an SGI to the mailbox driver.
		 */
		if ((ipi_status & IPI_MB_STATUS_RECV_PENDING) != 0U) {
			plat_ic_raise_ns_sgi((int)MBOX_SGI_SHARED_IPI, read_mpidr_el1());
			break;
		}
	}

	/* If the PMC has not generated the interrupt, end the ISR */
	ipi_status = ipi_mb_enquire_status(IPI_ID_APU, IPI_ID_PMC);
	if ((ipi_status & IPI_MB_STATUS_RECV_PENDING) == 0U) {
		plat_ic_end_of_interrupt(id);
		goto exit_label;
	}

	/* Handle PMC case */
	ret = pm_get_callbackdata(payload, ARRAY_SIZE(payload), 0, 0);
	if (ret != PM_RET_SUCCESS) {
		payload[0] = (uint32_t)ret;
	}

	switch (payload[0]) {
	case PM_INIT_SUSPEND_CB:
		if (sgi != INVALID_SGI) {
			notify_os();
		}
		break;
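	/*
	 * EVENT_CPU_PWRDWN handling: when the OS has registered an SGI, the
	 * first power-down notification is only forwarded to the OS and
	 * latched in pwrdwn_req_received; the next notification powers down
	 * the secondary cores and this CPU from EL3. Without a registered
	 * SGI, EL3 performs the power down immediately.
	 */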
	case PM_NOTIFY_CB:
		if (sgi != INVALID_SGI) {
			if (payload[2] == EVENT_CPU_PWRDWN) {
				if (pwrdwn_req_received) {
					pwrdwn_req_received = false;
					request_cpu_pwrdwn();
					(void)psci_cpu_off();
					break;
				} else {
					pwrdwn_req_received = true;
				}
			}
			notify_os();
		} else {
			if (payload[2] == EVENT_CPU_PWRDWN) {
				request_cpu_pwrdwn();
				(void)psci_cpu_off();
			}
		}
		break;
	case (uint32_t)PM_RET_ERROR_INVALID_CRC:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid CRC in the payload\n");
		break;

	default:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid IPI payload\n");
		break;
	}

	/* Clear FIQ */
	plat_ic_end_of_interrupt(id);

exit_label:
	return 0;
}

/**
 * pm_register_sgi() - Register the SGI used to notify the OS of PM callbacks.
 * @sgi_num: SGI number to be used for communication.
 * @reset: Reset to invalid SGI when reset=1.
 *
 * Return: 0 on success or when the SGI is reset, -EBUSY if an SGI is already
 *         registered, -EINVAL if sgi_num is not a valid SGI number.
 *
 * Update the SGI number to be used.
 *
 */
int32_t pm_register_sgi(uint32_t sgi_num, uint32_t reset)
{
	int32_t ret = 0;

	if (reset == 1U) {
		sgi = INVALID_SGI;
	} else if (sgi != INVALID_SGI) {
		ret = -EBUSY;
	} else if (sgi_num >= GICV3_MAX_SGI_TARGETS) {
		ret = -EINVAL;
	} else {
		sgi = (uint32_t)sgi_num;
	}

	return ret;
}

/**
 * pm_setup() - PM service setup.
 *
 * Return: On success, the initialization function must return 0.
 *         Any other return value will cause the framework to ignore
 *         the service.
 *
 * Initialization functions for Versal power management for
 * communication with PMC.
 *
 * Called from the sip_svc_setup initialization function with the
 * rt_svc_init signature.
 *
 */
int32_t pm_setup(void)
{
	int32_t ret = 0;

	pm_ipi_init(primary_proc);
	pm_up = true;

	/* Register the SGI handler for the CPU power down request */
	ret = request_intr_type_el3(CPU_PWR_DOWN_REQ_INTR, cpu_pwrdwn_req_handler);
	if (ret != 0) {
		WARN("BL31: registering SGI interrupt failed\n");
	}

	/*
	 * Enable the IPI IRQ and assume the rich OS is ready to handle
	 * callback IRQs now. Even if we were wrong, this would not enable
	 * the IRQ in the GIC.
	 */
	pm_ipi_irq_enable(primary_proc);

	ret = request_intr_type_el3(PLAT_VERSAL_IPI_IRQ, ipi_fiq_handler);
	if (ret != 0) {
		WARN("BL31: registering IPI interrupt failed\n");
	}

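	/*
	 * Route the IPI interrupt in 1-of-N mode (MODE sets the
	 * Interrupt_Routing_Mode bit of GICD_IROUTER) so that any
	 * participating PE may handle it.
	 */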
	gicd_write_irouter(gicv3_driver_data->gicd_base, PLAT_VERSAL_IPI_IRQ, MODE);

	/* Register for idle callback during force power down/restart */
	ret = (int32_t)pm_register_notifier(primary_proc->node_id, EVENT_CPU_PWRDWN,
				   0x0U, 0x1U, SECURE_FLAG);
	if (ret != 0) {
		WARN("BL31: registering idle callback for restart/force power down failed\n");
	}

	return ret;
}

/**
 * eemi_for_compatibility() - EEMI calls handler for deprecated calls.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * Return: A non-zero uintptr_t value if the EEMI API is handled here, else 0.
 *
 * The use of some EEMI APIs has to change in the Linux driver before they can
 * take advantage of the common EEMI handler in TF-A. Until their use in the
 * Linux driver changes, the old implementation of these APIs is kept here to
 * maintain backward compatibility.
 *
 */
static uintptr_t eemi_for_compatibility(uint32_t api_id, const uint32_t *pm_arg,
					void *handle, uint32_t security_flag)
{
	enum pm_ret_status ret;

	switch (api_id) {

	case (uint32_t)PM_FEATURE_CHECK:
	{
		uint32_t result[RET_PAYLOAD_ARG_CNT] = {0U};

		ret = pm_feature_check(pm_arg[0], result, security_flag);
		SMC_RET2(handle, (uint64_t)ret | ((uint64_t)result[0] << 32U),
			 (uint64_t)result[1] | ((uint64_t)result[2] << 32U));
	}

	case PM_LOAD_PDI:
	{
		ret = pm_load_pdi(pm_arg[0], pm_arg[1], pm_arg[2],
				  security_flag);
		SMC_RET1(handle, (uint64_t)ret);
	}

	default:
		return (uintptr_t)0;
	}
}

/**
 * eemi_psci_debugfs_handler() - EEMI APIs invoked from PSCI or debugfs.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * These EEMI APIs perform CPU-specific power management tasks and are invoked
 * either from PSCI or from debugfs in the kernel. They require CPU-specific
 * processing (for example, enabling or disabling CPU-specific interrupts)
 * before the IPI request is sent to the Platform Management Controller, so
 * they need a separate handler and cannot be served by the common EEMI
 * handler.
 *
 * Return: A non-zero uintptr_t value if the EEMI API is handled here, else 0.
 *
 */
static uintptr_t eemi_psci_debugfs_handler(uint32_t api_id, const uint32_t *pm_arg,
					   void *handle, uint32_t security_flag)
{
	enum pm_ret_status ret;

	switch (api_id) {

	case (uint32_t)PM_SELF_SUSPEND:
		ret = pm_self_suspend(pm_arg[0], pm_arg[1], pm_arg[2],
				      pm_arg[3], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	case (uint32_t)PM_FORCE_POWERDOWN:
		ret = pm_force_powerdown(pm_arg[0], (uint8_t)pm_arg[1], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	case (uint32_t)PM_REQ_SUSPEND:
		ret = pm_req_suspend(pm_arg[0], (uint8_t)pm_arg[1], pm_arg[2],
				     pm_arg[3], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	case (uint32_t)PM_ABORT_SUSPEND:
		ret = pm_abort_suspend((enum pm_abort_reason)pm_arg[0], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	case (uint32_t)PM_SYSTEM_SHUTDOWN:
		ret = pm_system_shutdown(pm_arg[0], pm_arg[1], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	default:
		return (uintptr_t)0;
	}
}

/**
 * TF_A_specific_handler() - SMC handler for TF-A specific functionality.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * These calls implement functionality that does not require an IPI
 * transaction: the request is handled entirely within TF-A and the requested
 * data is returned to the kernel directly.
 *
 * Return: A non-zero uintptr_t value if the TF-A specific API is handled
 *         here, else 0.
 *
 */
static uintptr_t TF_A_specific_handler(uint32_t api_id, const uint32_t *pm_arg,
				       void *handle, uint32_t security_flag)
{
	switch (api_id) {

	case TF_A_FEATURE_CHECK:
	{
		enum pm_ret_status ret;
		uint32_t result[PAYLOAD_ARG_CNT] = {0U};

		ret = eemi_feature_check(pm_arg[0], result);
		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)result[0] << 32U));
	}

	case TF_A_PM_REGISTER_SGI:
	{
		int32_t ret;

		ret = pm_register_sgi(pm_arg[0], pm_arg[1]);
		if (ret != 0) {
			SMC_RET1(handle, (uint32_t)PM_RET_ERROR_ARGS);
		}

		SMC_RET1(handle, (uint32_t)PM_RET_SUCCESS);
	}

	case PM_GET_CALLBACK_DATA:
	{
		uint32_t result[4] = {0};
		enum pm_ret_status ret;

		ret = pm_get_callbackdata(result, ARRAY_SIZE(result), security_flag, 1U);
		if (ret != PM_RET_SUCCESS) {
			result[0] = (uint32_t)ret;
		}

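		/* Pack the four 32-bit callback words into two 64-bit return registers. */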
		SMC_RET2(handle,
			(uint64_t)result[0] | ((uint64_t)result[1] << 32U),
			(uint64_t)result[2] | ((uint64_t)result[3] << 32U));
	}

	case PM_GET_TRUSTZONE_VERSION:
		SMC_RET1(handle, (uint64_t)PM_RET_SUCCESS |
			 ((uint64_t)TZ_VERSION << 32U));

	default:
		return (uintptr_t)0;
	}
}

/**
 * eemi_handler() - Prepare EEMI payload and perform IPI transaction.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * EEMI (Embedded Energy Management Interface) is a Xilinx proprietary
 * protocol that allows communication between the power management controller
 * and the different processing clusters.
 *
 * This handler prepares the EEMI protocol payload received from the kernel
 * and performs the IPI transaction.
 *
 * Return: SMC return arguments, passed back to the caller through @handle.
 *
 */
static uintptr_t eemi_handler(uint32_t api_id, const uint32_t *pm_arg,
			      void *handle, uint32_t security_flag)
{
	enum pm_ret_status ret;
	uint32_t buf[RET_PAYLOAD_ARG_CNT] = {0};

	ret = pm_handle_eemi_call(security_flag, api_id, pm_arg[0], pm_arg[1],
				  pm_arg[2], pm_arg[3], pm_arg[4], buf);
	/*
	 * Two PM_QUERY_DATA queries, which fetch the clock name and the
	 * pinctrl function name, receive five words of response from the
	 * firmware. The Linux driver can currently receive only four words
	 * from TF-A, so these queries are handled separately from the other
	 * EEMI calls.
	 */
	if (api_id == (uint32_t)PM_QUERY_DATA) {
		if (((pm_arg[0] == (uint32_t)XPM_QID_CLOCK_GET_NAME) ||
		    (pm_arg[0] == (uint32_t)XPM_QID_PINCTRL_GET_FUNCTION_NAME)) &&
		    (ret == PM_RET_SUCCESS)) {
			SMC_RET2(handle, (uint64_t)buf[0] | ((uint64_t)buf[1] << 32U),
				(uint64_t)buf[2] | ((uint64_t)buf[3] << 32U));
		}
	}

	SMC_RET2(handle, (uint64_t)ret | ((uint64_t)buf[0] << 32U),
		 (uint64_t)buf[1] | ((uint64_t)buf[2] << 32U));
}

/**
 * eemi_api_handler() - Prepare EEMI payload and perform IPI transaction.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * EEMI (Embedded Energy Management Interface) is an AMD-Xilinx proprietary
 * protocol that allows communication between the power management controller
 * and the different processing clusters.
 *
 * This handler packs the EEMI protocol payload received from the kernel and
 * performs the IPI transaction with the firmware.
 *
 * Return: SMC return arguments, passed back to the caller through @handle.
 */
static uintptr_t eemi_api_handler(uint32_t api_id, const uint32_t *pm_arg,
				  void *handle, uint32_t security_flag)
{
	enum pm_ret_status ret;
	uint32_t buf[RET_PAYLOAD_ARG_CNT] = {0U};
	uint32_t payload[PAYLOAD_ARG_CNT] = {0U};
	uint32_t module_id;

	module_id = (api_id & MODULE_ID_MASK) >> 8U;

	PM_PACK_PAYLOAD7(payload, module_id, security_flag, api_id,
			 pm_arg[0], pm_arg[1], pm_arg[2], pm_arg[3],
			 pm_arg[4], pm_arg[5]);

	ret = pm_ipi_send_sync(primary_proc, payload, (uint32_t *)buf,
			       RET_PAYLOAD_ARG_CNT);

	SMC_RET4(handle, (uint64_t)ret | ((uint64_t)buf[0] << 32U),
		 (uint64_t)buf[1] | ((uint64_t)buf[2] << 32U),
		 (uint64_t)buf[3] | ((uint64_t)buf[4] << 32U),
		 (uint64_t)buf[5]);
}

/**
 * pm_smc_handler() - SMC handler for PM-API calls coming from EL1/EL2.
 * @smc_fid: Function Identifier.
 * @x1: SMC64 Arguments from kernel.
 * @x2: SMC64 Arguments from kernel.
 * @x3: SMC64 Arguments from kernel (only the lower 32 bits are used on the
 *      legacy path).
 * @x4: Unused on the legacy path; carries additional payload for
 *      pass-through calls.
 * @cookie: Unused.
 * @handle: Pointer to caller's context structure.
 * @flags: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * Return: Unused.
 *
 * Determines whether smc_fid is a valid and supported PM SMC Function ID from
 * the list of pm_api_ids; otherwise the request is completed with the unknown
 * SMC Function ID.
 *
 * The SMC calls for the PM service are forwarded from the SIP Service SMC
 * handler function with the rt_svc_handle signature.
 *
 */
uint64_t pm_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
			uint64_t x4, const void *cookie, void *handle, uint64_t flags)
{
	(void)cookie;
	uintptr_t ret;
	uint32_t pm_arg[PAYLOAD_ARG_CNT] = {0};
	uint32_t security_flag = NON_SECURE_FLAG;
	uint32_t api_id;
	bool status = false, status_tmp = false;
	const uint64_t x[4] = {x1, x2, x3, x4};

	/* Handle the case where the PM service wasn't initialized properly */
	if (pm_up == false) {
		SMC_RET1(handle, SMC_UNK);
	}

	/*
	 * Mark the BIT24 payload (i.e. the first bit of pm_arg[3]) as secure (0)
	 * if the SMC call originated from the secure world.
	 *
	 * The redundant macro call makes the check immune to glitch attacks.
	 */
	SECURE_REDUNDANT_CALL(status, status_tmp, is_caller_secure, flags);
	if ((status != false) && (status_tmp != false)) {
		security_flag = SECURE_FLAG;
	}

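	/*
	 * Pass-through calls carry the full EEMI API ID (including the module
	 * ID) in the lower 32 bits of x1 and up to six payload words packed
	 * two per register in x1..x4, and are forwarded to the firmware by
	 * eemi_api_handler().
	 */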
	if ((smc_fid & FUNCID_NUM_MASK) == PASS_THROUGH_FW_CMD_ID) {
		api_id = lower_32_bits(x[0]);

		EXTRACT_ARGS(pm_arg, x);

		return eemi_api_handler(api_id, pm_arg, handle, security_flag);
	}

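	/*
	 * Legacy EEMI path: the API ID comes from the SMC function ID and each
	 * 64-bit register carries two 32-bit payload words (x3's upper half
	 * and x4 are not used here).
	 */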
	pm_arg[0] = (uint32_t)x1;
	pm_arg[1] = (uint32_t)(x1 >> 32U);
	pm_arg[2] = (uint32_t)x2;
	pm_arg[3] = (uint32_t)(x2 >> 32U);
	pm_arg[4] = (uint32_t)x3;
	(void)(x4);
	api_id = smc_fid & FUNCID_NUM_MASK;

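	/*
	 * Dispatch in order: the compatibility, PSCI/debugfs and TF-A specific
	 * handlers each return 0 when they do not recognise api_id; anything
	 * left over is sent to the firmware by the generic EEMI handler.
	 */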
	ret = eemi_for_compatibility(api_id, pm_arg, handle, security_flag);
	if (ret != (uintptr_t)0) {
		return ret;
	}

	ret = eemi_psci_debugfs_handler(api_id, pm_arg, handle,
					(uint32_t)flags);
	if (ret != (uintptr_t)0) {
		return ret;
	}

	ret = TF_A_specific_handler(api_id, pm_arg, handle, security_flag);
	if (ret != (uintptr_t)0) {
		return ret;
	}

	ret = eemi_handler(api_id, pm_arg, handle, security_flag);

	return ret;
}