/*
 * Copyright (c) 2019-2022, Xilinx, Inc. All rights reserved.
 * Copyright (c) 2022-2025, Advanced Micro Devices, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * Top-level SMC handler for Versal power management calls and
 * IPI setup functions for communication with PMC.
 */

#include <errno.h>
#include <stdbool.h>

#include "../drivers/arm/gic/v3/gicv3_private.h"

#include <common/runtime_svc.h>
#include <drivers/arm/gicv3.h>
#include <lib/psci/psci.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>

#include <plat_private.h>
#include "pm_api_sys.h"
#include "pm_client.h"
#include "pm_ipi.h"
#include "pm_svc_main.h"

/*
 * Affinity/routing value written to GICD_IROUTER for the IPI IRQ in
 * pm_setup() (bit 31 = Interrupt_Routing_Mode, i.e. 1-of-N routing).
 * NOTE(review): name is very generic for a routing-mode constant --
 * confirm intent against the GICv3 GICD_IROUTER definition.
 */
#define MODE			0x80000000U

/* Sentinel meaning "no callback SGI registered by the rich OS yet". */
#define INVALID_SGI		0xFFU
/* Callback IDs delivered by PMC firmware in payload[0] of the callback data. */
#define PM_INIT_SUSPEND_CB	(30U)
#define PM_NOTIFY_CB		(32U)
/* Event ID (payload[2] of PM_NOTIFY_CB) signalling a CPU power-down request. */
#define EVENT_CPU_PWRDWN	(4U)
/* SGI used to hand non-PMC IPI traffic over to the NS mailbox driver. */
#define MBOX_SGI_SHARED_IPI	(7U)

/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 */
#define upper_32_bits(n)	((uint32_t)((n) >> 32U))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n)	((uint32_t)((n) & 0xffffffffU))

/**
 * EXTRACT_ARGS - extracts 32-bit payloads from 64-bit SMC arguments
 * @pm_arg: array of 32-bit payloads
 * @x: array of 64-bit SMC arguments
 *
 * The first payload comes from the upper half of x[0] (the lower half of
 * x[0] holds the API ID); subsequent payloads alternate between the lower
 * and upper halves of the following registers. PAYLOAD_ARG_CNT - 1 words
 * are extracted in total.
 */
#define EXTRACT_ARGS(pm_arg, x)						\
	for (uint32_t i = 0U; i < (PAYLOAD_ARG_CNT - 1U); i++) {	\
		if ((i % 2U) != 0U) {					\
			pm_arg[i] = lower_32_bits(x[(i / 2U) + 1U]);	\
		} else {						\
			pm_arg[i] = upper_32_bits(x[i / 2U]);		\
		}							\
	}

/* 1 sec of wait timeout for secondary core down */
#define PWRDWN_WAIT_TIMEOUT	(1000U)

/* pm_up = true - UP, pm_up = false - DOWN */
static bool pm_up;
/* SGI number registered via pm_register_sgi(); INVALID_SGI when unset. */
static uint32_t sgi = (uint32_t)INVALID_SGI;
/*
 * Two-phase power-down handshake state: set on the first EVENT_CPU_PWRDWN
 * notification (OS is informed), acted upon when the second one arrives.
 */
static bool pwrdwn_req_received;

/**
 * pm_pwrdwn_req_status() - Report pending CPU power-down handshake.
 *
 * Return: true if the first power-down notification was received and the
 *         handshake with the rich OS is still in progress.
 */
bool pm_pwrdwn_req_status(void)
{
	return pwrdwn_req_received;
}

/* Forward the registered callback SGI to the rich OS on the current core. */
static void notify_os(void)
{
	plat_ic_raise_ns_sgi((int)sgi, read_mpidr_el1());
}

/**
 * cpu_pwrdwn_req_handler() - EL3 SGI handler that powers down this CPU.
 * @id: interrupt ID (unused; CPU_PWR_DOWN_REQ_INTR is deactivated directly).
 * @flags: interrupt flags (unused).
 * @handle: pointer to caller's context (unused).
 * @cookie: opaque cookie (unused).
 *
 * Invoked on each secondary core via the EL3 SGI raised by
 * raise_pwr_down_interrupt(); takes the local core offline through PSCI.
 *
 * Return: result of psci_cpu_off() (does not return on success).
 */
static uint64_t cpu_pwrdwn_req_handler(uint32_t id, uint32_t flags,
				       void *handle, void *cookie)
{
	(void)id;
	(void)flags;
	(void)handle;
	(void)cookie;
	uint32_t cpu_id = plat_my_core_pos();

	VERBOSE("Powering down CPU %d\n", cpu_id);

	/* Deactivate CPU power down SGI */
	plat_ic_end_of_interrupt(CPU_PWR_DOWN_REQ_INTR);

	return (uint64_t)psci_cpu_off();
}

/**
 * raise_pwr_down_interrupt() - Callback function to raise SGI.
 * @mpidr: MPIDR for the target CPU.
 *
 * Raise SGI interrupt to trigger the CPU power down sequence on all the
 * online secondary cores.
 */
static void raise_pwr_down_interrupt(u_register_t mpidr)
{
	plat_ic_raise_el3_sgi((int)CPU_PWR_DOWN_REQ_INTR, mpidr);
}

/**
 * request_cpu_pwrdwn() - Power down all online secondary cores.
 *
 * Asks PSCI to stop every core except the calling one (each secondary is
 * taken down by cpu_pwrdwn_req_handler() via an EL3 SGI), waiting up to
 * PWRDWN_WAIT_TIMEOUT ms, then clears and deactivates the IPI IRQ on the
 * calling (primary) core. Failure to stop the secondaries is only logged.
 */
void request_cpu_pwrdwn(void)
{
	int ret;

	VERBOSE("CPU power down request received\n");

	/* Send powerdown request to online secondary core(s) */
	ret = psci_stop_other_cores(plat_my_core_pos(), PWRDWN_WAIT_TIMEOUT,
				    raise_pwr_down_interrupt);
	if (ret != PSCI_E_SUCCESS) {
		ERROR("Failed to powerdown secondary core(s)\n");
	}

	/* Clear IPI IRQ */
	pm_ipi_irq_clear(primary_proc);

	/* Deactivate IPI IRQ */
	plat_ic_end_of_interrupt(PLAT_VERSAL_IPI_IRQ);
}

/**
 * ipi_fiq_handler() - EL3 handler for the shared IPI FIQ.
 * @id: interrupt ID of the IPI IRQ (used for end-of-interrupt).
 * @flags: interrupt flags (unused).
 * @handle: pointer to caller's context (unused).
 * @cookie: opaque cookie (unused).
 *
 * Demultiplexes the shared IPI interrupt: traffic from any non-PMC agent
 * is forwarded to the non-secure mailbox driver via MBOX_SGI_SHARED_IPI;
 * PMC callback data is fetched and dispatched here. For EVENT_CPU_PWRDWN
 * notifications a two-phase handshake is used when an OS callback SGI is
 * registered: the first event only notifies the OS, the second actually
 * powers the cluster down. Without a registered SGI the power-down is
 * performed immediately.
 *
 * Return: 0 always.
 */
static uint64_t ipi_fiq_handler(uint32_t id, uint32_t flags, void *handle,
				void *cookie)
{
	(void)flags;
	(void)handle;
	(void)cookie;
	uint32_t payload[4] = {0};
	enum pm_ret_status ret;
	uint32_t ipi_status, i;

	VERBOSE("Received IPI FIQ from firmware\n");

	console_flush();
	(void)plat_ic_acknowledge_interrupt();

	/* Check status register for each IPI except PMC */
	for (i = IPI_ID_APU; i <= IPI_ID_5; i++) {
		ipi_status = ipi_mb_enquire_status(IPI_ID_APU, i);

		/* If any agent other than PMC has generated IPI FIQ then send SGI to mbox driver */
		if ((ipi_status & IPI_MB_STATUS_RECV_PENDING) != 0U) {
			plat_ic_raise_ns_sgi((int)MBOX_SGI_SHARED_IPI, read_mpidr_el1());
			break;
		}
	}

	/* If PMC has not generated interrupt then end ISR */
	ipi_status = ipi_mb_enquire_status(IPI_ID_APU, IPI_ID_PMC);
	if ((ipi_status & IPI_MB_STATUS_RECV_PENDING) == 0U) {
		plat_ic_end_of_interrupt(id);
		goto exit_label;
	}

	/* Handle PMC case */
	ret = pm_get_callbackdata(payload, ARRAY_SIZE(payload), 0, 0);
	if (ret != PM_RET_SUCCESS) {
		/* Propagate the fetch error as the callback ID so the
		 * switch below falls through to the error branches. */
		payload[0] = (uint32_t)ret;
	}

	switch (payload[0]) {
	case PM_INIT_SUSPEND_CB:
		if (sgi != INVALID_SGI) {
			notify_os();
		}
		break;
	case PM_NOTIFY_CB:
		if (sgi != INVALID_SGI) {
			if ((payload[2] == EVENT_CPU_PWRDWN) &&
			    (NODECLASS(payload[1]) == (uint32_t)XPM_NODECLASS_DEVICE)) {
				if (pwrdwn_req_received) {
					/* Second notification: OS had its
					 * chance, power down now. */
					pwrdwn_req_received = false;
					request_cpu_pwrdwn();
					(void)psci_cpu_off();
					break;
				} else {
					/* First notification: remember it and
					 * let the OS react via SGI below. */
					pwrdwn_req_received = true;
				}
			}
			notify_os();
		} else {
			/* No OS callback SGI registered: act immediately. */
			if ((payload[2] == EVENT_CPU_PWRDWN) &&
			    (NODECLASS(payload[1]) == (uint32_t)XPM_NODECLASS_DEVICE)) {
				request_cpu_pwrdwn();
				(void)psci_cpu_off();
			}
		}
		break;
	case (uint32_t)PM_RET_ERROR_INVALID_CRC:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid CRC in the payload\n");
		break;

	default:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid IPI payload\n");
		break;
	}

	/* Clear FIQ */
	plat_ic_end_of_interrupt(id);

exit_label:
	return 0;
}

/**
 * pm_register_sgi() - PM register the IPI interrupt.
 * @sgi_num: SGI number to be used for communication.
 * @reset: Reset to invalid SGI when reset=1.
 *
 * Return: On success, the initialization function must return 0.
 *         Any other return value will cause the framework to ignore
 *         the service.
 *
 * Update the SGI number to be used. Returns -EBUSY if an SGI is already
 * registered (and reset was not requested) and -EINVAL if sgi_num is not
 * a valid GICv3 SGI.
 *
 */
int32_t pm_register_sgi(uint32_t sgi_num, uint32_t reset)
{
	int32_t ret = 0;

	if (reset == 1U) {
		sgi = INVALID_SGI;
	} else if (sgi != INVALID_SGI) {
		ret = -EBUSY;
	} else if (sgi_num >= GICV3_MAX_SGI_TARGETS) {
		ret = -EINVAL;
	} else {
		sgi = (uint32_t)sgi_num;
	}

	return ret;
}

/**
 * pm_setup() - PM service setup.
 *
 * Return: On success, the initialization function must return 0.
 *         Any other return value will cause the framework to ignore
 *         the service.
 *
 * Initialization functions for Versal power management for
 * communication with PMC.
 *
 * Called from sip_svc_setup initialization function with the
 * rt_svc_init signature.
 *
 */
int32_t pm_setup(void)
{
	int32_t ret = 0;

	pm_ipi_init(primary_proc);
	pm_up = true;
	pwrdwn_req_received = false;

	/* register SGI handler for CPU power down request */
	ret = request_intr_type_el3(CPU_PWR_DOWN_REQ_INTR, cpu_pwrdwn_req_handler);
	if (ret != 0) {
		WARN("BL31: registering SGI interrupt failed\n");
	}

	/*
	 * Enable IPI IRQ
	 * assume the rich OS is OK to handle callback IRQs now.
	 * Even if we were wrong, it would not enable the IRQ in
	 * the GIC.
	 */
	pm_ipi_irq_enable(primary_proc);

	ret = request_intr_type_el3(PLAT_VERSAL_IPI_IRQ, ipi_fiq_handler);
	if (ret != 0) {
		WARN("BL31: registering IPI interrupt failed\n");
	}

	/* Route the IPI IRQ 1-of-N (any participating core). */
	gicd_write_irouter(gicv3_driver_data->gicd_base, PLAT_VERSAL_IPI_IRQ, MODE);

	/* Register for idle callback during force power down/restart */
	ret = (int32_t)pm_register_notifier(primary_proc->node_id, EVENT_CPU_PWRDWN,
					    0x0U, 0x1U, SECURE_FLAG);
	if (ret != 0) {
		WARN("BL31: registering idle callback for restart/force power down failed\n");
	}

	return ret;
}

/**
 * eemi_for_compatibility() - EEMI calls handler for deprecated calls.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * Return: If EEMI API found then, uintptr_t type address, else 0.
 *
 * Some EEMI API's use case needs to be changed in Linux driver, so they
 * can take advantage of common EEMI handler in TF-A. As of now the old
 * implementation of these APIs are required to maintain backward compatibility
 * until their use case in linux driver changes.
 *
 */
static uintptr_t eemi_for_compatibility(uint32_t api_id, const uint32_t *pm_arg,
					void *handle, uint32_t security_flag)
{
	enum pm_ret_status ret;

	switch (api_id) {

	case (uint32_t)PM_FEATURE_CHECK:
	{
		uint32_t result[RET_PAYLOAD_ARG_CNT] = {0U};

		ret = pm_feature_check(pm_arg[0], result, security_flag);
		/* Pack status + result words pairwise into the two return regs. */
		SMC_RET2(handle, (uint64_t)ret | ((uint64_t)result[0] << 32U),
			 (uint64_t)result[1] | ((uint64_t)result[2] << 32U));
	}

	case PM_LOAD_PDI:
	{
		ret = pm_load_pdi(pm_arg[0], pm_arg[1], pm_arg[2],
				  security_flag);
		SMC_RET1(handle, (uint64_t)ret);
	}

	default:
		return (uintptr_t)0;
	}
}

/**
 * eemi_psci_debugfs_handler() - EEMI API invoked from PSCI.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * These EEMI APIs performs CPU specific power management tasks.
 * These EEMI APIs are invoked either from PSCI or from debugfs in kernel.
 * These calls require CPU specific processing before sending IPI request to
 * Platform Management Controller. For example enable/disable CPU specific
 * interrupts. This requires separate handler for these calls and may not be
 * handled using common eemi handler.
 *
 * Return: If EEMI API found then, uintptr_t type address, else 0.
 *
 */
static uintptr_t eemi_psci_debugfs_handler(uint32_t api_id, const uint32_t *pm_arg,
					   void *handle, uint32_t security_flag)
{
	enum pm_ret_status ret;

	switch (api_id) {

	case (uint32_t)PM_SELF_SUSPEND:
		ret = pm_self_suspend(pm_arg[0], pm_arg[1], pm_arg[2],
				      pm_arg[3], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	case (uint32_t)PM_FORCE_POWERDOWN:
		ret = pm_force_powerdown(pm_arg[0], (uint8_t)pm_arg[1], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	case (uint32_t)PM_ABORT_SUSPEND:
		ret = pm_abort_suspend((enum pm_abort_reason)pm_arg[0], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	case (uint32_t)PM_SYSTEM_SHUTDOWN:
		ret = pm_system_shutdown(pm_arg[0], pm_arg[1], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	default:
		return (uintptr_t)0;
	}
}

/**
 * tfa_clear_pm_state() - Reset TF-A-specific PM state.
 *
 * This function resets TF-A-specific state that may have been modified,
 * such as during a kexec-based kernel reload. It resets the SGI number
 * and the shutdown scope to its default value.
 */
static enum pm_ret_status tfa_clear_pm_state(void)
{
	/* Reset SGI number to default value(-1). */
	sgi = (uint32_t)INVALID_SGI;

	/* Reset the shutdown scope to its default value(system). */
	return pm_system_shutdown(XPM_SHUTDOWN_TYPE_SETSCOPE_ONLY, XPM_SHUTDOWN_SUBTYPE_RST_SYSTEM,
				  0U);
}

/**
 * TF_A_specific_handler() - SMC handler for TF-A specific functionality.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * These EEMI calls performs functionality that does not require
 * IPI transaction. The handler ends in TF-A and returns requested data to
 * kernel from TF-A.
 *
 * Return: If TF-A specific API found then, uintptr_t type address, else 0
 *
 */
static uintptr_t TF_A_specific_handler(uint32_t api_id, const uint32_t *pm_arg,
				       void *handle, uint32_t security_flag)
{
	switch (api_id) {

	case TF_A_FEATURE_CHECK:
	{
		enum pm_ret_status ret;
		uint32_t result[PAYLOAD_ARG_CNT] = {0U};

		ret = eemi_feature_check(pm_arg[0], result);
		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)result[0] << 32U));
	}

	case TF_A_PM_REGISTER_SGI:
	{
		int32_t ret;

		/* Any registration failure is reported as a generic
		 * argument error to the caller. */
		ret = pm_register_sgi(pm_arg[0], pm_arg[1]);
		if (ret != 0) {
			SMC_RET1(handle, (uint32_t)PM_RET_ERROR_ARGS);
		}

		SMC_RET1(handle, (uint32_t)PM_RET_SUCCESS);
	}

	case PM_GET_CALLBACK_DATA:
	{
		uint32_t result[4] = {0};
		enum pm_ret_status ret;

		ret = pm_get_callbackdata(result, ARRAY_SIZE(result), security_flag, 1U);
		if (ret != PM_RET_SUCCESS) {
			result[0] = (uint32_t)ret;
		}

		SMC_RET2(handle,
			 (uint64_t)result[0] | ((uint64_t)result[1] << 32U),
			 (uint64_t)result[2] | ((uint64_t)result[3] << 32U));
	}

	case PM_GET_TRUSTZONE_VERSION:
		SMC_RET1(handle, (uint64_t)PM_RET_SUCCESS |
			 ((uint64_t)TZ_VERSION << 32U));

	case TF_A_CLEAR_PM_STATE:
	{
		enum pm_ret_status ret;

		ret = tfa_clear_pm_state();

		SMC_RET1(handle, (uint64_t)ret);
	}

	default:
		return (uintptr_t)0;
	}
}

/**
 * eemi_handler() - Prepare EEMI payload and perform IPI transaction.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * EEMI - Embedded Energy Management Interface is Xilinx proprietary protocol
 * to allow communication between power management controller and different
 * processing clusters.
 *
 * This handler prepares EEMI protocol payload received from kernel and performs
 * IPI transaction.
 *
 * Return: If EEMI API found then, uintptr_t type address, else 0
 *
 */
static uintptr_t eemi_handler(uint32_t api_id, const uint32_t *pm_arg,
			      void *handle, uint32_t security_flag)
{
	enum pm_ret_status ret;
	uint32_t buf[RET_PAYLOAD_ARG_CNT] = {0};

	ret = pm_handle_eemi_call(security_flag, api_id, pm_arg[0], pm_arg[1],
				  pm_arg[2], pm_arg[3], pm_arg[4], buf);
	/*
	 * Two IOCTLs, to get clock name and pinctrl name of pm_query_data API
	 * receives 5 words of response from firmware. Currently linux driver can
	 * receive only 4 words from TF-A. So, this needs to be handled separately
	 * than other eemi calls.
	 */
	if (api_id == (uint32_t)PM_QUERY_DATA) {
		if (((pm_arg[0] == (uint32_t)XPM_QID_CLOCK_GET_NAME) ||
		     (pm_arg[0] == (uint32_t)XPM_QID_PINCTRL_GET_FUNCTION_NAME)) &&
		    (ret == PM_RET_SUCCESS)) {
			/* Name queries: return buf[0..3] without the status word. */
			SMC_RET2(handle, (uint64_t)buf[0] | ((uint64_t)buf[1] << 32U),
				 (uint64_t)buf[2] | ((uint64_t)buf[3] << 32U));
		}
	}

	SMC_RET2(handle, (uint64_t)ret | ((uint64_t)buf[0] << 32U),
		 (uint64_t)buf[1] | ((uint64_t)buf[2] << 32U));
}

/**
 * eemi_api_handler() - Prepare EEMI payload and perform IPI transaction.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * EEMI - Embedded Energy Management Interface is AMD-Xilinx proprietary
 * protocol to allow communication between power management controller and
 * different processing clusters.
 *
 * This handler prepares EEMI protocol payload received from kernel and performs
 * IPI transaction.
 *
 * Return: If EEMI API found then, uintptr_t type address, else 0
 */
static uintptr_t eemi_api_handler(uint32_t api_id, const uint32_t *pm_arg,
				  void *handle, uint32_t security_flag)
{
	enum pm_ret_status ret;
	uint32_t buf[RET_PAYLOAD_ARG_CNT] = {0U};
	uint32_t payload[PAYLOAD_ARG_CNT] = {0U};
	uint32_t module_id;

	/* Module ID lives in bits [15:8] of the API ID. */
	module_id = (api_id & MODULE_ID_MASK) >> 8U;

	PM_PACK_PAYLOAD7(payload, module_id, security_flag, api_id,
			 pm_arg[0], pm_arg[1], pm_arg[2], pm_arg[3],
			 pm_arg[4], pm_arg[5]);

	ret = pm_ipi_send_sync(primary_proc, payload, (uint32_t *)buf,
			       RET_PAYLOAD_ARG_CNT);

	/* Status word plus up to six response words packed into x0-x3. */
	SMC_RET4(handle, (uint64_t)ret | ((uint64_t)buf[0] << 32U),
		 (uint64_t)buf[1] | ((uint64_t)buf[2] << 32U),
		 (uint64_t)buf[3] | ((uint64_t)buf[4] << 32U),
		 (uint64_t)buf[5]);
}

/**
 * pm_smc_handler() - SMC handler for PM-API calls coming from EL1/EL2.
 * @smc_fid: Function Identifier.
 * @x1: SMC64 Arguments from kernel.
 * @x2: SMC64 Arguments from kernel.
 * @x3: SMC64 Arguments from kernel (upper 32-bits).
 * @x4: Unused.
 * @cookie: Unused.
 * @handle: Pointer to caller's context structure.
 * @flags: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * Return: Unused.
 *
 * Determines that smc_fid is valid and supported PM SMC Function ID from the
 * list of pm_api_ids, otherwise completes the request with
 * the unknown SMC Function ID.
 *
 * The SMC calls for PM service are forwarded from SIP Service SMC handler
 * function with rt_svc_handle signature.
 *
 */
uint64_t pm_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
			uint64_t x4, const void *cookie, void *handle, uint64_t flags)
{
	(void)cookie;
	uintptr_t ret;
	uint32_t pm_arg[PAYLOAD_ARG_CNT] = {0};
	uint32_t security_flag = NON_SECURE_FLAG;
	uint32_t api_id;
	bool status = false, status_tmp = false;
	const uint64_t x[4] = {x1, x2, x3, x4};

	/* Handle case where PM wasn't initialized properly */
	if (pm_up == false) {
		SMC_RET1(handle, SMC_UNK);
	}

	/*
	 * Mark BIT24 payload (i.e 1st bit of pm_arg[3] ) as secure (0)
	 * if smc called is secure
	 *
	 * Add redundant macro call to immune the code from glitches
	 */
	SECURE_REDUNDANT_CALL(status, status_tmp, is_caller_secure, flags);
	if ((status != false) && (status_tmp != false)) {
		security_flag = SECURE_FLAG;
	}

	/* Pass-through commands carry the real API ID in the low half of x1. */
	if ((smc_fid & FUNCID_NUM_MASK) == PASS_THROUGH_FW_CMD_ID) {
		api_id = lower_32_bits(x[0]);

		EXTRACT_ARGS(pm_arg, x);

		return eemi_api_handler(api_id, pm_arg, handle, security_flag);
	}

	/* Legacy encoding: split x1-x3 into five 32-bit payload words. */
	pm_arg[0] = (uint32_t)x1;
	pm_arg[1] = (uint32_t)(x1 >> 32U);
	pm_arg[2] = (uint32_t)x2;
	pm_arg[3] = (uint32_t)(x2 >> 32U);
	pm_arg[4] = (uint32_t)x3;
	(void)(x4);
	api_id = smc_fid & FUNCID_NUM_MASK;

	/* Try the handlers in order until one claims the API ID. */
	ret = eemi_for_compatibility(api_id, pm_arg, handle, security_flag);
	if (ret != (uintptr_t)0) {
		return ret;
	}

	/*
	 * NOTE(review): this handler receives the raw SMC 'flags' word rather
	 * than the derived security_flag used by every other handler here --
	 * confirm this asymmetry is intentional.
	 */
	ret = eemi_psci_debugfs_handler(api_id, pm_arg, handle,
					(uint32_t)flags);
	if (ret != (uintptr_t)0) {
		return ret;
	}

	ret = TF_A_specific_handler(api_id, pm_arg, handle, security_flag);
	if (ret != (uintptr_t)0) {
		return ret;
	}

	/* Fall back to the common EEMI IPI path. */
	ret = eemi_handler(api_id, pm_arg, handle, security_flag);

	return ret;
}