/*
 * Copyright (c) 2017-2025, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <bl31/bl31.h>
#include <bl31/ehf.h>
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <lib/cassert.h>
#include <lib/el3_runtime/pubsub.h>
#include <lib/utils.h>
#include <plat/common/platform.h>
#include <services/sdei.h>

#include "sdei_private.h"

#define MAJOR_VERSION	1ULL
#define MINOR_VERSION	0ULL
#define VENDOR_VERSION	0ULL

#define MAKE_SDEI_VERSION(_major, _minor, _vendor) \
	((((_major)) << 48ULL) | (((_minor)) << 32ULL) | (_vendor))

#define LOWEST_INTR_PRIORITY	0xff
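/*
 * Worked example of the version encoding above: for major = 1, minor = 0,
 * vendor = 0, MAKE_SDEI_VERSION yields (1ULL << 48) | (0ULL << 32) | 0 =
 * 0x0001000000000000, i.e. the major version in bits [63:48], the minor
 * version in bits [47:32], and the vendor-defined version in bits [31:0].
 */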
CASSERT(PLAT_SDEI_CRITICAL_PRI < PLAT_SDEI_NORMAL_PRI,
	sdei_critical_must_have_higher_priority);

static unsigned int num_dyn_priv_slots, num_dyn_shrd_slots;

/* Initialise SDEI map entries */
static void init_map(sdei_ev_map_t *map)
{
	map->reg_count = 0;
}

/* Convert mapping to SDEI class */
static sdei_class_t map_to_class(sdei_ev_map_t *map)
{
	return is_event_critical(map) ? SDEI_CRITICAL : SDEI_NORMAL;
}

/* Clear SDEI event entries except state */
static void clear_event_entries(sdei_entry_t *se)
{
	se->ep = 0;
	se->arg = 0;
	se->affinity = 0;
	se->reg_flags = 0;
}

/* Perform CPU-specific state initialisation */
static void *sdei_cpu_on_init(const void *arg)
{
	unsigned int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	/* Initialize private mappings on this CPU */
	for_each_private_map(i, map) {
		se = get_event_entry(map);
		clear_event_entries(se);
		se->state = 0;
	}

	SDEI_LOG("Private events initialized on %lx\n", read_mpidr_el1());

	/* All PEs start with SDEI events masked */
	(void) sdei_pe_mask();

	return NULL;
}

/* CPU initialisation after wakeup from suspend */
static void *sdei_cpu_wakeup_init(const void *arg)
{
	SDEI_LOG("Events masked on %lx\n", read_mpidr_el1());

	/* All PEs wake up with SDEI events masked */
	(void) sdei_pe_mask();

	return NULL;
}

/* Initialise an SDEI class */
static void sdei_class_init(sdei_class_t class)
{
	unsigned int i;
	bool zero_found __unused = false;
	int ev_num_so_far __unused;
	sdei_ev_map_t *map;

	/* Sanity check and configuration of shared events */
	ev_num_so_far = -1;
	for_each_shared_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));

		ev_num_so_far = map->ev_num;

		/* Event 0 must not be shared */
		assert(map->ev_num != SDEI_EVENT_0);

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a shared event */
		assert(is_event_shared(map));

		/* No shared mapping should have the signalable property */
		assert(!is_event_signalable(map));

		/* Shared mappings can't be explicit */
		assert(!is_map_explicit(map));
#endif

		/* Skip mappings that are not of the class being initialised */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (is_map_dynamic(map)) {
			assert(map->intr == SDEI_DYN_IRQ);
			assert(is_event_normal(map));
			num_dyn_shrd_slots++;
		} else {
			/* Shared mappings must be bound to a shared interrupt */
			assert(plat_ic_is_spi(map->intr) != 0);
			set_map_bound(map);
		}

		init_map(map);
	}

	/* Sanity check and configuration of private events for this CPU */
	ev_num_so_far = -1;
	for_each_private_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));

		ev_num_so_far = map->ev_num;

		if (map->ev_num == SDEI_EVENT_0) {
			zero_found = true;

			/* Event 0 must be a Secure SGI */
			assert(is_secure_sgi(map->intr));

			/*
			 * Event 0 may only have the signalable flag set
			 * (apart from being private).
			 */
			assert(map->map_flags == (SDEI_MAPF_SIGNALABLE |
						SDEI_MAPF_PRIVATE));
		} else {
			/* No other mapping should have the signalable property */
			assert(!is_event_signalable(map));
		}

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a private event */
		assert(is_event_private(map));

		/*
		 * Other than priority, explicit events can only have the
		 * explicit and private flags set.
		 */
		if (is_map_explicit(map)) {
			assert((map->map_flags | SDEI_MAPF_CRITICAL) ==
					(SDEI_MAPF_EXPLICIT | SDEI_MAPF_PRIVATE
					| SDEI_MAPF_CRITICAL));
		}
#endif

		/* Skip mappings that are not of the class being initialised */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (map->ev_num != SDEI_EVENT_0) {
			if (is_map_dynamic(map)) {
				assert(map->intr == SDEI_DYN_IRQ);
				assert(is_event_normal(map));
				num_dyn_priv_slots++;
			} else if (is_map_explicit(map)) {
				/*
				 * Explicit mappings don't have a backing
				 * SDEI interrupt, but verify that anyway.
				 */
				assert(map->intr == SDEI_DYN_IRQ);
			} else {
				/*
				 * Private mappings must be bound to a private
				 * interrupt.
				 */
				assert(plat_ic_is_ppi((unsigned) map->intr) != 0);
				set_map_bound(map);
			}
		}

		init_map(map);
	}

	/* Ensure event 0 is in the mapping */
	assert(zero_found);

	(void) sdei_cpu_on_init(NULL);
}

/* SDEI dispatcher initialisation */
void sdei_init(void)
{
	plat_sdei_setup();
	sdei_class_init(SDEI_CRITICAL);
	sdei_class_init(SDEI_NORMAL);

	/* Register priority level handlers */
	ehf_register_priority_handler(PLAT_SDEI_CRITICAL_PRI,
			sdei_intr_handler);
	ehf_register_priority_handler(PLAT_SDEI_NORMAL_PRI,
			sdei_intr_handler);
}
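/*
 * For illustration, a minimal sketch of how a platform port might supply the
 * private and shared mappings validated above, assuming the standard TF-A
 * helper macros from services/sdei.h; the event numbers and the interrupt
 * names PLAT_SDEI_SGI and PLAT_SPI_EXAMPLE are hypothetical:
 *
 *	static sdei_ev_map_t plat_private_sdei[] = {
 *		SDEI_DEFINE_EVENT_0(PLAT_SDEI_SGI),
 *		SDEI_PRIVATE_EVENT(100, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
 *	};
 *
 *	static sdei_ev_map_t plat_shared_sdei[] = {
 *		SDEI_SHARED_EVENT(200, PLAT_SPI_EXAMPLE, SDEI_MAPF_BOUND),
 *		SDEI_SHARED_EVENT(201, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
 *	};
 *
 *	REGISTER_SDEI_MAP(plat_private_sdei, plat_shared_sdei);
 */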
/* Populate SDEI event entry */
static void set_sdei_entry(sdei_entry_t *se, uint64_t ep, uint64_t arg,
		unsigned int flags, uint64_t affinity)
{
	assert(se != NULL);

	se->ep = ep;
	se->arg = arg;
	se->affinity = (affinity & MPIDR_AFFINITY_MASK);
	se->reg_flags = flags;
}

static uint64_t sdei_version(void)
{
	return MAKE_SDEI_VERSION(MAJOR_VERSION, MINOR_VERSION, VENDOR_VERSION);
}

/* Validate flags and MPIDR values for REGISTER and ROUTING_SET calls */
static int validate_flags(uint64_t flags, uint64_t mpidr)
{
	/* Validate flags */
	switch (flags) {
	case SDEI_REGF_RM_PE:
		if (!is_valid_mpidr(mpidr))
			return SDEI_EINVAL;
		break;
	case SDEI_REGF_RM_ANY:
		break;
	default:
		/* Unknown flags */
		return SDEI_EINVAL;
	}

	return 0;
}

/* Set routing of an SDEI event */
static int sdei_event_routing_set(int ev_num, uint64_t flags, uint64_t mpidr)
{
	int ret;
	unsigned int routing;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	ret = validate_flags(flags, mpidr);
	if (ret != 0)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	/* The event must not be private */
	if (is_event_private(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	if (!is_map_bound(map) || is_event_private(map)) {
		ret = SDEI_EINVAL;
		goto finish;
	}

	if (!can_sdei_state_trans(se, DO_ROUTING)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/* Choose appropriate routing */
	routing = (unsigned int) ((flags == SDEI_REGF_RM_ANY) ?
			INTR_ROUTING_MODE_ANY : INTR_ROUTING_MODE_PE);

	/* Update event registration flag */
	se->reg_flags = (unsigned int) flags;
	if (flags == SDEI_REGF_RM_PE) {
		se->affinity = (mpidr & MPIDR_AFFINITY_MASK);
	}

	/*
	 * ROUTING_SET is permissible only when the event's composite state is
	 * 'registered, disabled, and not running'. This means that the
	 * interrupt is currently disabled and not active.
	 */
	plat_ic_set_spi_routing(map->intr, routing, (u_register_t) mpidr);

finish:
	sdei_map_unlock(map);

	return ret;
}
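/*
 * A client typically brings a dynamic event into service with the following
 * sequence of SDEI calls (a sketch of the expected usage, inferred from the
 * checks in this file): SDEI_INTERRUPT_BIND to obtain an event number for an
 * interrupt, SDEI_EVENT_REGISTER to install a handler, SDEI_EVENT_ENABLE to
 * allow dispatch, and finally SDEI_PE_UNMASK, since all PEs start with SDEI
 * events masked (see sdei_cpu_on_init above).
 */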
/* Register handler and argument for an SDEI event */
static int64_t sdei_event_register(int ev_num,
		uint64_t ep,
		uint64_t arg,
		uint64_t flags,
		uint64_t mpidr)
{
	int ret;
	unsigned int routing;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	sdei_state_t backup_state;

	if ((ep == 0U) || (plat_sdei_validate_entry_point(
					ep, sdei_client_el()) != 0)) {
		return SDEI_EINVAL;
	}

	ret = validate_flags(flags, mpidr);
	if (ret != 0)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	/* Private events always target the PE */
	if (is_event_private(map)) {
		/*
		 * SDEI internally handles private events in the same manner
		 * as shared events with routing mode RM_PE. Since the routing
		 * mode flag and affinity fields are not used when registering
		 * a private event, set them here.
		 */
		flags = SDEI_REGF_RM_PE;

		/*
		 * The kernel may pass 0 as mpidr; as we set flags to
		 * SDEI_REGF_RM_PE, set mpidr to this PE as well.
		 */
		mpidr = read_mpidr_el1();
	}

	se = get_event_entry(map);

	/*
	 * Even though the register operation is per-event (and, for private
	 * events, required on each PE individually), it has to be serialised
	 * with respect to bind/release, which are global operations. So we
	 * hold the lock throughout, unconditionally.
	 */
	sdei_map_lock(map);

	backup_state = se->state;
	if (!can_sdei_state_trans(se, DO_REGISTER))
		goto fallback;

	/*
	 * When registering for dynamic events, make sure it's been bound
	 * already. This has to be the case as, without binding, the client
	 * can't know the event number to register for.
	 */
	if (is_map_dynamic(map) && !is_map_bound(map))
		goto fallback;

	if (is_event_private(map)) {
		/* Multiple calls to register are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only a single call to register is possible for shared events */
		assert(map->reg_count == 0);
	}

	if (is_map_bound(map)) {
		/* Meanwhile, did any PE ACK the interrupt? */
		if (plat_ic_get_interrupt_active(map->intr) != 0U)
			goto fallback;

		/* The interrupt must currently be owned by Non-secure */
		if (plat_ic_get_interrupt_type(map->intr) != INTR_TYPE_NS)
			goto fallback;

		/*
		 * Disable forwarding of new interrupt triggers to the CPU
		 * interface.
		 */
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Any events that are triggered after register and before
		 * enable should remain pending. Clear any previous interrupt
		 * triggers which are pending (except for SGIs). This has no
		 * effect on level-triggered interrupts.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		/* Map interrupt to EL3 and program the correct priority */
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_EL3);

		/* Program the appropriate interrupt priority */
		plat_ic_set_interrupt_priority(map->intr, sdei_event_priority(map));

		/*
		 * Set the routing mode for the shared event as requested. We
		 * already ensure that shared events get bound to SPIs.
		 */
		if (is_event_shared(map)) {
			routing = (unsigned int) ((flags == SDEI_REGF_RM_ANY) ?
					INTR_ROUTING_MODE_ANY : INTR_ROUTING_MODE_PE);
			plat_ic_set_spi_routing(map->intr, routing,
					(u_register_t) mpidr);
		}
	}

	/* Populate event entries */
	set_sdei_entry(se, ep, arg, (unsigned int) flags, mpidr);

	/* Increment register count */
	map->reg_count++;

	sdei_map_unlock(map);

	return 0;

fallback:
	/* Reinstate previous state */
	se->state = backup_state;

	sdei_map_unlock(map);

	return SDEI_EDENY;
}
/* Enable SDEI event */
static int64_t sdei_event_enable(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	int ret;
	bool before, after;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);
	ret = SDEI_EDENY;

	if (is_event_shared(map))
		sdei_map_lock(map);

	before = GET_EV_STATE(se, ENABLED);
	if (!can_sdei_state_trans(se, DO_ENABLE))
		goto finish;
	after = GET_EV_STATE(se, ENABLED);

	/*
	 * Enable the interrupt for bound events only if there's a change in
	 * enabled state.
	 */
	if (is_map_bound(map) && (!before && after))
		plat_ic_enable_interrupt(map->intr);

	ret = 0;

finish:
	if (is_event_shared(map))
		sdei_map_unlock(map);

	return ret;
}

/* Disable SDEI event */
static int sdei_event_disable(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	int ret;
	bool before, after;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);
	ret = SDEI_EDENY;

	if (is_event_shared(map))
		sdei_map_lock(map);

	before = GET_EV_STATE(se, ENABLED);
	if (!can_sdei_state_trans(se, DO_DISABLE))
		goto finish;
	after = GET_EV_STATE(se, ENABLED);

	/*
	 * Disable the interrupt for bound events only if there's a change in
	 * enabled state.
	 */
	if (is_map_bound(map) && (before && !after))
		plat_ic_disable_interrupt(map->intr);

	ret = 0;

finish:
	if (is_event_shared(map))
		sdei_map_unlock(map);

	return ret;
}
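/*
 * Return value conventions for the info queries handled below, as implemented
 * here: EV_TYPE yields 1 for a shared event and 0 for a private one;
 * EV_NOT_SIGNALED yields 1 when the event cannot be signalled; EV_PRIORITY
 * yields 1 for critical and 0 for normal; EV_ROUTING_MODE yields 1 for RM_PE
 * and 0 for RM_ANY. Each maps directly onto the boolean expression returned.
 */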
/* Query SDEI event information */
static int64_t sdei_event_get_info(int ev_num, int info)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	uint64_t flags;
	bool registered;
	uint64_t affinity;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Sample state under lock */
	registered = GET_EV_STATE(se, REGISTERED);
	flags = se->reg_flags;
	affinity = se->affinity;

	if (is_event_shared(map))
		sdei_map_unlock(map);

	switch (info) {
	case SDEI_INFO_EV_TYPE:
		return is_event_shared(map);

	case SDEI_INFO_EV_NOT_SIGNALED:
		return !is_event_signalable(map);

	case SDEI_INFO_EV_PRIORITY:
		return is_event_critical(map);

	case SDEI_INFO_EV_ROUTING_MODE:
		if (!is_event_shared(map))
			return SDEI_EINVAL;
		if (!registered)
			return SDEI_EDENY;
		return (flags == SDEI_REGF_RM_PE);

	case SDEI_INFO_EV_ROUTING_AFF:
		if (!is_event_shared(map))
			return SDEI_EINVAL;
		if (!registered)
			return SDEI_EDENY;
		if (flags != SDEI_REGF_RM_PE)
			return SDEI_EINVAL;
		return affinity;

	default:
		return SDEI_EINVAL;
	}
}

/* Unregister an SDEI event */
static int sdei_event_unregister(int ev_num)
{
	int ret = 0;
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	/*
	 * Even though the unregister operation is per-event (and, for private
	 * events, required on each PE individually), it has to be serialised
	 * with respect to bind/release, which are global operations. So we
	 * hold the lock throughout, unconditionally.
	 */
	sdei_map_lock(map);

	if (!can_sdei_state_trans(se, DO_UNREGISTER)) {
		/*
		 * If the call is invalid while the handler is running (for
		 * example, the event was already unregistered from within a
		 * running handler), return the pending error code; otherwise,
		 * return deny.
		 */
		ret = GET_EV_STATE(se, RUNNING) ? SDEI_EPEND : SDEI_EDENY;

		goto finish;
	}

	map->reg_count--;
	if (is_event_private(map)) {
		/* Multiple registrations are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only a single registration is possible for shared events */
		assert(map->reg_count == 0);
	}

	if (is_map_bound(map)) {
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Clear pending interrupt. Skip for SGIs as they may not be
		 * cleared on interrupt controllers.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		assert(plat_ic_get_interrupt_type(map->intr) == INTR_TYPE_EL3);
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_NS);
		plat_ic_set_interrupt_priority(map->intr, LOWEST_INTR_PRIORITY);
	}

	clear_event_entries(se);

	/*
	 * If the handler is running at the time of unregister, return the
	 * pending error code.
	 */
	if (GET_EV_STATE(se, RUNNING))
		ret = SDEI_EPEND;

finish:
	sdei_map_unlock(map);

	return ret;
}
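/*
 * Note that the internal state value can be returned directly below because
 * it is maintained in the bit layout that SDEI_EVENT_STATUS is expected to
 * report (per the SDEI specification): bit 0 registered, bit 1 enabled,
 * bit 2 running.
 */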
/* Query status of an SDEI event */
static int sdei_event_status(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_state_t state;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* The state value directly maps to the expected return format */
	state = se->state;

	if (is_event_shared(map))
		sdei_map_unlock(map);

	return (int) state;
}

/* Bind an SDEI event to an interrupt */
static int sdei_interrupt_bind(unsigned int intr_num)
{
	sdei_ev_map_t *map;
	bool retry = true, shared_mapping;

	/* Interrupt must be either PPI or SPI */
	if (!(plat_ic_is_ppi(intr_num) || plat_ic_is_spi(intr_num)))
		return SDEI_EINVAL;

	shared_mapping = (plat_ic_is_spi(intr_num) != 0);
	do {
		/*
		 * Bail out if there is already an event for this interrupt,
		 * either platform-defined or dynamic.
		 */
		map = find_event_map_by_intr(intr_num, shared_mapping);
		if (map != NULL) {
			if (is_map_dynamic(map)) {
				if (is_map_bound(map)) {
					/*
					 * Dynamic event, already bound. Return
					 * event number.
					 */
					return map->ev_num;
				}
			} else {
				/* Can't bind a non-dynamic event */
				return SDEI_EINVAL;
			}
		}

		/*
		 * The interrupt is not bound yet. Try to find a free slot to
		 * bind it. Free dynamic mappings have their interrupt set as
		 * SDEI_DYN_IRQ.
		 */
		map = find_event_map_by_intr(SDEI_DYN_IRQ, shared_mapping);
		if (map == NULL)
			return SDEI_ENOMEM;

		/* The returned mapping must be dynamic */
		if (!is_map_dynamic(map)) {
			return SDEI_ENOMEM;
		}

		/*
		 * We cannot assert for bound maps here, as we might be racing
		 * with another bind.
		 */

		/* The requested interrupt must already belong to NS */
		if (plat_ic_get_interrupt_type(intr_num) != INTR_TYPE_NS)
			return SDEI_EDENY;

		/*
		 * Interrupt programming and ownership transfer are deferred
		 * until register.
		 */

		sdei_map_lock(map);
		if (!is_map_bound(map)) {
			map->intr = intr_num;
			set_map_bound(map);
			retry = false;
		}
		sdei_map_unlock(map);
	} while (retry);

	return map->ev_num;
}

/* Release an SDEI event previously bound to an interrupt */
static int sdei_interrupt_release(int ev_num)
{
	int ret = 0;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	if (!is_map_dynamic(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	/* Event must have been unregistered before release */
	if (map->reg_count != 0) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/*
	 * Interrupt release never causes the state to change. We only check
	 * whether it's permissible or not.
	 */
	if (!can_sdei_state_trans(se, DO_RELEASE)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	if (is_map_bound(map)) {
		/*
		 * Deny release if the interrupt is active, which means it's
		 * probably being acknowledged and handled elsewhere.
		 */
		if (plat_ic_get_interrupt_active(map->intr) != 0U) {
			ret = SDEI_EDENY;
			goto finish;
		}

		/*
		 * Interrupt programming and ownership transfer are already
		 * done during unregister.
		 */

		map->intr = SDEI_DYN_IRQ;
		clr_map_bound(map);
	} else {
		SDEI_LOG("Error release bound:%d cnt:%d\n", is_map_bound(map),
				map->reg_count);
		ret = SDEI_EINVAL;
	}

finish:
	sdei_map_unlock(map);

	return ret;
}
/* Perform reset of private SDEI events */
static int sdei_private_reset(void)
{
	sdei_ev_map_t *map;
	int ret = 0, final_ret = 0;
	unsigned int i;

	/* Unregister all private events */
	for_each_private_map(i, map) {
		/*
		 * Unregistering an event that was never registered fails with
		 * a deny, which is acceptable here. But if the event is
		 * running, or has an unregister pending, the reset call must
		 * fail.
		 */
		ret = sdei_event_unregister(map->ev_num);
		if ((ret == SDEI_EPEND) && (final_ret == 0))
			final_ret = SDEI_EDENY;
	}

	return final_ret;
}

/* Perform reset of shared SDEI events */
static int sdei_shared_reset(void)
{
	const sdei_mapping_t *mapping;
	sdei_ev_map_t *map;
	int ret = 0, final_ret = 0;
	unsigned int i, j;

	/* Unregister all shared events */
	for_each_shared_map(i, map) {
		/*
		 * Unregistering an event that was never registered fails with
		 * a deny, which is acceptable here. But if the event is
		 * running, or has an unregister pending, the reset call must
		 * fail.
		 */
		ret = sdei_event_unregister(map->ev_num);
		if ((ret == SDEI_EPEND) && (final_ret == 0))
			final_ret = SDEI_EDENY;
	}

	if (final_ret != 0)
		return final_ret;

	/*
	 * Loop through both private and shared mappings, and release all
	 * bindings.
	 */
	for_each_mapping_type(i, mapping) {
		iterate_mapping(mapping, j, map) {
			/*
			 * Release bindings for mappings that are dynamic and
			 * bound.
			 */
			if (is_map_dynamic(map) && is_map_bound(map)) {
				/*
				 * Any failure to release would mean there is
				 * at least one PE registered for the event.
				 */
				ret = sdei_interrupt_release(map->ev_num);
				if ((ret != 0) && (final_ret == 0))
					final_ret = ret;
			}
		}
	}

	return final_ret;
}

/* Send a signal to another SDEI client PE */
static int sdei_signal(int ev_num, uint64_t target_pe)
{
	unsigned int i;
	sdei_ev_map_t *map;
	sdei_ev_map_t *map_priv;
	sdei_entry_t *se;

	/* Only event 0 can be signalled */
	if (ev_num != SDEI_EVENT_0) {
		return SDEI_EINVAL;
	}

	/* Find mapping for event 0 */
	map = find_event_map(SDEI_EVENT_0);
	if (map == NULL) {
		return SDEI_EINVAL;
	}

	/* The event must be signalable */
	if (!is_event_signalable(map)) {
		return SDEI_EINVAL;
	}

	/* Validate target */
	if (!is_valid_mpidr(target_pe)) {
		return SDEI_EINVAL;
	}

	/* The target PE must not have SDEI events masked */
	if (sdei_is_target_pe_masked(target_pe)) {
		return SDEI_EINVAL;
	}

	/* The event must be registered and enabled on the target PE */
	if (is_event_private(map)) {
		map_priv = SDEI_PRIVATE_MAPPING()->map;
		for (i = 0; i < SDEI_PRIVATE_MAPPING()->num_maps; i++) {
			if (map_priv->ev_num == SDEI_EVENT_0) {
				se = get_event_entry_target_pe((long int) i,
						(unsigned int) SDEI_PRIVATE_MAPPING()->num_maps,
						target_pe);
				if (!(GET_EV_STATE((se), REGISTERED))) {
					return SDEI_EINVAL;
				}
				if (!(GET_EV_STATE((se), ENABLED))) {
					return SDEI_EINVAL;
				}
			}
			map_priv++;
		}
	}

	/* Raise SGI. Platform will validate target_pe */
	plat_ic_raise_el3_sgi((int) map->intr, (u_register_t) target_pe);

	return 0;
}
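/*
 * Per the SDEI specification, the BIND_SLOTS feature result is assumed to
 * report the number of private dynamic slots in bits [15:0] and shared
 * dynamic slots in bits [31:16]; the FEATURE_BIND_SLOTS macro (from
 * sdei_private.h) packs num_dyn_priv_slots and num_dyn_shrd_slots
 * accordingly for the query below.
 */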
/* Query SDEI dispatcher features */
static uint64_t sdei_features(unsigned int feature)
{
	if (feature == SDEI_FEATURE_BIND_SLOTS) {
		return FEATURE_BIND_SLOTS(num_dyn_priv_slots,
				num_dyn_shrd_slots);
	}

	return (uint64_t) SDEI_EINVAL;
}

/* SDEI top level handler for servicing SMCs */
uint64_t sdei_smc_handler(uint32_t smc_fid,
		uint64_t x1,
		uint64_t x2,
		uint64_t x3,
		uint64_t x4,
		void *cookie,
		void *handle,
		uint64_t flags)
{
	uint64_t x5;
	unsigned int ss = (unsigned int) get_interrupt_src_ss(flags);
	int64_t ret;
	bool resume = false;
	cpu_context_t *ctx = handle;
	int ev_num = (int) x1;

	if (ss != NON_SECURE)
		SMC_RET1(ctx, SMC_UNK);

	/* Verify the caller EL */
	if (GET_EL(read_spsr_el3()) != sdei_client_el())
		SMC_RET1(ctx, SMC_UNK);

	switch (smc_fid) {
	case SDEI_VERSION:
		SDEI_LOG("> VER\n");
		ret = (int64_t) sdei_version();
		SDEI_LOG("< VER:%" PRIx64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_REGISTER:
		x5 = SMC_GET_GP(ctx, CTX_GPREG_X5);
		SDEI_LOG("> REG(n:%d e:%" PRIx64 " a:%" PRIx64 " f:%x m:%" PRIx64 ")\n",
				ev_num, x2, x3, (int) x4, x5);
		ret = sdei_event_register(ev_num, x2, x3, x4, x5);
		SDEI_LOG("< REG:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_ENABLE:
		SDEI_LOG("> ENABLE(n:%d)\n", ev_num);
		ret = sdei_event_enable(ev_num);
		SDEI_LOG("< ENABLE:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_DISABLE:
		SDEI_LOG("> DISABLE(n:0x%x)\n", ev_num);
		ret = sdei_event_disable(ev_num);
		SDEI_LOG("< DISABLE:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_CONTEXT:
		SDEI_LOG("> CTX(p:%d):%lx\n", (int) x1, read_mpidr_el1());
		ret = sdei_event_context(ctx, (unsigned int) x1);
		SDEI_LOG("< CTX:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_COMPLETE_AND_RESUME:
		resume = true;
		/* Fallthrough */

	case SDEI_EVENT_COMPLETE:
		SDEI_LOG("> COMPLETE(r:%u sta/ep:%" PRIx64 "):%lx\n",
				(unsigned int) resume, x1, read_mpidr_el1());
		ret = sdei_event_complete(resume, x1);
		SDEI_LOG("< COMPLETE:%" PRIx64 "\n", ret);

		/*
		 * Set the error code only if the call failed. If the call
		 * succeeded, we discard the dispatched context and restore the
		 * interrupted context to a pristine condition, which therefore
		 * shouldn't be modified. We don't return to the caller in this
		 * case anyway.
		 */
		if (ret != 0)
			SMC_RET1(ctx, ret);

		SMC_RET0(ctx);

	case SDEI_EVENT_STATUS:
		SDEI_LOG("> STAT(n:0x%x)\n", ev_num);
		ret = sdei_event_status(ev_num);
		SDEI_LOG("< STAT:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_GET_INFO:
		SDEI_LOG("> INFO(n:0x%x, %d)\n", ev_num, (int) x2);
		ret = sdei_event_get_info(ev_num, (int) x2);
		SDEI_LOG("< INFO:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_UNREGISTER:
		SDEI_LOG("> UNREG(n:0x%x)\n", ev_num);
		ret = sdei_event_unregister(ev_num);
		SDEI_LOG("< UNREG:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_PE_UNMASK:
		SDEI_LOG("> UNMASK:%lx\n", read_mpidr_el1());
		sdei_pe_unmask();
		SDEI_LOG("< UNMASK:%d\n", 0);
		SMC_RET1(ctx, 0);

	case SDEI_PE_MASK:
		SDEI_LOG("> MASK:%lx\n", read_mpidr_el1());
		ret = sdei_pe_mask();
		SDEI_LOG("< MASK:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_INTERRUPT_BIND:
		SDEI_LOG("> BIND(%d)\n", (int) x1);
		ret = sdei_interrupt_bind((unsigned int) x1);
		SDEI_LOG("< BIND:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_INTERRUPT_RELEASE:
		SDEI_LOG("> REL(0x%x)\n", ev_num);
		ret = sdei_interrupt_release(ev_num);
		SDEI_LOG("< REL:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_SHARED_RESET:
		SDEI_LOG("> S_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_shared_reset();
		SDEI_LOG("< S_RESET:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_PRIVATE_RESET:
		SDEI_LOG("> P_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_private_reset();
		SDEI_LOG("< P_RESET:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_ROUTING_SET:
		SDEI_LOG("> ROUTE_SET(n:%d f:%" PRIx64 " aff:%" PRIx64 ")\n",
				ev_num, x2, x3);
		ret = sdei_event_routing_set(ev_num, x2, x3);
		SDEI_LOG("< ROUTE_SET:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_FEATURES:
		SDEI_LOG("> FTRS(f:%" PRIx64 ")\n", x1);
		ret = (int64_t) sdei_features((unsigned int) x1);
		SDEI_LOG("< FTRS:%" PRIx64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_SIGNAL:
		SDEI_LOG("> SIGNAL(e:%d t:%" PRIx64 ")\n", ev_num, x2);
		ret = sdei_signal(ev_num, x2);
		SDEI_LOG("< SIGNAL:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	default:
		/* Do nothing in default case */
		break;
	}

	WARN("Unimplemented SDEI Call: 0x%x\n", smc_fid);
	SMC_RET1(ctx, SMC_UNK);
}
CTX:%" PRId64 "\n", ret); 1034 SMC_RET1(ctx, ret); 1035 1036 case SDEI_EVENT_COMPLETE_AND_RESUME: 1037 resume = true; 1038 /* Fallthrough */ 1039 1040 case SDEI_EVENT_COMPLETE: 1041 SDEI_LOG("> COMPLETE(r:%u sta/ep:%" PRIx64 "):%lx\n", 1042 (unsigned int) resume, x1, read_mpidr_el1()); 1043 ret = sdei_event_complete(resume, x1); 1044 SDEI_LOG("< COMPLETE:%" PRIx64 "\n", ret); 1045 1046 /* 1047 * Set error code only if the call failed. If the call 1048 * succeeded, we discard the dispatched context, and restore the 1049 * interrupted context to a pristine condition, and therefore 1050 * shouldn't be modified. We don't return to the caller in this 1051 * case anyway. 1052 */ 1053 if (ret != 0) 1054 SMC_RET1(ctx, ret); 1055 1056 SMC_RET0(ctx); 1057 1058 case SDEI_EVENT_STATUS: 1059 SDEI_LOG("> STAT(n:0x%x)\n", ev_num); 1060 ret = sdei_event_status(ev_num); 1061 SDEI_LOG("< STAT:%" PRId64 "\n", ret); 1062 SMC_RET1(ctx, ret); 1063 1064 case SDEI_EVENT_GET_INFO: 1065 SDEI_LOG("> INFO(n:0x%x, %d)\n", ev_num, (int) x2); 1066 ret = sdei_event_get_info(ev_num, (int) x2); 1067 SDEI_LOG("< INFO:%" PRId64 "\n", ret); 1068 SMC_RET1(ctx, ret); 1069 1070 case SDEI_EVENT_UNREGISTER: 1071 SDEI_LOG("> UNREG(n:0x%x)\n", ev_num); 1072 ret = sdei_event_unregister(ev_num); 1073 SDEI_LOG("< UNREG:%" PRId64 "\n", ret); 1074 SMC_RET1(ctx, ret); 1075 1076 case SDEI_PE_UNMASK: 1077 SDEI_LOG("> UNMASK:%lx\n", read_mpidr_el1()); 1078 sdei_pe_unmask(); 1079 SDEI_LOG("< UNMASK:%d\n", 0); 1080 SMC_RET1(ctx, 0); 1081 1082 case SDEI_PE_MASK: 1083 SDEI_LOG("> MASK:%lx\n", read_mpidr_el1()); 1084 ret = sdei_pe_mask(); 1085 SDEI_LOG("< MASK:%" PRId64 "\n", ret); 1086 SMC_RET1(ctx, ret); 1087 1088 case SDEI_INTERRUPT_BIND: 1089 SDEI_LOG("> BIND(%d)\n", (int) x1); 1090 ret = sdei_interrupt_bind((unsigned int) x1); 1091 SDEI_LOG("< BIND:%" PRId64 "\n", ret); 1092 SMC_RET1(ctx, ret); 1093 1094 case SDEI_INTERRUPT_RELEASE: 1095 SDEI_LOG("> REL(0x%x)\n", ev_num); 1096 ret = sdei_interrupt_release(ev_num); 1097 SDEI_LOG("< REL:%" PRId64 "\n", ret); 1098 SMC_RET1(ctx, ret); 1099 1100 case SDEI_SHARED_RESET: 1101 SDEI_LOG("> S_RESET():%lx\n", read_mpidr_el1()); 1102 ret = sdei_shared_reset(); 1103 SDEI_LOG("< S_RESET:%" PRId64 "\n", ret); 1104 SMC_RET1(ctx, ret); 1105 1106 case SDEI_PRIVATE_RESET: 1107 SDEI_LOG("> P_RESET():%lx\n", read_mpidr_el1()); 1108 ret = sdei_private_reset(); 1109 SDEI_LOG("< P_RESET:%" PRId64 "\n", ret); 1110 SMC_RET1(ctx, ret); 1111 1112 case SDEI_EVENT_ROUTING_SET: 1113 SDEI_LOG("> ROUTE_SET(n:%d f:%" PRIx64 " aff:%" PRIx64 ")\n", ev_num, x2, x3); 1114 ret = sdei_event_routing_set(ev_num, x2, x3); 1115 SDEI_LOG("< ROUTE_SET:%" PRId64 "\n", ret); 1116 SMC_RET1(ctx, ret); 1117 1118 case SDEI_FEATURES: 1119 SDEI_LOG("> FTRS(f:%" PRIx64 ")\n", x1); 1120 ret = (int64_t) sdei_features((unsigned int) x1); 1121 SDEI_LOG("< FTRS:%" PRIx64 "\n", ret); 1122 SMC_RET1(ctx, ret); 1123 1124 case SDEI_EVENT_SIGNAL: 1125 SDEI_LOG("> SIGNAL(e:%d t:%" PRIx64 ")\n", ev_num, x2); 1126 ret = sdei_signal(ev_num, x2); 1127 SDEI_LOG("< SIGNAL:%" PRId64 "\n", ret); 1128 SMC_RET1(ctx, ret); 1129 1130 default: 1131 /* Do nothing in default case */ 1132 break; 1133 } 1134 1135 WARN("Unimplemented SDEI Call: 0x%x\n", smc_fid); 1136 SMC_RET1(ctx, SMC_UNK); 1137 } 1138 1139 /* Subscribe to PSCI CPU on to initialize per-CPU SDEI configuration */ 1140 SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, sdei_cpu_on_init); 1141 1142 /* Subscribe to PSCI CPU suspend finisher for per-CPU configuration */ 1143 SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, 
sdei_cpu_wakeup_init); 1144