/*
 * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <bl31/bl31.h>
#include <bl31/ehf.h>
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <lib/cassert.h>
#include <lib/el3_runtime/pubsub.h>
#include <lib/utils.h>
#include <plat/common/platform.h>
#include <services/sdei.h>

#include "sdei_private.h"

#define MAJOR_VERSION	1ULL
#define MINOR_VERSION	0ULL
#define VENDOR_VERSION	0ULL

#define MAKE_SDEI_VERSION(_major, _minor, _vendor) \
	((((_major)) << 48ULL) | (((_minor)) << 32ULL) | (_vendor))
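
/*
 * Illustrative note: the version value packs the major version into bits
 * [63:48], the minor version into bits [47:32] and the vendor version into
 * bits [31:0]. For example, MAKE_SDEI_VERSION(1, 0, 0) yields
 * 0x0001000000000000, which is what the SDEI_VERSION call returns below.
 */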

#define LOWEST_INTR_PRIORITY	0xff

CASSERT(PLAT_SDEI_CRITICAL_PRI < PLAT_SDEI_NORMAL_PRI,
		sdei_critical_must_have_higher_priority);

static unsigned int num_dyn_priv_slots, num_dyn_shrd_slots;

/* Initialise SDEI map entries */
static void init_map(sdei_ev_map_t *map)
{
	map->reg_count = 0;
}

/* Convert mapping to SDEI class */
static sdei_class_t map_to_class(sdei_ev_map_t *map)
{
	return is_event_critical(map) ? SDEI_CRITICAL : SDEI_NORMAL;
}

/* Clear SDEI event entries except state */
static void clear_event_entries(sdei_entry_t *se)
{
	se->ep = 0;
	se->arg = 0;
	se->affinity = 0;
	se->reg_flags = 0;
}

/* Perform CPU-specific state initialisation */
static void *sdei_cpu_on_init(const void *arg)
{
	unsigned int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	/* Initialize private mappings on this CPU */
	for_each_private_map(i, map) {
		se = get_event_entry(map);
		clear_event_entries(se);
		se->state = 0;
	}

	SDEI_LOG("Private events initialized on %lx\n", read_mpidr_el1());

	/* All PEs start with SDEI events masked */
	(void) sdei_pe_mask();

	return NULL;
}

/* CPU initialisation after wakeup from suspend */
static void *sdei_cpu_wakeup_init(const void *arg)
{
	SDEI_LOG("Events masked on %lx\n", read_mpidr_el1());

	/* All PEs wake up with SDEI events masked */
	(void) sdei_pe_mask();

	return NULL;
}
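
/*
 * Note (illustrative): since every PE comes out of boot, CPU_ON and suspend
 * wakeup with SDEI masked, the client must issue SDEI_PE_UNMASK on each PE
 * before any SDEI event can be dispatched there.
 */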

/* Initialise an SDEI class */
static void sdei_class_init(sdei_class_t class)
{
	unsigned int i;
	bool zero_found __unused = false;
	int ev_num_so_far __unused;
	sdei_ev_map_t *map;

	/* Sanity check and configuration of shared events */
	ev_num_so_far = -1;
	for_each_shared_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));

		ev_num_so_far = map->ev_num;

		/* Event 0 must not be shared */
		assert(map->ev_num != SDEI_EVENT_0);

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a shared event */
		assert(is_event_shared(map));

		/* No shared mapping should have the signalable property */
		assert(!is_event_signalable(map));

		/* Shared mappings can't be explicit */
		assert(!is_map_explicit(map));
#endif

		/* Skip mappings that don't belong to the class being initialised */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (is_map_dynamic(map)) {
			assert(map->intr == SDEI_DYN_IRQ);
			assert(is_event_normal(map));
			num_dyn_shrd_slots++;
		} else {
			/* Shared mappings must be bound to a shared interrupt */
			assert(plat_ic_is_spi(map->intr) != 0);
			set_map_bound(map);
		}

		init_map(map);
	}

	/* Sanity check and configuration of private events for this CPU */
	ev_num_so_far = -1;
	for_each_private_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));

		ev_num_so_far = map->ev_num;

		if (map->ev_num == SDEI_EVENT_0) {
			zero_found = true;

			/* Event 0 must be a Secure SGI */
			assert(is_secure_sgi(map->intr));

			/*
			 * Event 0 can only have the signalable flag (apart
			 * from being private).
			 */
			assert(map->map_flags == (SDEI_MAPF_SIGNALABLE |
						SDEI_MAPF_PRIVATE));
		} else {
			/* No other mapping should have the signalable property */
			assert(!is_event_signalable(map));
		}

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a private event */
		assert(is_event_private(map));

		/*
		 * Other than priority, explicit events can only have the
		 * explicit and private flags set.
		 */
		if (is_map_explicit(map)) {
			assert((map->map_flags | SDEI_MAPF_CRITICAL) ==
					(SDEI_MAPF_EXPLICIT | SDEI_MAPF_PRIVATE
					| SDEI_MAPF_CRITICAL));
		}
#endif

		/* Skip mappings that don't belong to the class being initialised */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (map->ev_num != SDEI_EVENT_0) {
			if (is_map_dynamic(map)) {
				assert(map->intr == SDEI_DYN_IRQ);
				assert(is_event_normal(map));
				num_dyn_priv_slots++;
			} else if (is_map_explicit(map)) {
				/*
				 * Explicit mappings don't have a backing
				 * SDEI interrupt, but verify that anyway.
				 */
				assert(map->intr == SDEI_DYN_IRQ);
			} else {
				/*
				 * Private mappings must be bound to a private
				 * interrupt.
				 */
				assert(plat_ic_is_ppi((unsigned) map->intr) != 0);
				set_map_bound(map);
			}
		}

		init_map(map);
	}

	/* Ensure event 0 is in the mapping */
	assert(zero_found);

	(void) sdei_cpu_on_init(NULL);
}

/* SDEI dispatcher initialisation */
void sdei_init(void)
{
	plat_sdei_setup();
	sdei_class_init(SDEI_CRITICAL);
	sdei_class_init(SDEI_NORMAL);

	/* Register priority level handlers */
	ehf_register_priority_handler(PLAT_SDEI_CRITICAL_PRI,
			sdei_intr_handler);
	ehf_register_priority_handler(PLAT_SDEI_NORMAL_PRI,
			sdei_intr_handler);
}
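
/*
 * Note (illustrative): both classes share a single interrupt handler. The
 * Exception Handling Framework dispatches to sdei_intr_handler() based on
 * the EL3 priority programmed for the event's interrupt, with critical
 * events at the numerically lower (i.e. higher) priority, as the CASSERT
 * near the top of this file enforces.
 */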

/* Populate SDEI event entry */
static void set_sdei_entry(sdei_entry_t *se, uint64_t ep, uint64_t arg,
		unsigned int flags, uint64_t affinity)
{
	assert(se != NULL);

	se->ep = ep;
	se->arg = arg;
	se->affinity = (affinity & MPIDR_AFFINITY_MASK);
	se->reg_flags = flags;
}

static uint64_t sdei_version(void)
{
	return MAKE_SDEI_VERSION(MAJOR_VERSION, MINOR_VERSION, VENDOR_VERSION);
}

/* Validate flags and MPIDR values for REGISTER and ROUTING_SET calls */
static int validate_flags(uint64_t flags, uint64_t mpidr)
{
	/* Validate flags */
	switch (flags) {
	case SDEI_REGF_RM_PE:
		if (!is_valid_mpidr(mpidr))
			return SDEI_EINVAL;
		break;
	case SDEI_REGF_RM_ANY:
		break;
	default:
		/* Unknown flags */
		return SDEI_EINVAL;
	}

	return 0;
}

/* Set routing of an SDEI event */
static int sdei_event_routing_set(int ev_num, uint64_t flags, uint64_t mpidr)
{
	int ret;
	unsigned int routing;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	ret = validate_flags(flags, mpidr);
	if (ret != 0)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	/* The event must not be private */
	if (is_event_private(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	if (!is_map_bound(map) || is_event_private(map)) {
		ret = SDEI_EINVAL;
		goto finish;
	}

	if (!can_sdei_state_trans(se, DO_ROUTING)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/* Choose appropriate routing */
	routing = (unsigned int) ((flags == SDEI_REGF_RM_ANY) ?
			INTR_ROUTING_MODE_ANY : INTR_ROUTING_MODE_PE);

	/* Update event registration flag */
	se->reg_flags = (unsigned int) flags;
	if (flags == SDEI_REGF_RM_PE) {
		se->affinity = (mpidr & MPIDR_AFFINITY_MASK);
	}

	/*
	 * ROUTING_SET is permissible only when the event's composite state is
	 * 'registered, disabled, and not running'. This means that the
	 * interrupt is currently disabled, and not active.
	 */
	plat_ic_set_spi_routing(map->intr, routing, (u_register_t) mpidr);

finish:
	sdei_map_unlock(map);

	return ret;
}
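
/*
 * Illustrative call sequence (hypothetical event number): a client that has
 * registered shared event 804 but not yet enabled it may route it to one PE
 * with SDEI_EVENT_ROUTING_SET(804, SDEI_REGF_RM_PE, mpidr), or back to any
 * PE with SDEI_EVENT_ROUTING_SET(804, SDEI_REGF_RM_ANY, 0). Once the event
 * is enabled, or while its handler is running, the state check above makes
 * the call return SDEI_EDENY.
 */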

/* Register handler and argument for an SDEI event */
static int64_t sdei_event_register(int ev_num,
		uint64_t ep,
		uint64_t arg,
		uint64_t flags,
		uint64_t mpidr)
{
	int ret;
	unsigned int routing;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	sdei_state_t backup_state;

	if ((ep == 0U) || (plat_sdei_validate_entry_point(
					ep, sdei_client_el()) != 0)) {
		return SDEI_EINVAL;
	}

	ret = validate_flags(flags, mpidr);
	if (ret != 0)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	/* Private events always target the PE */
	if (is_event_private(map)) {
		/*
		 * SDEI internally handles private events in the same manner
		 * as shared events with routing mode RM_PE. Since the routing
		 * mode flag and affinity fields are not used when registering
		 * a private event, set them here.
		 */
		flags = SDEI_REGF_RM_PE;

		/*
		 * The kernel may pass 0 as the mpidr. As we set flags to
		 * SDEI_REGF_RM_PE, set the mpidr as well.
		 */
		mpidr = read_mpidr_el1();
	}

	se = get_event_entry(map);

	/*
	 * Even though the register operation is per-event (and, for private
	 * events, additionally per-PE), it has to be serialised with respect
	 * to bind/release, which are global operations. So we hold the lock
	 * throughout, unconditionally.
	 */
	sdei_map_lock(map);

	backup_state = se->state;
	if (!can_sdei_state_trans(se, DO_REGISTER))
		goto fallback;

	/*
	 * When registering for dynamic events, make sure it's been bound
	 * already. This has to be the case as, without binding, the client
	 * can't know about the event number to register for.
	 */
	if (is_map_dynamic(map) && !is_map_bound(map))
		goto fallback;

	if (is_event_private(map)) {
		/* Multiple calls to register are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only a single call to register is possible for shared events */
		assert(map->reg_count == 0);
	}

	if (is_map_bound(map)) {
		/* Meanwhile, did any PE ACK the interrupt? */
		if (plat_ic_get_interrupt_active(map->intr) != 0U)
			goto fallback;

		/* The interrupt must currently be owned by Non-secure */
		if (plat_ic_get_interrupt_type(map->intr) != INTR_TYPE_NS)
			goto fallback;

		/*
		 * Disable forwarding of new interrupt triggers to the CPU
		 * interface.
		 */
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Any events that are triggered after register and before
		 * enable should remain pending. Clear any previous interrupt
		 * triggers which are pending (except for SGIs). This has no
		 * effect on level-triggered interrupts.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		/* Map interrupt to EL3 and program the correct priority */
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_EL3);

		/* Program the appropriate interrupt priority */
		plat_ic_set_interrupt_priority(map->intr, sdei_event_priority(map));

		/*
		 * Set the routing mode for the shared event as requested. We
		 * already ensure that shared events get bound to SPIs.
		 */
		if (is_event_shared(map)) {
			routing = (unsigned int) ((flags == SDEI_REGF_RM_ANY) ?
					INTR_ROUTING_MODE_ANY : INTR_ROUTING_MODE_PE);
			plat_ic_set_spi_routing(map->intr, routing,
					(u_register_t) mpidr);
		}
	}

	/* Populate event entries */
	set_sdei_entry(se, ep, arg, (unsigned int) flags, mpidr);

	/* Increment register count */
	map->reg_count++;

	sdei_map_unlock(map);

	return 0;

fallback:
	/* Reinstate previous state */
	se->state = backup_state;

	sdei_map_unlock(map);

	return SDEI_EDENY;
}
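
/*
 * Note (illustrative): can_sdei_state_trans() updates se->state as a side
 * effect when the transition is legal. That is why sdei_event_register()
 * snapshots the state up front and restores the snapshot on the fallback
 * path, rather than undoing individual field updates.
 */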

/* Enable SDEI event */
static int64_t sdei_event_enable(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	int ret;
	bool before, after;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);
	ret = SDEI_EDENY;

	if (is_event_shared(map))
		sdei_map_lock(map);

	before = GET_EV_STATE(se, ENABLED);
	if (!can_sdei_state_trans(se, DO_ENABLE))
		goto finish;
	after = GET_EV_STATE(se, ENABLED);

	/*
	 * Enable the interrupt for bound events only if there's a change in
	 * the enabled state.
	 */
	if (is_map_bound(map) && (!before && after))
		plat_ic_enable_interrupt(map->intr);

	ret = 0;

finish:
	if (is_event_shared(map))
		sdei_map_unlock(map);

	return ret;
}

/* Disable SDEI event */
static int sdei_event_disable(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	int ret;
	bool before, after;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);
	ret = SDEI_EDENY;

	if (is_event_shared(map))
		sdei_map_lock(map);

	before = GET_EV_STATE(se, ENABLED);
	if (!can_sdei_state_trans(se, DO_DISABLE))
		goto finish;
	after = GET_EV_STATE(se, ENABLED);

	/*
	 * Disable the interrupt for bound events only if there's a change in
	 * the enabled state.
	 */
	if (is_map_bound(map) && (before && !after))
		plat_ic_disable_interrupt(map->intr);

	ret = 0;

finish:
	if (is_event_shared(map))
		sdei_map_unlock(map);

	return ret;
}
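
/*
 * Note (illustrative): sampling the ENABLED bit before and after the state
 * transition keeps ENABLE/DISABLE idempotent at the interrupt controller:
 * re-enabling an already-enabled event is a legal transition, but it must
 * not poke the interrupt controller a second time.
 */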

/* Query SDEI event information */
static int64_t sdei_event_get_info(int ev_num, int info)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	uint64_t flags;
	bool registered;
	uint64_t affinity;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Sample state under lock */
	registered = GET_EV_STATE(se, REGISTERED);
	flags = se->reg_flags;
	affinity = se->affinity;

	if (is_event_shared(map))
		sdei_map_unlock(map);

	switch (info) {
	case SDEI_INFO_EV_TYPE:
		return is_event_shared(map);

	case SDEI_INFO_EV_NOT_SIGNALED:
		return !is_event_signalable(map);

	case SDEI_INFO_EV_PRIORITY:
		return is_event_critical(map);

	case SDEI_INFO_EV_ROUTING_MODE:
		if (!is_event_shared(map))
			return SDEI_EINVAL;
		if (!registered)
			return SDEI_EDENY;
		return (flags == SDEI_REGF_RM_PE);

	case SDEI_INFO_EV_ROUTING_AFF:
		if (!is_event_shared(map))
			return SDEI_EINVAL;
		if (!registered)
			return SDEI_EDENY;
		if (flags != SDEI_REGF_RM_PE)
			return SDEI_EINVAL;
		return affinity;

	default:
		return SDEI_EINVAL;
	}
}

/* Unregister an SDEI event */
static int sdei_event_unregister(int ev_num)
{
	int ret = 0;
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	/*
	 * Even though the unregister operation is per-event (and, for private
	 * events, additionally per-PE), it has to be serialised with respect
	 * to bind/release, which are global operations. So we hold the lock
	 * throughout, unconditionally.
	 */
	sdei_map_lock(map);

	if (!can_sdei_state_trans(se, DO_UNREGISTER)) {
		/*
		 * Even if the call is invalid, if the handler is running (for
		 * example, the client unregistered from within a running
		 * handler earlier), return the pending error code; otherwise,
		 * return deny.
		 */
		ret = GET_EV_STATE(se, RUNNING) ? SDEI_EPEND : SDEI_EDENY;

		goto finish;
	}

	map->reg_count--;
	if (is_event_private(map)) {
		/* Multiple calls to register are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only a single call to register is possible for shared events */
		assert(map->reg_count == 0);
	}

	if (is_map_bound(map)) {
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Clear pending interrupt. Skip for SGIs as they may not be
		 * cleared on all interrupt controllers.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		assert(plat_ic_get_interrupt_type(map->intr) == INTR_TYPE_EL3);
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_NS);
		plat_ic_set_interrupt_priority(map->intr, LOWEST_INTR_PRIORITY);
	}

	clear_event_entries(se);

	/*
	 * If the handler is running at the time of unregister, return the
	 * pending error code.
	 */
	if (GET_EV_STATE(se, RUNNING))
		ret = SDEI_EPEND;

finish:
	sdei_map_unlock(map);

	return ret;
}

/* Query status of an SDEI event */
static int sdei_event_status(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_state_t state;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* The state value directly maps to the expected return format */
	state = se->state;

	if (is_event_shared(map))
		sdei_map_unlock(map);

	return (int) state;
}
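
/*
 * Note (illustrative): the composite state returned by sdei_event_status()
 * follows the SDEI_EVENT_STATUS layout in the SDEI specification: bit 0 is
 * 'registered', bit 1 is 'enabled' and bit 2 is 'running', which is why
 * se->state can be returned without translation.
 */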

/* Bind an SDEI event to an interrupt */
static int sdei_interrupt_bind(unsigned int intr_num)
{
	sdei_ev_map_t *map;
	bool retry = true, shared_mapping;

	/* The interrupt must be either a PPI or an SPI */
	if (!(plat_ic_is_ppi(intr_num) || plat_ic_is_spi(intr_num)))
		return SDEI_EINVAL;

	shared_mapping = (plat_ic_is_spi(intr_num) != 0);
	do {
		/*
		 * Bail out if there is already an event for this interrupt,
		 * either platform-defined or dynamic.
		 */
		map = find_event_map_by_intr(intr_num, shared_mapping);
		if (map != NULL) {
			if (is_map_dynamic(map)) {
				if (is_map_bound(map)) {
					/*
					 * Dynamic event, already bound. Return
					 * the event number.
					 */
					return map->ev_num;
				}
			} else {
				/* Can't bind a non-dynamic event */
				return SDEI_EINVAL;
			}
		}

		/*
		 * The interrupt is not bound yet. Try to find a free slot to
		 * bind it. Free dynamic mappings have their interrupt set as
		 * SDEI_DYN_IRQ.
		 */
		map = find_event_map_by_intr(SDEI_DYN_IRQ, shared_mapping);
		if (map == NULL)
			return SDEI_ENOMEM;

		/* The returned mapping must be dynamic */
		assert(is_map_dynamic(map));

		/*
		 * We cannot assert that the map is unbound here, as we might
		 * be racing with another bind.
		 */

		/* The requested interrupt must already belong to NS */
		if (plat_ic_get_interrupt_type(intr_num) != INTR_TYPE_NS)
			return SDEI_EDENY;

		/*
		 * Interrupt programming and ownership transfer are deferred
		 * until register.
		 */

		sdei_map_lock(map);
		if (!is_map_bound(map)) {
			map->intr = intr_num;
			set_map_bound(map);
			retry = false;
		}
		sdei_map_unlock(map);
	} while (retry);

	return map->ev_num;
}

/* Release an SDEI event previously bound to an interrupt */
static int sdei_interrupt_release(int ev_num)
{
	int ret = 0;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	if (!is_map_dynamic(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	/* The event must have been unregistered before release */
	if (map->reg_count != 0) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/*
	 * Interrupt release never causes the state to change. We only check
	 * whether it's permissible or not.
	 */
	if (!can_sdei_state_trans(se, DO_RELEASE)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	if (is_map_bound(map)) {
		/*
		 * Deny release if the interrupt is active, which means it's
		 * probably being acknowledged and handled elsewhere.
		 */
		if (plat_ic_get_interrupt_active(map->intr) != 0U) {
			ret = SDEI_EDENY;
			goto finish;
		}

		/*
		 * Interrupt programming and ownership transfer are already
		 * done during unregister.
		 */

		map->intr = SDEI_DYN_IRQ;
		clr_map_bound(map);
	} else {
		SDEI_LOG("Error release bound:%d cnt:%d\n", is_map_bound(map),
				map->reg_count);
		ret = SDEI_EINVAL;
	}

finish:
	sdei_map_unlock(map);

	return ret;
}
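
/*
 * Illustrative client flow (hypothetical interrupt number): to use a dynamic
 * event, a client typically calls SDEI_INTERRUPT_BIND(intr) to obtain an
 * event number, then SDEI_EVENT_REGISTER and SDEI_EVENT_ENABLE on that
 * number. Tear-down reverses the order: unregister first (so reg_count drops
 * to 0), then SDEI_INTERRUPT_RELEASE frees the slot; the interrupt itself
 * was already handed back to Non-secure during unregister, as noted above.
 */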

/* Perform reset of private SDEI events */
static int sdei_private_reset(void)
{
	sdei_ev_map_t *map;
	int ret = 0, final_ret = 0;
	unsigned int i;

	/* Unregister all private events */
	for_each_private_map(i, map) {
		/*
		 * The unregister can fail if the event is not registered,
		 * which is allowed, and a deny will be returned. But if the
		 * event is running or unregister-pending, the call fails.
		 */
		ret = sdei_event_unregister(map->ev_num);
		if ((ret == SDEI_EPEND) && (final_ret == 0))
			final_ret = SDEI_EDENY;
	}

	return final_ret;
}

/* Perform reset of shared SDEI events */
static int sdei_shared_reset(void)
{
	const sdei_mapping_t *mapping;
	sdei_ev_map_t *map;
	int ret = 0, final_ret = 0;
	unsigned int i, j;

	/* Unregister all shared events */
	for_each_shared_map(i, map) {
		/*
		 * The unregister can fail if the event is not registered,
		 * which is allowed, and a deny will be returned. But if the
		 * event is running or unregister-pending, the call fails.
		 */
		ret = sdei_event_unregister(map->ev_num);
		if ((ret == SDEI_EPEND) && (final_ret == 0))
			final_ret = SDEI_EDENY;
	}

	if (final_ret != 0)
		return final_ret;

	/*
	 * Loop through both private and shared mappings, and release all
	 * bindings.
	 */
	for_each_mapping_type(i, mapping) {
		iterate_mapping(mapping, j, map) {
			/*
			 * Release bindings for mappings that are dynamic and
			 * bound.
			 */
			if (is_map_dynamic(map) && is_map_bound(map)) {
				/*
				 * Any failure to release would mean there is
				 * at least one PE registered for the event.
				 */
				ret = sdei_interrupt_release(map->ev_num);
				if ((ret != 0) && (final_ret == 0))
					final_ret = ret;
			}
		}
	}

	return final_ret;
}

/* Send a signal to another SDEI client PE */
static int sdei_signal(int ev_num, uint64_t target_pe)
{
	sdei_ev_map_t *map;

	/* Only event 0 can be signalled */
	if (ev_num != SDEI_EVENT_0)
		return SDEI_EINVAL;

	/* Find the mapping for event 0 */
	map = find_event_map(SDEI_EVENT_0);
	if (map == NULL)
		return SDEI_EINVAL;

	/* The event must be signalable */
	if (!is_event_signalable(map))
		return SDEI_EINVAL;

	/* Validate the target */
	if (!is_valid_mpidr(target_pe))
		return SDEI_EINVAL;

	/* Raise SGI. The platform will validate target_pe */
	plat_ic_raise_el3_sgi((int) map->intr, (u_register_t) target_pe);

	return 0;
}

/* Query SDEI dispatcher features */
static uint64_t sdei_features(unsigned int feature)
{
	if (feature == SDEI_FEATURE_BIND_SLOTS) {
		return FEATURE_BIND_SLOTS(num_dyn_priv_slots,
				num_dyn_shrd_slots);
	}

	return (uint64_t) SDEI_EINVAL;
}
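
/*
 * Illustrative note: per the SDEI specification, the BIND_SLOTS result packs
 * the number of private dynamic slots into bits [31:16] and the number of
 * shared dynamic slots into bits [15:0]. For example, with 2 private and 3
 * shared slots, the call returns 0x00020003.
 */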

/* SDEI top-level handler for servicing SMCs */
uint64_t sdei_smc_handler(uint32_t smc_fid,
		uint64_t x1,
		uint64_t x2,
		uint64_t x3,
		uint64_t x4,
		void *cookie,
		void *handle,
		uint64_t flags)
{
	uint64_t x5;
	unsigned int ss = (unsigned int) get_interrupt_src_ss(flags);
	int64_t ret;
	bool resume = false;
	cpu_context_t *ctx = handle;
	int ev_num = (int) x1;

	if (ss != NON_SECURE)
		SMC_RET1(ctx, SMC_UNK);

	/* Verify the caller EL */
	if (GET_EL(read_spsr_el3()) != sdei_client_el())
		SMC_RET1(ctx, SMC_UNK);

	switch (smc_fid) {
	case SDEI_VERSION:
		SDEI_LOG("> VER\n");
		ret = (int64_t) sdei_version();
		SDEI_LOG("< VER:%" PRIx64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_REGISTER:
		x5 = SMC_GET_GP(ctx, CTX_GPREG_X5);
		SDEI_LOG("> REG(n:%d e:%" PRIx64 " a:%" PRIx64 " f:%x m:%" PRIx64 ")\n",
				ev_num, x2, x3, (int) x4, x5);
		ret = sdei_event_register(ev_num, x2, x3, x4, x5);
		SDEI_LOG("< REG:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_ENABLE:
		SDEI_LOG("> ENABLE(n:0x%x)\n", ev_num);
		ret = sdei_event_enable(ev_num);
		SDEI_LOG("< ENABLE:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_DISABLE:
		SDEI_LOG("> DISABLE(n:0x%x)\n", ev_num);
		ret = sdei_event_disable(ev_num);
		SDEI_LOG("< DISABLE:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_CONTEXT:
		SDEI_LOG("> CTX(p:%d):%lx\n", (int) x1, read_mpidr_el1());
		ret = sdei_event_context(ctx, (unsigned int) x1);
		SDEI_LOG("< CTX:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_COMPLETE_AND_RESUME:
		resume = true;
		/* Fallthrough */

	case SDEI_EVENT_COMPLETE:
		SDEI_LOG("> COMPLETE(r:%u sta/ep:%" PRIx64 "):%lx\n",
				(unsigned int) resume, x1, read_mpidr_el1());
		ret = sdei_event_complete(resume, x1);
		SDEI_LOG("< COMPLETE:%" PRIx64 "\n", ret);

		/*
		 * Set the error code only if the call failed. If the call
		 * succeeded, we discard the dispatched context and restore
		 * the interrupted context to a pristine condition, so it
		 * shouldn't be modified. We don't return to the caller in
		 * this case anyway.
		 */
		if (ret != 0)
			SMC_RET1(ctx, ret);

		SMC_RET0(ctx);

	case SDEI_EVENT_STATUS:
		SDEI_LOG("> STAT(n:0x%x)\n", ev_num);
		ret = sdei_event_status(ev_num);
		SDEI_LOG("< STAT:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_GET_INFO:
		SDEI_LOG("> INFO(n:0x%x, %d)\n", ev_num, (int) x2);
		ret = sdei_event_get_info(ev_num, (int) x2);
		SDEI_LOG("< INFO:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_UNREGISTER:
		SDEI_LOG("> UNREG(n:0x%x)\n", ev_num);
		ret = sdei_event_unregister(ev_num);
		SDEI_LOG("< UNREG:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_PE_UNMASK:
		SDEI_LOG("> UNMASK:%lx\n", read_mpidr_el1());
		sdei_pe_unmask();
		SDEI_LOG("< UNMASK:%d\n", 0);
		SMC_RET1(ctx, 0);

	case SDEI_PE_MASK:
		SDEI_LOG("> MASK:%lx\n", read_mpidr_el1());
		ret = sdei_pe_mask();
		SDEI_LOG("< MASK:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_INTERRUPT_BIND:
		SDEI_LOG("> BIND(%d)\n", (int) x1);
		ret = sdei_interrupt_bind((unsigned int) x1);
		SDEI_LOG("< BIND:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_INTERRUPT_RELEASE:
		SDEI_LOG("> REL(0x%x)\n", ev_num);
		ret = sdei_interrupt_release(ev_num);
		SDEI_LOG("< REL:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_SHARED_RESET:
		SDEI_LOG("> S_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_shared_reset();
		SDEI_LOG("< S_RESET:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_PRIVATE_RESET:
		SDEI_LOG("> P_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_private_reset();
		SDEI_LOG("< P_RESET:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_ROUTING_SET:
		SDEI_LOG("> ROUTE_SET(n:%d f:%" PRIx64 " aff:%" PRIx64 ")\n",
				ev_num, x2, x3);
		ret = sdei_event_routing_set(ev_num, x2, x3);
		SDEI_LOG("< ROUTE_SET:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_FEATURES:
		SDEI_LOG("> FTRS(f:%" PRIx64 ")\n", x1);
		ret = (int64_t) sdei_features((unsigned int) x1);
		SDEI_LOG("< FTRS:%" PRIx64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_SIGNAL:
		SDEI_LOG("> SIGNAL(e:%d t:%" PRIx64 ")\n", ev_num, x2);
		ret = sdei_signal(ev_num, x2);
		SDEI_LOG("< SIGNAL:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	default:
		/* Do nothing in the default case */
		break;
	}

	WARN("Unimplemented SDEI Call: 0x%x\n", smc_fid);
	SMC_RET1(ctx, SMC_UNK);
}

/* Subscribe to PSCI CPU on to initialize per-CPU SDEI configuration */
SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, sdei_cpu_on_init);

/* Subscribe to PSCI CPU suspend finisher for per-CPU configuration */
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, sdei_cpu_wakeup_init);