/*
 * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <stddef.h>
#include <string.h>

#include <bl31/bl31.h>
#include <bl31/ehf.h>
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <lib/cassert.h>
#include <lib/el3_runtime/pubsub.h>
#include <lib/utils.h>
#include <plat/common/platform.h>
#include <services/sdei.h>

#include "sdei_private.h"

#define MAJOR_VERSION	1ULL
#define MINOR_VERSION	0ULL
#define VENDOR_VERSION	0ULL

#define MAKE_SDEI_VERSION(_major, _minor, _vendor) \
	((((_major)) << 48ULL) | (((_minor)) << 32ULL) | (_vendor))
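
/*
 * The value returned by SDEI_VERSION packs the fields above as follows
 * (layout per the SDEI specification; the example value follows directly
 * from the macro):
 *
 *	bits [63:48]	major version
 *	bits [47:32]	minor version
 *	bits [31:0]	vendor-defined version
 *
 * e.g. MAKE_SDEI_VERSION(1, 0, 0) == 0x0001000000000000ULL
 */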

#define LOWEST_INTR_PRIORITY		0xff

#define is_valid_affinity(_mpidr)	(plat_core_pos_by_mpidr(_mpidr) >= 0)

CASSERT(PLAT_SDEI_CRITICAL_PRI < PLAT_SDEI_NORMAL_PRI,
		sdei_critical_must_have_higher_priority);

static unsigned int num_dyn_priv_slots, num_dyn_shrd_slots;

/* Initialise SDEI map entries */
static void init_map(sdei_ev_map_t *map)
{
	map->reg_count = 0;
}

/* Convert mapping to SDEI class */
static sdei_class_t map_to_class(sdei_ev_map_t *map)
{
	return is_event_critical(map) ? SDEI_CRITICAL : SDEI_NORMAL;
}

/* Clear SDEI event entries except state */
static void clear_event_entries(sdei_entry_t *se)
{
	se->ep = 0;
	se->arg = 0;
	se->affinity = 0;
	se->reg_flags = 0;
}

/* Perform CPU-specific state initialisation */
static void *sdei_cpu_on_init(const void *arg)
{
	unsigned int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	/* Initialize private mappings on this CPU */
	for_each_private_map(i, map) {
		se = get_event_entry(map);
		clear_event_entries(se);
		se->state = 0;
	}

	SDEI_LOG("Private events initialized on %lx\n", read_mpidr_el1());

	/* All PEs start with SDEI events masked */
	(void) sdei_pe_mask();

	return NULL;
}

/* CPU initialisation after wakeup from suspend */
static void *sdei_cpu_wakeup_init(const void *arg)
{
	SDEI_LOG("Events masked on %lx\n", read_mpidr_el1());

	/* All PEs wake up with SDEI events masked */
	(void) sdei_pe_mask();

	return NULL;
}
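
/*
 * Because every PE comes out of power-on and suspend with SDEI events
 * masked (see the two initialisers above), a client that wants to receive
 * events must explicitly issue SDEI_PE_UNMASK on each PE. A minimal
 * client-side sketch, where smc() stands in for whatever SMC invocation
 * helper the client uses (illustrative, not part of this file):
 *
 *	smc(SDEI_PE_UNMASK);	// once per PE, after boot or resume
 */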

/* Initialise an SDEI class */
static void sdei_class_init(sdei_class_t class)
{
	unsigned int i;
	bool zero_found __unused = false;
	int ev_num_so_far __unused;
	sdei_ev_map_t *map;

	/* Sanity check and configuration of shared events */
	ev_num_so_far = -1;
	for_each_shared_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));

		ev_num_so_far = map->ev_num;

		/* Event 0 must not be shared */
		assert(map->ev_num != SDEI_EVENT_0);

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a shared event */
		assert(is_event_shared(map));

		/* No shared mapping should have signalable property */
		assert(!is_event_signalable(map));

		/* Shared mappings can't be explicit */
		assert(!is_map_explicit(map));
#endif

		/* Skip events that don't belong to the class being initialised */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (is_map_dynamic(map)) {
			assert(map->intr == SDEI_DYN_IRQ);
			assert(is_event_normal(map));
			num_dyn_shrd_slots++;
		} else {
			/* Shared mappings must be bound to shared interrupt */
			assert(plat_ic_is_spi(map->intr) != 0);
			set_map_bound(map);
		}

		init_map(map);
	}

	/* Sanity check and configuration of private events for this CPU */
	ev_num_so_far = -1;
	for_each_private_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));

		ev_num_so_far = map->ev_num;

		if (map->ev_num == SDEI_EVENT_0) {
			zero_found = true;

			/* Event 0 must be a Secure SGI */
			assert(is_secure_sgi(map->intr));

			/*
			 * Event 0 can only have the signalable flag (apart
			 * from being private).
			 */
			assert(map->map_flags == (SDEI_MAPF_SIGNALABLE |
						SDEI_MAPF_PRIVATE));
		} else {
			/* No other mapping should have signalable property */
			assert(!is_event_signalable(map));
		}

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a private event */
		assert(is_event_private(map));

		/*
		 * Other than priority, explicit events can only have explicit
		 * and private flags set.
		 */
		if (is_map_explicit(map)) {
			assert((map->map_flags | SDEI_MAPF_CRITICAL) ==
					(SDEI_MAPF_EXPLICIT | SDEI_MAPF_PRIVATE
					| SDEI_MAPF_CRITICAL));
		}
#endif

		/* Skip events that don't belong to the class being initialised */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (map->ev_num != SDEI_EVENT_0) {
			if (is_map_dynamic(map)) {
				assert(map->intr == SDEI_DYN_IRQ);
				assert(is_event_normal(map));
				num_dyn_priv_slots++;
			} else if (is_map_explicit(map)) {
				/*
				 * Explicit mappings don't have a backing
				 * SDEI interrupt, but verify that anyway.
				 */
				assert(map->intr == SDEI_DYN_IRQ);
			} else {
				/*
				 * Private mappings must be bound to private
				 * interrupt.
				 */
				assert(plat_ic_is_ppi((unsigned) map->intr) != 0);
				set_map_bound(map);
			}
		}

		init_map(map);
	}

	/* Ensure event 0 is in the mapping */
	assert(zero_found);

	(void) sdei_cpu_on_init(NULL);
}

/* SDEI dispatcher initialisation */
void sdei_init(void)
{
	plat_sdei_setup();
	sdei_class_init(SDEI_CRITICAL);
	sdei_class_init(SDEI_NORMAL);

	/* Register priority level handlers */
	ehf_register_priority_handler(PLAT_SDEI_CRITICAL_PRI,
			sdei_intr_handler);
	ehf_register_priority_handler(PLAT_SDEI_NORMAL_PRI,
			sdei_intr_handler);
}
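
/*
 * Both SDEI classes share the same top-level interrupt handler; the EHF
 * priority at which it is invoked is what distinguishes a critical dispatch
 * from a normal one. Lower numeric values mean higher priority, which the
 * CASSERT near the top of this file enforces. For illustration only (the
 * actual values are platform-specific), Arm standard platforms define:
 *
 *	#define PLAT_SDEI_CRITICAL_PRI	0x60
 *	#define PLAT_SDEI_NORMAL_PRI	0x70
 */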

/* Populate SDEI event entry */
static void set_sdei_entry(sdei_entry_t *se, uint64_t ep, uint64_t arg,
		unsigned int flags, uint64_t affinity)
{
	assert(se != NULL);

	se->ep = ep;
	se->arg = arg;
	se->affinity = (affinity & MPIDR_AFFINITY_MASK);
	se->reg_flags = flags;
}

static uint64_t sdei_version(void)
{
	return MAKE_SDEI_VERSION(MAJOR_VERSION, MINOR_VERSION, VENDOR_VERSION);
}

/* Validate flags and MPIDR values for REGISTER and ROUTING_SET calls */
static int validate_flags(uint64_t flags, uint64_t mpidr)
{
	/* Validate flags */
	switch (flags) {
	case SDEI_REGF_RM_PE:
		if (!is_valid_affinity(mpidr))
			return SDEI_EINVAL;
		break;
	case SDEI_REGF_RM_ANY:
		break;
	default:
		/* Unknown flags */
		return SDEI_EINVAL;
	}

	return 0;
}

/* Set routing of an SDEI event */
static int sdei_event_routing_set(int ev_num, uint64_t flags, uint64_t mpidr)
{
	int ret;
	unsigned int routing;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	ret = validate_flags(flags, mpidr);
	if (ret != 0)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	/* The event must not be private */
	if (is_event_private(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	if (!is_map_bound(map) || is_event_private(map)) {
		ret = SDEI_EINVAL;
		goto finish;
	}

	if (!can_sdei_state_trans(se, DO_ROUTING)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/* Choose appropriate routing */
	routing = (unsigned int) ((flags == SDEI_REGF_RM_ANY) ?
			INTR_ROUTING_MODE_ANY : INTR_ROUTING_MODE_PE);

	/* Update event registration flag */
	se->reg_flags = (unsigned int) flags;

	/*
	 * ROUTING_SET is permissible only when the event's composite state is
	 * 'registered, disabled, and not running'. This means that the
	 * interrupt is currently disabled, and not active.
	 */
	plat_ic_set_spi_routing(map->intr, routing, (u_register_t) mpidr);

finish:
	sdei_map_unlock(map);

	return ret;
}

/* Register handler and argument for an SDEI event */
static int64_t sdei_event_register(int ev_num,
		uint64_t ep,
		uint64_t arg,
		uint64_t flags,
		uint64_t mpidr)
{
	int ret;
	unsigned int routing;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	sdei_state_t backup_state;

	if ((ep == 0U) || (plat_sdei_validate_entry_point(
					ep, sdei_client_el()) != 0)) {
		return SDEI_EINVAL;
	}

	ret = validate_flags(flags, mpidr);
	if (ret != 0)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	/* Private events always target the PE */
	if (is_event_private(map))
		flags = SDEI_REGF_RM_PE;

	se = get_event_entry(map);

	/*
	 * Even though the register operation is per-event (and, for private
	 * events, must additionally be performed on each PE), it has to be
	 * serialised with respect to bind/release, which are global
	 * operations. So we hold the lock throughout, unconditionally.
	 */
	sdei_map_lock(map);

	backup_state = se->state;
	if (!can_sdei_state_trans(se, DO_REGISTER))
		goto fallback;

	/*
	 * When registering for dynamic events, make sure it's been bound
	 * already. This has to be the case as, without binding, the client
	 * can't know about the event number to register for.
	 */
	if (is_map_dynamic(map) && !is_map_bound(map))
		goto fallback;

	if (is_event_private(map)) {
		/* Multiple calls to register are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only single call to register is possible for shared events */
		assert(map->reg_count == 0);
	}

	if (is_map_bound(map)) {
		/* Meanwhile, did any PE ACK the interrupt? */
		if (plat_ic_get_interrupt_active(map->intr) != 0U)
			goto fallback;

		/* The interrupt must currently be owned by Non-secure */
		if (plat_ic_get_interrupt_type(map->intr) != INTR_TYPE_NS)
			goto fallback;

		/*
		 * Disable forwarding of new interrupt triggers to CPU
		 * interface.
		 */
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Any events that are triggered after register and before
		 * enable should remain pending. Clear any previous interrupt
		 * triggers which are pending (except for SGIs). This has no
		 * effect on level-triggered interrupts.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		/* Map interrupt to EL3 and program the correct priority */
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_EL3);

		/* Program the appropriate interrupt priority */
		plat_ic_set_interrupt_priority(map->intr, sdei_event_priority(map));

		/*
		 * Set the routing mode for shared event as requested. We
		 * already ensure that shared events get bound to SPIs.
		 */
		if (is_event_shared(map)) {
			routing = (unsigned int) ((flags == SDEI_REGF_RM_ANY) ?
					INTR_ROUTING_MODE_ANY : INTR_ROUTING_MODE_PE);
			plat_ic_set_spi_routing(map->intr, routing,
					(u_register_t) mpidr);
		}
	}

	/* Populate event entries */
	set_sdei_entry(se, ep, arg, (unsigned int) flags, mpidr);

	/* Increment register count */
	map->reg_count++;

	sdei_map_unlock(map);

	return 0;

fallback:
	/* Reinstate previous state */
	se->state = backup_state;

	sdei_map_unlock(map);

	return SDEI_EDENY;
}
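
/*
 * For reference, a typical non-secure client brings a dynamic event into
 * service with the following call sequence (sketch only; smc() stands in
 * for the client's SMC invocation mechanism, and the arguments map onto
 * x1..x5 as decoded in sdei_smc_handler() below):
 *
 *	ev = smc(SDEI_INTERRUPT_BIND, intr_num);	// obtain event number
 *	smc(SDEI_EVENT_REGISTER, ev, ep, arg, flags, mpidr);
 *	smc(SDEI_EVENT_ENABLE, ev);
 *	smc(SDEI_PE_UNMASK);				// on each PE
 */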

/* Enable SDEI event */
static int64_t sdei_event_enable(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	int ret;
	bool before, after;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);
	ret = SDEI_EDENY;

	if (is_event_shared(map))
		sdei_map_lock(map);

	before = GET_EV_STATE(se, ENABLED);
	if (!can_sdei_state_trans(se, DO_ENABLE))
		goto finish;
	after = GET_EV_STATE(se, ENABLED);

	/*
	 * Enable interrupt for bound events only if there's a change in
	 * enabled state.
	 */
	if (is_map_bound(map) && (!before && after))
		plat_ic_enable_interrupt(map->intr);

	ret = 0;

finish:
	if (is_event_shared(map))
		sdei_map_unlock(map);

	return ret;
}

/* Disable SDEI event */
static int sdei_event_disable(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	int ret;
	bool before, after;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);
	ret = SDEI_EDENY;

	if (is_event_shared(map))
		sdei_map_lock(map);

	before = GET_EV_STATE(se, ENABLED);
	if (!can_sdei_state_trans(se, DO_DISABLE))
		goto finish;
	after = GET_EV_STATE(se, ENABLED);

	/*
	 * Disable interrupt for bound events only if there's a change in
	 * enabled state.
	 */
	if (is_map_bound(map) && (before && !after))
		plat_ic_disable_interrupt(map->intr);

	ret = 0;

finish:
	if (is_event_shared(map))
		sdei_map_unlock(map);

	return ret;
}
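
/*
 * Note that both enable and disable sample the ENABLED bit before and after
 * the state transition, and touch the interrupt controller only when the bit
 * actually flips. Repeated SDEI_EVENT_ENABLE or SDEI_EVENT_DISABLE calls on
 * an event therefore succeed without redundantly reprogramming the interrupt.
 */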

/* Query SDEI event information */
static int64_t sdei_event_get_info(int ev_num, int info)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	uint64_t flags;
	bool registered;
	uint64_t affinity;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Sample state under lock */
	registered = GET_EV_STATE(se, REGISTERED);
	flags = se->reg_flags;
	affinity = se->affinity;

	if (is_event_shared(map))
		sdei_map_unlock(map);

	switch (info) {
	case SDEI_INFO_EV_TYPE:
		return is_event_shared(map);

	case SDEI_INFO_EV_NOT_SIGNALED:
		return !is_event_signalable(map);

	case SDEI_INFO_EV_PRIORITY:
		return is_event_critical(map);

	case SDEI_INFO_EV_ROUTING_MODE:
		if (!is_event_shared(map))
			return SDEI_EINVAL;
		if (!registered)
			return SDEI_EDENY;
		return (flags == SDEI_REGF_RM_PE);

	case SDEI_INFO_EV_ROUTING_AFF:
		if (!is_event_shared(map))
			return SDEI_EINVAL;
		if (!registered)
			return SDEI_EDENY;
		if (flags != SDEI_REGF_RM_PE)
			return SDEI_EINVAL;
		return affinity;

	default:
		return SDEI_EINVAL;
	}
}

/* Unregister an SDEI event */
static int sdei_event_unregister(int ev_num)
{
	int ret = 0;
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	/*
	 * Even though the unregister operation is per-event (and, for private
	 * events, must additionally be performed on each PE), it has to be
	 * serialised with respect to bind/release, which are global
	 * operations. So we hold the lock throughout, unconditionally.
	 */
	sdei_map_lock(map);

	if (!can_sdei_state_trans(se, DO_UNREGISTER)) {
		/*
		 * Even if the state transition is invalid, return the pending
		 * error code when the handler is running (for example, the
		 * event was already unregistered from within its running
		 * handler); otherwise, return deny.
		 */
		ret = GET_EV_STATE(se, RUNNING) ? SDEI_EPEND : SDEI_EDENY;

		goto finish;
	}

	map->reg_count--;
	if (is_event_private(map)) {
		/* Multiple calls to register are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only single call to register is possible for shared events */
		assert(map->reg_count == 0);
	}

	if (is_map_bound(map)) {
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Clear pending interrupt. Skip for SGIs as they may not be
		 * cleared on interrupt controllers.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		assert(plat_ic_get_interrupt_type(map->intr) == INTR_TYPE_EL3);
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_NS);
		plat_ic_set_interrupt_priority(map->intr, LOWEST_INTR_PRIORITY);
	}

	clear_event_entries(se);

	/*
	 * If the handler is running at the time of unregister, return the
	 * pending error code.
	 */
	if (GET_EV_STATE(se, RUNNING))
		ret = SDEI_EPEND;

finish:
	sdei_map_unlock(map);

	return ret;
}

/* Query status of an SDEI event */
static int sdei_event_status(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_state_t state;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* State value directly maps to the expected return format */
	state = se->state;

	if (is_event_shared(map))
		sdei_map_unlock(map);

	return (int) state;
}
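
/*
 * The composite state returned by SDEI_EVENT_STATUS encodes, per the SDEI
 * specification:
 *
 *	bit 0: event is registered
 *	bit 1: event is enabled
 *	bit 2: event handler is running
 *
 * se->state maintains this same layout, which is why it can be returned to
 * the caller directly above.
 */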

/* Bind an SDEI event to an interrupt */
static int sdei_interrupt_bind(unsigned int intr_num)
{
	sdei_ev_map_t *map;
	bool retry = true, shared_mapping;

	/* SGIs are not allowed to be bound */
	if (plat_ic_is_sgi(intr_num) != 0)
		return SDEI_EINVAL;

	shared_mapping = (plat_ic_is_spi(intr_num) != 0);
	do {
		/*
		 * Bail out if there is already an event for this interrupt,
		 * either platform-defined or dynamic.
		 */
		map = find_event_map_by_intr(intr_num, shared_mapping);
		if (map != NULL) {
			if (is_map_dynamic(map)) {
				if (is_map_bound(map)) {
					/*
					 * Dynamic event, already bound. Return
					 * event number.
					 */
					return map->ev_num;
				}
			} else {
				/* Binding non-dynamic event */
				return SDEI_EINVAL;
			}
		}

		/*
		 * The interrupt is not bound yet. Try to find a free slot to
		 * bind it. Free dynamic mappings have their interrupt set as
		 * SDEI_DYN_IRQ.
		 */
		map = find_event_map_by_intr(SDEI_DYN_IRQ, shared_mapping);
		if (map == NULL)
			return SDEI_ENOMEM;

		/* The returned mapping must be dynamic */
		assert(is_map_dynamic(map));

		/*
		 * We cannot assert for bound maps here, as we might be racing
		 * with another bind.
		 */

		/* The requested interrupt must already belong to NS */
		if (plat_ic_get_interrupt_type(intr_num) != INTR_TYPE_NS)
			return SDEI_EDENY;

		/*
		 * Interrupt programming and ownership transfer are deferred
		 * until register.
		 */

		sdei_map_lock(map);
		if (!is_map_bound(map)) {
			map->intr = intr_num;
			set_map_bound(map);
			retry = false;
		}
		sdei_map_unlock(map);
	} while (retry);

	return map->ev_num;
}
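
/*
 * The retry loop above exists because the free slot is located outside the
 * map lock: two PEs binding different interrupts may race for the same free
 * slot. Whichever PE takes the lock first claims the slot; the loser finds
 * the slot already bound on the locked re-check, goes around again, and
 * either discovers its interrupt now has a mapping or claims the next free
 * slot.
 */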

/* Release an SDEI event previously bound to an interrupt */
static int sdei_interrupt_release(int ev_num)
{
	int ret = 0;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	if (!is_map_dynamic(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	/* Event must have been unregistered before release */
	if (map->reg_count != 0) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/*
	 * Interrupt release never causes the state to change. We only check
	 * whether it's permissible or not.
	 */
	if (!can_sdei_state_trans(se, DO_RELEASE)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	if (is_map_bound(map)) {
		/*
		 * Deny release if the interrupt is active, which means it's
		 * probably being acknowledged and handled elsewhere.
		 */
		if (plat_ic_get_interrupt_active(map->intr) != 0U) {
			ret = SDEI_EDENY;
			goto finish;
		}

		/*
		 * Interrupt programming and ownership transfer are already
		 * done during unregister.
		 */

		map->intr = SDEI_DYN_IRQ;
		clr_map_bound(map);
	} else {
		SDEI_LOG("Error release bound:%d cnt:%d\n", is_map_bound(map),
				map->reg_count);
		ret = SDEI_EINVAL;
	}

finish:
	sdei_map_unlock(map);

	return ret;
}

/* Perform reset of private SDEI events */
static int sdei_private_reset(void)
{
	sdei_ev_map_t *map;
	int ret = 0, final_ret = 0;
	unsigned int i;

	/* Unregister all private events */
	for_each_private_map(i, map) {
		/*
		 * The unregister can fail if the event is not registered,
		 * which is allowed, and a deny will be returned. But if the
		 * event is running or unregister-pending, the call fails.
		 */
		ret = sdei_event_unregister(map->ev_num);
		if ((ret == SDEI_EPEND) && (final_ret == 0))
			final_ret = SDEI_EDENY;
	}

	return final_ret;
}

/* Perform reset of shared SDEI events */
static int sdei_shared_reset(void)
{
	const sdei_mapping_t *mapping;
	sdei_ev_map_t *map;
	int ret = 0, final_ret = 0;
	unsigned int i, j;

	/* Unregister all shared events */
	for_each_shared_map(i, map) {
		/*
		 * The unregister can fail if the event is not registered,
		 * which is allowed, and a deny will be returned. But if the
		 * event is running or unregister-pending, the call fails.
		 */
		ret = sdei_event_unregister(map->ev_num);
		if ((ret == SDEI_EPEND) && (final_ret == 0))
			final_ret = SDEI_EDENY;
	}

	if (final_ret != 0)
		return final_ret;

	/*
	 * Loop through both private and shared mappings, and release all
	 * bindings.
	 */
	for_each_mapping_type(i, mapping) {
		iterate_mapping(mapping, j, map) {
			/*
			 * Release bindings for mappings that are dynamic and
			 * bound.
			 */
			if (is_map_dynamic(map) && is_map_bound(map)) {
				/*
				 * Any failure to release would mean there is
				 * at least a PE registered for the event.
				 */
				ret = sdei_interrupt_release(map->ev_num);
				if ((ret != 0) && (final_ret == 0))
					final_ret = ret;
			}
		}
	}

	return final_ret;
}

/* Send a signal to another SDEI client PE */
static int sdei_signal(int ev_num, uint64_t target_pe)
{
	sdei_ev_map_t *map;

	/* Only event 0 can be signalled */
	if (ev_num != SDEI_EVENT_0)
		return SDEI_EINVAL;

	/* Find mapping for event 0 */
	map = find_event_map(SDEI_EVENT_0);
	if (map == NULL)
		return SDEI_EINVAL;

	/* The event must be signalable */
	if (!is_event_signalable(map))
		return SDEI_EINVAL;

	/* Validate target */
	if (plat_core_pos_by_mpidr(target_pe) < 0)
		return SDEI_EINVAL;

	/* Raise SGI. Platform will validate target_pe */
	plat_ic_raise_el3_sgi((int) map->intr, (u_register_t) target_pe);

	return 0;
}

/* Query SDEI dispatcher features */
static uint64_t sdei_features(unsigned int feature)
{
	if (feature == SDEI_FEATURE_BIND_SLOTS) {
		return FEATURE_BIND_SLOTS(num_dyn_priv_slots,
				num_dyn_shrd_slots);
	}

	return (uint64_t) SDEI_EINVAL;
}
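
/*
 * Per the SDEI specification, the BIND_SLOTS feature result packs the number
 * of dynamic private slots into bits [15:0] and the number of dynamic shared
 * slots into bits [31:16]; FEATURE_BIND_SLOTS (defined in sdei_private.h)
 * performs that packing from the counts accumulated during class init.
 */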

/* SDEI top level handler for servicing SMCs */
uint64_t sdei_smc_handler(uint32_t smc_fid,
		uint64_t x1,
		uint64_t x2,
		uint64_t x3,
		uint64_t x4,
		void *cookie,
		void *handle,
		uint64_t flags)
{
	uint64_t x5;
	unsigned int ss = (unsigned int) get_interrupt_src_ss(flags);
	int64_t ret;
	bool resume = false;
	cpu_context_t *ctx = handle;
	int ev_num = (int) x1;

	if (ss != NON_SECURE)
		SMC_RET1(ctx, SMC_UNK);

	/* Verify the caller EL */
	if (GET_EL(read_spsr_el3()) != sdei_client_el())
		SMC_RET1(ctx, SMC_UNK);

	switch (smc_fid) {
	case SDEI_VERSION:
		SDEI_LOG("> VER\n");
		ret = (int64_t) sdei_version();
		SDEI_LOG("< VER:%llx\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_REGISTER:
		x5 = SMC_GET_GP(ctx, CTX_GPREG_X5);
		SDEI_LOG("> REG(n:%d e:%llx a:%llx f:%x m:%llx)\n", ev_num,
				x2, x3, (int) x4, x5);
		ret = sdei_event_register(ev_num, x2, x3, x4, x5);
		SDEI_LOG("< REG:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_ENABLE:
		SDEI_LOG("> ENABLE(n:%d)\n", ev_num);
		ret = sdei_event_enable(ev_num);
		SDEI_LOG("< ENABLE:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_DISABLE:
		SDEI_LOG("> DISABLE(n:%d)\n", ev_num);
		ret = sdei_event_disable(ev_num);
		SDEI_LOG("< DISABLE:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_CONTEXT:
		SDEI_LOG("> CTX(p:%d):%lx\n", (int) x1, read_mpidr_el1());
		ret = sdei_event_context(ctx, (unsigned int) x1);
		SDEI_LOG("< CTX:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_COMPLETE_AND_RESUME:
		resume = true;
		/* Fallthrough */

	case SDEI_EVENT_COMPLETE:
		SDEI_LOG("> COMPLETE(r:%u sta/ep:%llx):%lx\n",
				(unsigned int) resume, x1, read_mpidr_el1());
		ret = sdei_event_complete(resume, x1);
		SDEI_LOG("< COMPLETE:%llx\n", ret);

		/*
		 * Set error code only if the call failed. If the call
		 * succeeded, we discard the dispatched context, and restore
		 * the interrupted context to a pristine condition, which
		 * therefore shouldn't be modified. We don't return to the
		 * caller in this case anyway.
		 */
		if (ret != 0)
			SMC_RET1(ctx, ret);

		SMC_RET0(ctx);

	case SDEI_EVENT_STATUS:
		SDEI_LOG("> STAT(n:%d)\n", ev_num);
		ret = sdei_event_status(ev_num);
		SDEI_LOG("< STAT:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_GET_INFO:
		SDEI_LOG("> INFO(n:%d, %d)\n", ev_num, (int) x2);
		ret = sdei_event_get_info(ev_num, (int) x2);
		SDEI_LOG("< INFO:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_UNREGISTER:
		SDEI_LOG("> UNREG(n:%d)\n", ev_num);
		ret = sdei_event_unregister(ev_num);
		SDEI_LOG("< UNREG:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_PE_UNMASK:
		SDEI_LOG("> UNMASK:%lx\n", read_mpidr_el1());
		sdei_pe_unmask();
		SDEI_LOG("< UNMASK:%d\n", 0);
		SMC_RET1(ctx, 0);

	case SDEI_PE_MASK:
		SDEI_LOG("> MASK:%lx\n", read_mpidr_el1());
		ret = sdei_pe_mask();
		SDEI_LOG("< MASK:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_INTERRUPT_BIND:
		SDEI_LOG("> BIND(%d)\n", (int) x1);
		ret = sdei_interrupt_bind((unsigned int) x1);
		SDEI_LOG("< BIND:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_INTERRUPT_RELEASE:
		SDEI_LOG("> REL(%d)\n", ev_num);
		ret = sdei_interrupt_release(ev_num);
		SDEI_LOG("< REL:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_SHARED_RESET:
		SDEI_LOG("> S_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_shared_reset();
		SDEI_LOG("< S_RESET:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_PRIVATE_RESET:
		SDEI_LOG("> P_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_private_reset();
		SDEI_LOG("< P_RESET:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_ROUTING_SET:
		SDEI_LOG("> ROUTE_SET(n:%d f:%llx aff:%llx)\n", ev_num, x2, x3);
		ret = sdei_event_routing_set(ev_num, x2, x3);
		SDEI_LOG("< ROUTE_SET:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_FEATURES:
		SDEI_LOG("> FTRS(f:%llx)\n", x1);
		ret = (int64_t) sdei_features((unsigned int) x1);
		SDEI_LOG("< FTRS:%llx\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_SIGNAL:
		SDEI_LOG("> SIGNAL(e:%d t:%llx)\n", ev_num, x2);
		ret = sdei_signal(ev_num, x2);
		SDEI_LOG("< SIGNAL:%lld\n", ret);
		SMC_RET1(ctx, ret);

	default:
		/* Do nothing in default case */
		break;
	}

	WARN("Unimplemented SDEI Call: 0x%x\n", smc_fid);
	SMC_RET1(ctx, SMC_UNK);
}
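
/*
 * Note: this handler is not registered as a runtime service of its own; it
 * is reached from the Standard Service SMC handler, which routes function
 * IDs in the SDEI range (see is_sdei_fid()) here.
 */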

/* Subscribe to PSCI CPU on to initialize per-CPU SDEI configuration */
SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, sdei_cpu_on_init);

/* Subscribe to PSCI CPU suspend finisher for per-CPU configuration */
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, sdei_cpu_wakeup_init);