/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bl31.h>
#include <bl_common.h>
#include <cassert.h>
#include <context.h>
#include <debug.h>
#include <ehf.h>
#include <interrupt_mgmt.h>
#include <platform.h>
#include <pubsub.h>
#include <runtime_svc.h>
#include <sdei.h>
#include <stddef.h>
#include <string.h>
#include <utils.h>
#include "sdei_private.h"

/* SDEI implementation version numbers, reported via SDEI_VERSION */
#define MAJOR_VERSION	1ULL
#define MINOR_VERSION	0ULL
#define VENDOR_VERSION	0ULL

/* Pack version fields: major in bits [63:48], minor in [47:32], vendor below */
#define MAKE_SDEI_VERSION(_major, _minor, _vendor) \
	((((_major)) << 48ULL) | (((_minor)) << 32ULL) | (_vendor))

/* Priority value handed back to the interrupt controller on unregister */
#define LOWEST_INTR_PRIORITY	0xff

/* An MPIDR is valid only if the platform can map it to a core position */
#define is_valid_affinity(_mpidr)	(plat_core_pos_by_mpidr(_mpidr) >= 0)

/* Lower numeric value means higher priority: Critical must outrank Normal */
CASSERT(PLAT_SDEI_CRITICAL_PRI < PLAT_SDEI_NORMAL_PRI,
		sdei_critical_must_have_higher_priority);

/* Counts of dynamic (bindable) event slots, tallied in sdei_class_init() */
static unsigned int num_dyn_priv_slots, num_dyn_shrd_slots;

/* Initialise SDEI map entries */
static void init_map(sdei_ev_map_t *map)
{
	map->reg_count = 0;
}

/* Convert mapping to SDEI class */
static sdei_class_t map_to_class(sdei_ev_map_t *map)
{
	return is_event_critical(map) ? SDEI_CRITICAL : SDEI_NORMAL;
}

/* Clear SDEI event entries except state */
static void clear_event_entries(sdei_entry_t *se)
{
	se->ep = 0;
	se->arg = 0;
	se->affinity = 0;
	se->reg_flags = 0;
}

/*
 * Perform CPU-specific state initialisation: reset all private event entries
 * on this PE and leave SDEI events masked. Registered as a PSCI cpu-on
 * subscriber (see SUBSCRIBE_TO_EVENT at the bottom of this file) and also
 * called directly from sdei_class_init() for the boot CPU.
 */
static void *sdei_cpu_on_init(const void *arg)
{
	unsigned int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	/* Initialize private mappings on this CPU */
	for_each_private_map(i, map) {
		se = get_event_entry(map);
		clear_event_entries(se);
		se->state = 0;
	}

	SDEI_LOG("Private events initialized on %lx\n", read_mpidr_el1());

	/* All PEs start with SDEI events masked */
	(void) sdei_pe_mask();

	return NULL;
}

/*
 * Initialise an SDEI class: sanity-check the platform-supplied shared and
 * private mappings (sorted order, flag combinations, interrupt kinds), count
 * dynamic slots, and mark statically-bound platform events as bound. Only
 * mappings belonging to 'class' are configured on each invocation.
 */
static void sdei_class_init(sdei_class_t class)
{
	unsigned int i;
	bool zero_found __unused = false;
	int ev_num_so_far __unused;
	sdei_ev_map_t *map;

	/* Sanity check and configuration of shared events */
	ev_num_so_far = -1;
	for_each_shared_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));

		ev_num_so_far = map->ev_num;

		/* Event 0 must not be shared */
		assert(map->ev_num != SDEI_EVENT_0);

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a shared event */
		assert(is_event_shared(map));

		/* No shared mapping should have signalable property */
		assert(!is_event_signalable(map));

		/* Shared mappings can't be explicit */
		assert(!is_map_explicit(map));
#endif

		/* Skip initializing the wrong priority */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (is_map_dynamic(map)) {
			assert(map->intr == SDEI_DYN_IRQ);
			assert(is_event_normal(map));
			num_dyn_shrd_slots++;
		} else {
			/* Shared mappings must be bound to shared interrupt */
			assert(plat_ic_is_spi(map->intr) != 0);
			set_map_bound(map);
		}

		init_map(map);
	}

	/* Sanity check and configuration of private events for this CPU */
	ev_num_so_far = -1;
	for_each_private_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));

		ev_num_so_far = map->ev_num;

		if (map->ev_num == SDEI_EVENT_0) {
			zero_found = true;

			/* Event 0 must be a Secure SGI */
			assert(is_secure_sgi(map->intr));

			/*
			 * Event 0 can only have the signalable flag (apart
			 * from being private).
			 */
			assert(map->map_flags == (SDEI_MAPF_SIGNALABLE |
						SDEI_MAPF_PRIVATE));
		} else {
			/* No other mapping should have signalable property */
			assert(!is_event_signalable(map));
		}

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a private event */
		assert(is_event_private(map));

		/*
		 * Other than priority, explicit events can only have explicit
		 * and private flags set.
		 */
		if (is_map_explicit(map)) {
			assert((map->map_flags | SDEI_MAPF_CRITICAL) ==
					(SDEI_MAPF_EXPLICIT | SDEI_MAPF_PRIVATE
					| SDEI_MAPF_CRITICAL));
		}
#endif

		/* Skip initializing the wrong priority */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (map->ev_num != SDEI_EVENT_0) {
			if (is_map_dynamic(map)) {
				assert(map->intr == SDEI_DYN_IRQ);
				assert(is_event_normal(map));
				num_dyn_priv_slots++;
			} else if (is_map_explicit(map)) {
				/*
				 * Explicit mappings don't have a backing
				 * SDEI interrupt, but verify that anyway.
				 */
				assert(map->intr == SDEI_DYN_IRQ);
			} else {
				/*
				 * Private mappings must be bound to private
				 * interrupt.
				 */
				assert(plat_ic_is_ppi((unsigned) map->intr) != 0);
				set_map_bound(map);
			}
		}

		init_map(map);
	}

	/* Ensure event 0 is in the mapping */
	assert(zero_found);

	(void) sdei_cpu_on_init(NULL);
}

/* SDEI dispatcher initialisation */
void sdei_init(void)
{
	sdei_class_init(SDEI_CRITICAL);
	sdei_class_init(SDEI_NORMAL);

	/* Register priority level handlers */
	ehf_register_priority_handler(PLAT_SDEI_CRITICAL_PRI,
			sdei_intr_handler);
	ehf_register_priority_handler(PLAT_SDEI_NORMAL_PRI,
			sdei_intr_handler);
}

/* Populate SDEI event entry */
static void set_sdei_entry(sdei_entry_t *se, uint64_t ep, uint64_t arg,
		unsigned int flags, uint64_t affinity)
{
	assert(se != NULL);

	se->ep = ep;
	se->arg = arg;
	se->affinity = (affinity & MPIDR_AFFINITY_MASK);
	se->reg_flags = flags;
}

/* Return the packed SDEI version for the SDEI_VERSION call */
static uint64_t sdei_version(void)
{
	return MAKE_SDEI_VERSION(MAJOR_VERSION, MINOR_VERSION, VENDOR_VERSION);
}

/* Validate flags and MPIDR values for REGISTER and ROUTING_SET calls */
static int validate_flags(uint64_t flags, uint64_t mpidr)
{
	/* Validate flags */
	switch (flags) {
	case SDEI_REGF_RM_PE:
		/* Routing to a specific PE requires a valid target MPIDR */
		if (!is_valid_affinity(mpidr))
			return SDEI_EINVAL;
		break;
	case SDEI_REGF_RM_ANY:
		break;
	default:
		/* Unknown flags */
		return SDEI_EINVAL;
	}

	return 0;
}

/* Set routing of an SDEI event */
static int sdei_event_routing_set(int ev_num, uint64_t flags, uint64_t mpidr)
{
	int ret;
	unsigned int routing;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	ret = validate_flags(flags, mpidr);
	if (ret != 0)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	/* The event must not be private */
	if (is_event_private(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	/* Re-check under the lock: routing only applies to bound, shared events */
	if (!is_map_bound(map) || is_event_private(map)) {
		ret = SDEI_EINVAL;
		goto finish;
	}

	if (!can_sdei_state_trans(se, DO_ROUTING)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/* Choose appropriate routing */
	routing = (unsigned int) ((flags == SDEI_REGF_RM_ANY) ?
		INTR_ROUTING_MODE_ANY : INTR_ROUTING_MODE_PE);

	/* Update event registration flag */
	se->reg_flags = (unsigned int) flags;

	/*
	 * ROUTING_SET is permissible only when event composite state is
	 * 'registered, disabled, and not running'. This means that the
	 * interrupt is currently disabled, and not active.
	 */
	plat_ic_set_spi_routing(map->intr, routing, (u_register_t) mpidr);

finish:
	sdei_map_unlock(map);

	return ret;
}

/* Register handler and argument for an SDEI event */
static int64_t sdei_event_register(int ev_num, uint64_t ep, uint64_t arg,
		uint64_t flags, uint64_t mpidr)
{
	int ret;
	unsigned int routing;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	sdei_state_t backup_state;

	/* The entry point must be non-zero and valid for the client EL */
	if ((ep == 0U) || (plat_sdei_validate_entry_point(
					ep, sdei_client_el()) != 0)) {
		return SDEI_EINVAL;
	}

	ret = validate_flags(flags, mpidr);
	if (ret != 0)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	/* Private events always target the PE */
	if (is_event_private(map))
		flags = SDEI_REGF_RM_PE;

	se = get_event_entry(map);

	/*
	 * Even though register operation is per-event (additionally for private
	 * events, registration is required individually), it has to be
	 * serialised with respect to bind/release, which are global operations.
	 * So we hold the lock throughout, unconditionally.
	 */
	sdei_map_lock(map);

	/* Snapshot state so it can be restored on any failure below */
	backup_state = se->state;
	if (!can_sdei_state_trans(se, DO_REGISTER))
		goto fallback;

	/*
	 * When registering for dynamic events, make sure it's been bound
	 * already. This has to be the case as, without binding, the client
	 * can't know about the event number to register for.
	 */
	if (is_map_dynamic(map) && !is_map_bound(map))
		goto fallback;

	if (is_event_private(map)) {
		/* Multiple calls to register are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only single call to register is possible for shared events */
		assert(map->reg_count == 0);
	}

	if (is_map_bound(map)) {
		/* Meanwhile, did any PE ACK the interrupt? */
		if (plat_ic_get_interrupt_active(map->intr) != 0U)
			goto fallback;

		/* The interrupt must currently be owned by Non-secure */
		if (plat_ic_get_interrupt_type(map->intr) != INTR_TYPE_NS)
			goto fallback;

		/*
		 * Disable forwarding of new interrupt triggers to CPU
		 * interface.
		 */
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Any events that are triggered after register and before
		 * enable should remain pending. Clear any previous interrupt
		 * triggers which are pending (except for SGIs). This has no
		 * effect on level-triggered interrupts.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		/* Map interrupt to EL3 and program the correct priority */
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_EL3);

		/* Program the appropriate interrupt priority */
		plat_ic_set_interrupt_priority(map->intr, sdei_event_priority(map));

		/*
		 * Set the routing mode for shared event as requested. We
		 * already ensure that shared events get bound to SPIs.
		 */
		if (is_event_shared(map)) {
			routing = (unsigned int) ((flags == SDEI_REGF_RM_ANY) ?
				INTR_ROUTING_MODE_ANY : INTR_ROUTING_MODE_PE);
			plat_ic_set_spi_routing(map->intr, routing,
					(u_register_t) mpidr);
		}
	}

	/* Populate event entries */
	set_sdei_entry(se, ep, arg, (unsigned int) flags, mpidr);

	/* Increment register count */
	map->reg_count++;

	sdei_map_unlock(map);

	return 0;

fallback:
	/* Reinstate previous state */
	se->state = backup_state;

	sdei_map_unlock(map);

	return SDEI_EDENY;
}

/* Enable SDEI event */
static int64_t sdei_event_enable(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	int ret;
	bool before, after;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);
	ret = SDEI_EDENY;

	/* Private events are per-PE, so only shared entries need locking */
	if (is_event_shared(map))
		sdei_map_lock(map);

	before = GET_EV_STATE(se, ENABLED);
	if (!can_sdei_state_trans(se, DO_ENABLE))
		goto finish;
	after = GET_EV_STATE(se, ENABLED);

	/*
	 * Enable interrupt for bound events only if there's a change in enabled
	 * state.
	 */
	if (is_map_bound(map) && (!before && after))
		plat_ic_enable_interrupt(map->intr);

	ret = 0;

finish:
	if (is_event_shared(map))
		sdei_map_unlock(map);

	return ret;
}

/* Disable SDEI event */
static int sdei_event_disable(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	int ret;
	bool before, after;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);
	ret = SDEI_EDENY;

	/* Private events are per-PE, so only shared entries need locking */
	if (is_event_shared(map))
		sdei_map_lock(map);

	before = GET_EV_STATE(se, ENABLED);
	if (!can_sdei_state_trans(se, DO_DISABLE))
		goto finish;
	after = GET_EV_STATE(se, ENABLED);

	/*
	 * Disable interrupt for bound events only if there's a change in
	 * enabled state.
	 */
	if (is_map_bound(map) && (before && !after))
		plat_ic_disable_interrupt(map->intr);

	ret = 0;

finish:
	if (is_event_shared(map))
		sdei_map_unlock(map);

	return ret;
}

/* Query SDEI event information */
static int64_t sdei_event_get_info(int ev_num, int info)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	uint64_t flags;
	bool registered;
	uint64_t affinity;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Sample state under lock */
	registered = GET_EV_STATE(se, REGISTERED);
	flags = se->reg_flags;
	affinity = se->affinity;

	if (is_event_shared(map))
		sdei_map_unlock(map);

	switch (info) {
	case SDEI_INFO_EV_TYPE:
		return is_event_shared(map);

	case SDEI_INFO_EV_NOT_SIGNALED:
		return !is_event_signalable(map);

	case SDEI_INFO_EV_PRIORITY:
		return is_event_critical(map);

	case SDEI_INFO_EV_ROUTING_MODE:
		/* Routing mode only makes sense for registered shared events */
		if (!is_event_shared(map))
			return SDEI_EINVAL;
		if (!registered)
			return SDEI_EDENY;
		return (flags == SDEI_REGF_RM_PE);

	case SDEI_INFO_EV_ROUTING_AFF:
		/* Affinity is only valid when routed to a specific PE */
		if (!is_event_shared(map))
			return SDEI_EINVAL;
		if (!registered)
			return SDEI_EDENY;
		if (flags != SDEI_REGF_RM_PE)
			return SDEI_EINVAL;
		return affinity;

	default:
		return SDEI_EINVAL;
	}
}

/* Unregister an SDEI event */
static int sdei_event_unregister(int ev_num)
{
	int ret = 0;
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	/*
	 * Even though unregister operation is per-event (additionally for
	 * private events, unregistration is required individually), it has to
	 * be serialised with respect to bind/release, which are global
	 * operations. So we hold the lock throughout, unconditionally.
	 */
	sdei_map_lock(map);

	if (!can_sdei_state_trans(se, DO_UNREGISTER)) {
		/*
		 * Even if the call is invalid, and the handler is running (for
		 * example, having unregistered from a running handler earlier),
		 * return pending error code; otherwise, return deny.
		 */
		ret = GET_EV_STATE(se, RUNNING) ? SDEI_EPEND : SDEI_EDENY;

		goto finish;
	}

	map->reg_count--;
	if (is_event_private(map)) {
		/* Multiple calls to register are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only single call to register is possible for shared events */
		assert(map->reg_count == 0);
	}

	if (is_map_bound(map)) {
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Clear pending interrupt. Skip for SGIs as they may not be
		 * cleared on interrupt controllers.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		/* Hand the interrupt back to Non-secure at the lowest priority */
		assert(plat_ic_get_interrupt_type(map->intr) == INTR_TYPE_EL3);
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_NS);
		plat_ic_set_interrupt_priority(map->intr, LOWEST_INTR_PRIORITY);
	}

	clear_event_entries(se);

	/*
	 * If the handler is running at the time of unregister, return the
	 * pending error code.
	 */
	if (GET_EV_STATE(se, RUNNING))
		ret = SDEI_EPEND;

finish:
	sdei_map_unlock(map);

	return ret;
}

/* Query status of an SDEI event */
static int sdei_event_status(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_state_t state;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* State value directly maps to the expected return format */
	state = se->state;

	if (is_event_shared(map))
		sdei_map_unlock(map);

	return (int) state;
}

/* Bind an SDEI event to an interrupt */
static int sdei_interrupt_bind(unsigned int intr_num)
{
	sdei_ev_map_t *map;
	bool retry = true, shared_mapping;

	/* SGIs are not allowed to be bound */
	if (plat_ic_is_sgi(intr_num) != 0)
		return SDEI_EINVAL;

	shared_mapping = (plat_ic_is_spi(intr_num) != 0);
	do {
		/*
		 * Bail out if there is already an event for this interrupt,
		 * either platform-defined or dynamic.
		 */
		map = find_event_map_by_intr(intr_num, shared_mapping);
		if (map != NULL) {
			if (is_map_dynamic(map)) {
				if (is_map_bound(map)) {
					/*
					 * Dynamic event, already bound. Return
					 * event number.
					 */
					return map->ev_num;
				}
			} else {
				/* Binding non-dynamic event */
				return SDEI_EINVAL;
			}
		}

		/*
		 * The interrupt is not bound yet. Try to find a free slot to
		 * bind it. Free dynamic mappings have their interrupt set as
		 * SDEI_DYN_IRQ.
		 */
		map = find_event_map_by_intr(SDEI_DYN_IRQ, shared_mapping);
		if (map == NULL)
			return SDEI_ENOMEM;

		/* The returned mapping must be dynamic */
		assert(is_map_dynamic(map));

		/*
		 * We cannot assert for bound maps here, as we might be racing
		 * with another bind.
		 */

		/* The requested interrupt must already belong to NS */
		if (plat_ic_get_interrupt_type(intr_num) != INTR_TYPE_NS)
			return SDEI_EDENY;

		/*
		 * Interrupt programming and ownership transfer are deferred
		 * until register.
		 */

		/* Claim the slot under the lock; retry if another PE won the race */
		sdei_map_lock(map);
		if (!is_map_bound(map)) {
			map->intr = intr_num;
			set_map_bound(map);
			retry = false;
		}
		sdei_map_unlock(map);
	} while (retry);

	return map->ev_num;
}

/* Release an SDEI event previously bound to an interrupt */
static int sdei_interrupt_release(int ev_num)
{
	int ret = 0;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	/* Only dynamic mappings can be released */
	if (!is_map_dynamic(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	/* Event must have been unregistered before release */
	if (map->reg_count != 0) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/*
	 * Interrupt release never causes the state to change. We only check
	 * whether it's permissible or not.
	 */
	if (!can_sdei_state_trans(se, DO_RELEASE)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	if (is_map_bound(map)) {
		/*
		 * Deny release if the interrupt is active, which means it's
		 * probably being acknowledged and handled elsewhere.
		 */
		if (plat_ic_get_interrupt_active(map->intr) != 0U) {
			ret = SDEI_EDENY;
			goto finish;
		}

		/*
		 * Interrupt programming and ownership transfer are already done
		 * during unregister.
		 */

		map->intr = SDEI_DYN_IRQ;
		clr_map_bound(map);
	} else {
		SDEI_LOG("Error release bound:%d cnt:%d\n", is_map_bound(map),
				map->reg_count);
		ret = SDEI_EINVAL;
	}

finish:
	sdei_map_unlock(map);

	return ret;
}

/* Perform reset of private SDEI events */
static int sdei_private_reset(void)
{
	sdei_ev_map_t *map;
	int ret = 0, final_ret = 0;
	unsigned int i;

	/* Unregister all private events */
	for_each_private_map(i, map) {
		/*
		 * The unregister can fail if the event is not registered, which
		 * is allowed, and a deny will be returned. But if the event is
		 * running or unregister pending, the call fails.
		 */
		ret = sdei_event_unregister(map->ev_num);
		if ((ret == SDEI_EPEND) && (final_ret == 0))
			final_ret = SDEI_EDENY;
	}

	return final_ret;
}

/* Perform reset of shared SDEI events */
static int sdei_shared_reset(void)
{
	const sdei_mapping_t *mapping;
	sdei_ev_map_t *map;
	int ret = 0, final_ret = 0;
	unsigned int i, j;

	/* Unregister all shared events */
	for_each_shared_map(i, map) {
		/*
		 * The unregister can fail if the event is not registered, which
		 * is allowed, and a deny will be returned. But if the event is
		 * running or unregister pending, the call fails.
		 */
		ret = sdei_event_unregister(map->ev_num);
		if ((ret == SDEI_EPEND) && (final_ret == 0))
			final_ret = SDEI_EDENY;
	}

	if (final_ret != 0)
		return final_ret;

	/*
	 * Loop through both private and shared mappings, and release all
	 * bindings.
	 */
	for_each_mapping_type(i, mapping) {
		iterate_mapping(mapping, j, map) {
			/*
			 * Release bindings for mappings that are dynamic and
			 * bound.
			 */
			if (is_map_dynamic(map) && is_map_bound(map)) {
				/*
				 * Any failure to release would mean there is at
				 * least a PE registered for the event.
				 */
				ret = sdei_interrupt_release(map->ev_num);
				if ((ret != 0) && (final_ret == 0))
					final_ret = ret;
			}
		}
	}

	return final_ret;
}

/* Send a signal to another SDEI client PE */
static int sdei_signal(int ev_num, uint64_t target_pe)
{
	sdei_ev_map_t *map;

	/* Only event 0 can be signalled */
	if (ev_num != SDEI_EVENT_0)
		return SDEI_EINVAL;

	/* Find mapping for event 0 */
	map = find_event_map(SDEI_EVENT_0);
	if (map == NULL)
		return SDEI_EINVAL;

	/* The event must be signalable */
	if (!is_event_signalable(map))
		return SDEI_EINVAL;

	/* Validate target */
	if (plat_core_pos_by_mpidr(target_pe) < 0)
		return SDEI_EINVAL;

	/* Raise SGI. Platform will validate target_pe */
	plat_ic_raise_el3_sgi((int) map->intr, (u_register_t) target_pe);

	return 0;
}

/* Query SDEI dispatcher features */
static uint64_t sdei_features(unsigned int feature)
{
	if (feature == SDEI_FEATURE_BIND_SLOTS) {
		return FEATURE_BIND_SLOTS(num_dyn_priv_slots,
				num_dyn_shrd_slots);
	}

	return (uint64_t) SDEI_EINVAL;
}

/*
 * SDEI top level handler for servicing SMCs: validates the caller (Non-secure
 * world, correct client EL) and dispatches to the per-function implementations
 * above. Returns the result to the caller via SMC_RET macros.
 */
uint64_t sdei_smc_handler(uint32_t smc_fid,
		uint64_t x1,
		uint64_t x2,
		uint64_t x3,
		uint64_t x4,
		void *cookie,
		void *handle,
		uint64_t flags)
{

	uint64_t x5;
	unsigned int ss = (unsigned int) get_interrupt_src_ss(flags);
	int64_t ret;
	bool resume = false;
	cpu_context_t *ctx = handle;
	int ev_num = (int) x1;

	/* SDEI calls are only accepted from the Non-secure world */
	if (ss != NON_SECURE)
		SMC_RET1(ctx, SMC_UNK);

	/* Verify the caller EL */
	if (GET_EL(read_spsr_el3()) != sdei_client_el())
		SMC_RET1(ctx, SMC_UNK);

	switch (smc_fid) {
	case SDEI_VERSION:
		SDEI_LOG("> VER\n");
		ret = (int64_t) sdei_version();
		SDEI_LOG("< VER:%llx\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_REGISTER:
		x5 = SMC_GET_GP(ctx, CTX_GPREG_X5);
		SDEI_LOG("> REG(n:%d e:%llx a:%llx f:%x m:%llx)\n", ev_num,
				x2, x3, (int) x4, x5);
		ret = sdei_event_register(ev_num, x2, x3, x4, x5);
		SDEI_LOG("< REG:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_ENABLE:
		SDEI_LOG("> ENABLE(n:%d)\n", (int) x1);
		ret = sdei_event_enable(ev_num);
		SDEI_LOG("< ENABLE:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_DISABLE:
		SDEI_LOG("> DISABLE(n:%d)\n", ev_num);
		ret = sdei_event_disable(ev_num);
		SDEI_LOG("< DISABLE:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_CONTEXT:
		SDEI_LOG("> CTX(p:%d):%lx\n", (int) x1, read_mpidr_el1());
		ret = sdei_event_context(ctx, (unsigned int) x1);
		SDEI_LOG("< CTX:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_COMPLETE_AND_RESUME:
		resume = true;
		/* Fallthrough */

	case SDEI_EVENT_COMPLETE:
		SDEI_LOG("> COMPLETE(r:%u sta/ep:%llx):%lx\n",
				(unsigned int) resume, x1, read_mpidr_el1());
		ret = sdei_event_complete(resume, x1);
		SDEI_LOG("< COMPLETE:%llx\n", ret);

		/*
		 * Set error code only if the call failed. If the call
		 * succeeded, we discard the dispatched context, and restore the
		 * interrupted context to a pristine condition, and therefore
		 * shouldn't be modified. We don't return to the caller in this
		 * case anyway.
		 */
		if (ret != 0)
			SMC_RET1(ctx, ret);

		SMC_RET0(ctx);

	case SDEI_EVENT_STATUS:
		SDEI_LOG("> STAT(n:%d)\n", ev_num);
		ret = sdei_event_status(ev_num);
		SDEI_LOG("< STAT:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_GET_INFO:
		SDEI_LOG("> INFO(n:%d, %d)\n", ev_num, (int) x2);
		ret = sdei_event_get_info(ev_num, (int) x2);
		SDEI_LOG("< INFO:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_UNREGISTER:
		SDEI_LOG("> UNREG(n:%d)\n", ev_num);
		ret = sdei_event_unregister(ev_num);
		SDEI_LOG("< UNREG:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_PE_UNMASK:
		SDEI_LOG("> UNMASK:%lx\n", read_mpidr_el1());
		sdei_pe_unmask();
		SDEI_LOG("< UNMASK:%d\n", 0);
		SMC_RET1(ctx, 0);

	case SDEI_PE_MASK:
		SDEI_LOG("> MASK:%lx\n", read_mpidr_el1());
		ret = sdei_pe_mask();
		SDEI_LOG("< MASK:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_INTERRUPT_BIND:
		SDEI_LOG("> BIND(%d)\n", (int) x1);
		ret = sdei_interrupt_bind((unsigned int) x1);
		SDEI_LOG("< BIND:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_INTERRUPT_RELEASE:
		SDEI_LOG("> REL(%d)\n", ev_num);
		ret = sdei_interrupt_release(ev_num);
		SDEI_LOG("< REL:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_SHARED_RESET:
		SDEI_LOG("> S_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_shared_reset();
		SDEI_LOG("< S_RESET:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_PRIVATE_RESET:
		SDEI_LOG("> P_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_private_reset();
		SDEI_LOG("< P_RESET:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_ROUTING_SET:
		SDEI_LOG("> ROUTE_SET(n:%d f:%llx aff:%llx)\n", ev_num, x2, x3);
		ret = sdei_event_routing_set(ev_num, x2, x3);
		SDEI_LOG("< ROUTE_SET:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_FEATURES:
		SDEI_LOG("> FTRS(f:%llx)\n", x1);
		ret = (int64_t) sdei_features((unsigned int) x1);
		SDEI_LOG("< FTRS:%llx\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_SIGNAL:
		SDEI_LOG("> SIGNAL(e:%d t:%llx)\n", ev_num, x2);
		ret = sdei_signal(ev_num, x2);
		SDEI_LOG("< SIGNAL:%lld\n", ret);
		SMC_RET1(ctx, ret);

	default:
		/* Do nothing in default case */
		break;
	}

	WARN("Unimplemented SDEI Call: 0x%x\n", smc_fid);
	SMC_RET1(ctx, SMC_UNK);
}

/* Subscribe to PSCI CPU on to initialize per-CPU SDEI configuration */
SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, sdei_cpu_on_init);