/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <stddef.h>
#include <string.h>

#include <bl31/bl31.h>
#include <bl31/ehf.h>
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <lib/cassert.h>
#include <lib/el3_runtime/pubsub.h>
#include <lib/utils.h>
#include <plat/common/platform.h>
#include <services/sdei.h>

#include "sdei_private.h"

#define MAJOR_VERSION	1ULL
#define MINOR_VERSION	0ULL
#define VENDOR_VERSION	0ULL

#define MAKE_SDEI_VERSION(_major, _minor, _vendor) \
	(((_major) << 48ULL) | ((_minor) << 32ULL) | (_vendor))

#define LOWEST_INTR_PRIORITY	0xff

#define is_valid_affinity(_mpidr)	(plat_core_pos_by_mpidr(_mpidr) >= 0)

CASSERT(PLAT_SDEI_CRITICAL_PRI < PLAT_SDEI_NORMAL_PRI,
		sdei_critical_must_have_higher_priority);

static unsigned int num_dyn_priv_slots, num_dyn_shrd_slots;

/* Initialise SDEI map entries */
static void init_map(sdei_ev_map_t *map)
{
	map->reg_count = 0;
}

/* Convert mapping to SDEI class */
static sdei_class_t map_to_class(sdei_ev_map_t *map)
{
	return is_event_critical(map) ? SDEI_CRITICAL : SDEI_NORMAL;
}

/* Clear SDEI event entries except state */
static void clear_event_entries(sdei_entry_t *se)
{
	se->ep = 0;
	se->arg = 0;
	se->affinity = 0;
	se->reg_flags = 0;
}

/* Perform CPU-specific state initialisation */
static void *sdei_cpu_on_init(const void *arg)
{
	unsigned int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	/* Initialize private mappings on this CPU */
	for_each_private_map(i, map) {
		se = get_event_entry(map);
		clear_event_entries(se);
		se->state = 0;
	}

	SDEI_LOG("Private events initialized on %lx\n", read_mpidr_el1());

	/* All PEs start with SDEI events masked */
	(void) sdei_pe_mask();

	return NULL;
}

/* CPU initialisation after wakeup from suspend */
static void *sdei_cpu_wakeup_init(const void *arg)
{
	SDEI_LOG("Events masked on %lx\n", read_mpidr_el1());

	/* All PEs wake up with SDEI events masked */
	(void) sdei_pe_mask();

	return NULL;
}

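/*
 * Summary of the mapping-table invariants asserted by sdei_class_init()
 * below, collected here for reference:
 *  - mappings are sorted by event number within each table;
 *  - event 0 is private, signalable, and backed by a Secure SGI;
 *  - shared events map to SPIs, private events to PPIs;
 *  - dynamic mappings start out unbound, with their interrupt set to
 *    SDEI_DYN_IRQ.
 */
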
/* Initialise an SDEI class */
static void sdei_class_init(sdei_class_t class)
{
	unsigned int i;
	bool zero_found __unused = false;
	int ev_num_so_far __unused;
	sdei_ev_map_t *map;

	/* Sanity check and configuration of shared events */
	ev_num_so_far = -1;
	for_each_shared_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));

		ev_num_so_far = map->ev_num;

		/* Event 0 must not be shared */
		assert(map->ev_num != SDEI_EVENT_0);

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a shared event */
		assert(is_event_shared(map));

		/* No shared mapping should have signalable property */
		assert(!is_event_signalable(map));

		/* Shared mappings can't be explicit */
		assert(!is_map_explicit(map));
#endif

		/* Skip initializing the wrong priority */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (is_map_dynamic(map)) {
			assert(map->intr == SDEI_DYN_IRQ);
			assert(is_event_normal(map));
			num_dyn_shrd_slots++;
		} else {
			/* Shared mappings must be bound to shared interrupt */
			assert(plat_ic_is_spi(map->intr) != 0);
			set_map_bound(map);
		}

		init_map(map);
	}

	/* Sanity check and configuration of private events for this CPU */
	ev_num_so_far = -1;
	for_each_private_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));

		ev_num_so_far = map->ev_num;

		if (map->ev_num == SDEI_EVENT_0) {
			zero_found = true;

			/* Event 0 must be a Secure SGI */
			assert(is_secure_sgi(map->intr));

			/*
			 * Event 0 can only have the signalable flag (apart
			 * from being private).
			 */
			assert(map->map_flags == (SDEI_MAPF_SIGNALABLE |
						SDEI_MAPF_PRIVATE));
		} else {
			/* No other mapping should have signalable property */
			assert(!is_event_signalable(map));
		}

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a private event */
		assert(is_event_private(map));

		/*
		 * Other than priority, explicit events can only have explicit
		 * and private flags set.
		 */
		if (is_map_explicit(map)) {
			assert((map->map_flags | SDEI_MAPF_CRITICAL) ==
					(SDEI_MAPF_EXPLICIT | SDEI_MAPF_PRIVATE
					| SDEI_MAPF_CRITICAL));
		}
#endif

		/* Skip initializing the wrong priority */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (map->ev_num != SDEI_EVENT_0) {
			if (is_map_dynamic(map)) {
				assert(map->intr == SDEI_DYN_IRQ);
				assert(is_event_normal(map));
				num_dyn_priv_slots++;
			} else if (is_map_explicit(map)) {
				/*
				 * Explicit mappings don't have a backing
				 * SDEI interrupt, but verify that anyway.
				 */
				assert(map->intr == SDEI_DYN_IRQ);
			} else {
				/*
				 * Private mappings must be bound to private
				 * interrupt.
				 */
				assert(plat_ic_is_ppi((unsigned) map->intr) != 0);
				set_map_bound(map);
			}
		}

		init_map(map);
	}

	/* Ensure event 0 is in the mapping */
	assert(zero_found);

	(void) sdei_cpu_on_init(NULL);
}

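/*
 * Note: both SDEI priority levels are serviced by the same handler,
 * sdei_intr_handler. The Exception Handling Framework invokes it based on
 * the EL3 interrupt priority that sdei_event_register() later programs for
 * each bound interrupt.
 */
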
/* SDEI dispatcher initialisation */
void sdei_init(void)
{
	sdei_class_init(SDEI_CRITICAL);
	sdei_class_init(SDEI_NORMAL);

	/* Register priority level handlers */
	ehf_register_priority_handler(PLAT_SDEI_CRITICAL_PRI,
			sdei_intr_handler);
	ehf_register_priority_handler(PLAT_SDEI_NORMAL_PRI,
			sdei_intr_handler);
}

/* Populate SDEI event entry */
static void set_sdei_entry(sdei_entry_t *se, uint64_t ep, uint64_t arg,
		unsigned int flags, uint64_t affinity)
{
	assert(se != NULL);

	se->ep = ep;
	se->arg = arg;
	se->affinity = (affinity & MPIDR_AFFINITY_MASK);
	se->reg_flags = flags;
}

static uint64_t sdei_version(void)
{
	return MAKE_SDEI_VERSION(MAJOR_VERSION, MINOR_VERSION, VENDOR_VERSION);
}

/* Validate flags and MPIDR values for REGISTER and ROUTING_SET calls */
static int validate_flags(uint64_t flags, uint64_t mpidr)
{
	/* Validate flags */
	switch (flags) {
	case SDEI_REGF_RM_PE:
		if (!is_valid_affinity(mpidr))
			return SDEI_EINVAL;
		break;
	case SDEI_REGF_RM_ANY:
		break;
	default:
		/* Unknown flags */
		return SDEI_EINVAL;
	}

	return 0;
}

/* Set routing of an SDEI event */
static int sdei_event_routing_set(int ev_num, uint64_t flags, uint64_t mpidr)
{
	int ret;
	unsigned int routing;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	ret = validate_flags(flags, mpidr);
	if (ret != 0)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	/* The event must not be private */
	if (is_event_private(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	if (!is_map_bound(map) || is_event_private(map)) {
		ret = SDEI_EINVAL;
		goto finish;
	}

	if (!can_sdei_state_trans(se, DO_ROUTING)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/* Choose appropriate routing */
	routing = (unsigned int) ((flags == SDEI_REGF_RM_ANY) ?
			INTR_ROUTING_MODE_ANY : INTR_ROUTING_MODE_PE);

	/* Update event registration flag */
	se->reg_flags = (unsigned int) flags;

	/*
	 * ROUTING_SET is permissible only when the event's composite state is
	 * 'registered, disabled, and not running'. This means that the
	 * interrupt is currently disabled, and not active.
	 */
	plat_ic_set_spi_routing(map->intr, routing, (u_register_t) mpidr);

finish:
	sdei_map_unlock(map);

	return ret;
}

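/*
 * For a dynamic event, a typical client sequence leading here is (a sketch
 * inferred from the checks below, not a normative flow):
 *
 *   ev = SDEI_INTERRUPT_BIND(intr);          - obtain an event number
 *   SDEI_EVENT_REGISTER(ev, ep, arg, ...);   - this function
 *   SDEI_EVENT_ENABLE(ev);
 *   SDEI_PE_UNMASK();
 *
 * This ordering is why an unbound dynamic event is denied registration.
 */
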
/* Register handler and argument for an SDEI event */
static int64_t sdei_event_register(int ev_num, uint64_t ep, uint64_t arg,
		uint64_t flags, uint64_t mpidr)
{
	int ret;
	unsigned int routing;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	sdei_state_t backup_state;

	if ((ep == 0U) || (plat_sdei_validate_entry_point(
					ep, sdei_client_el()) != 0)) {
		return SDEI_EINVAL;
	}

	ret = validate_flags(flags, mpidr);
	if (ret != 0)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	/* Private events always target the PE */
	if (is_event_private(map))
		flags = SDEI_REGF_RM_PE;

	se = get_event_entry(map);

	/*
	 * Even though the register operation is per-event (and, for private
	 * events, additionally per-PE), it has to be serialised with respect
	 * to bind/release, which are global operations. So we hold the lock
	 * throughout, unconditionally.
	 */
	sdei_map_lock(map);

	backup_state = se->state;
	if (!can_sdei_state_trans(se, DO_REGISTER))
		goto fallback;

	/*
	 * When registering for dynamic events, make sure it's been bound
	 * already. This has to be the case as, without binding, the client
	 * can't know about the event number to register for.
	 */
	if (is_map_dynamic(map) && !is_map_bound(map))
		goto fallback;

	if (is_event_private(map)) {
		/* Multiple calls to register are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only single call to register is possible for shared events */
		assert(map->reg_count == 0);
	}

	if (is_map_bound(map)) {
		/* Meanwhile, did any PE ACK the interrupt? */
		if (plat_ic_get_interrupt_active(map->intr) != 0U)
			goto fallback;

		/* The interrupt must currently be owned by Non-secure */
		if (plat_ic_get_interrupt_type(map->intr) != INTR_TYPE_NS)
			goto fallback;

		/*
		 * Disable forwarding of new interrupt triggers to CPU
		 * interface.
		 */
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Any events that are triggered after register and before
		 * enable should remain pending. Clear any previous interrupt
		 * triggers which are pending (except for SGIs). This has no
		 * effect on level-triggered interrupts.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		/* Map interrupt to EL3 and program the correct priority */
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_EL3);

		/* Program the appropriate interrupt priority */
		plat_ic_set_interrupt_priority(map->intr, sdei_event_priority(map));

		/*
		 * Set the routing mode for shared event as requested. We
		 * already ensure that shared events get bound to SPIs.
		 */
		if (is_event_shared(map)) {
			routing = (unsigned int) ((flags == SDEI_REGF_RM_ANY) ?
					INTR_ROUTING_MODE_ANY : INTR_ROUTING_MODE_PE);
			plat_ic_set_spi_routing(map->intr, routing,
					(u_register_t) mpidr);
		}
	}

	/* Populate event entries */
	set_sdei_entry(se, ep, arg, (unsigned int) flags, mpidr);

	/* Increment register count */
	map->reg_count++;

	sdei_map_unlock(map);

	return 0;

fallback:
	/* Reinstate previous state */
	se->state = backup_state;

	sdei_map_unlock(map);

	return SDEI_EDENY;
}

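/*
 * A note on locking in ENABLE/DISABLE below (an observation, not part of
 * the original comments): the mapping lock is taken only for shared events,
 * as private event state is per-PE and so cannot be raced by other PEs.
 */
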
/* Enable SDEI event */
static int64_t sdei_event_enable(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	int ret;
	bool before, after;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);
	ret = SDEI_EDENY;

	if (is_event_shared(map))
		sdei_map_lock(map);

	before = GET_EV_STATE(se, ENABLED);
	if (!can_sdei_state_trans(se, DO_ENABLE))
		goto finish;
	after = GET_EV_STATE(se, ENABLED);

	/*
	 * Enable interrupt for bound events only if there's a change in
	 * enabled state.
	 */
	if (is_map_bound(map) && (!before && after))
		plat_ic_enable_interrupt(map->intr);

	ret = 0;

finish:
	if (is_event_shared(map))
		sdei_map_unlock(map);

	return ret;
}

/* Disable SDEI event */
static int sdei_event_disable(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	int ret;
	bool before, after;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);
	ret = SDEI_EDENY;

	if (is_event_shared(map))
		sdei_map_lock(map);

	before = GET_EV_STATE(se, ENABLED);
	if (!can_sdei_state_trans(se, DO_DISABLE))
		goto finish;
	after = GET_EV_STATE(se, ENABLED);

	/*
	 * Disable interrupt for bound events only if there's a change in
	 * enabled state.
	 */
	if (is_map_bound(map) && (before && !after))
		plat_ic_disable_interrupt(map->intr);

	ret = 0;

finish:
	if (is_event_shared(map))
		sdei_map_unlock(map);

	return ret;
}

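/*
 * EVENT_GET_INFO return values, as implemented below: EV_TYPE is 1 for
 * shared events and 0 for private ones; EV_NOT_SIGNALED is 1 when the
 * event cannot be signalled; EV_PRIORITY is 1 for critical events. The
 * routing queries apply only to shared, registered events.
 */
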
/* Query SDEI event information */
static int64_t sdei_event_get_info(int ev_num, int info)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	uint64_t flags;
	bool registered;
	uint64_t affinity;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Sample state under lock */
	registered = GET_EV_STATE(se, REGISTERED);
	flags = se->reg_flags;
	affinity = se->affinity;

	if (is_event_shared(map))
		sdei_map_unlock(map);

	switch (info) {
	case SDEI_INFO_EV_TYPE:
		return is_event_shared(map);

	case SDEI_INFO_EV_NOT_SIGNALED:
		return !is_event_signalable(map);

	case SDEI_INFO_EV_PRIORITY:
		return is_event_critical(map);

	case SDEI_INFO_EV_ROUTING_MODE:
		if (!is_event_shared(map))
			return SDEI_EINVAL;
		if (!registered)
			return SDEI_EDENY;
		return (flags == SDEI_REGF_RM_PE);

	case SDEI_INFO_EV_ROUTING_AFF:
		if (!is_event_shared(map))
			return SDEI_EINVAL;
		if (!registered)
			return SDEI_EDENY;
		if (flags != SDEI_REGF_RM_PE)
			return SDEI_EINVAL;
		return affinity;

	default:
		return SDEI_EINVAL;
	}
}

/* Unregister an SDEI event */
static int sdei_event_unregister(int ev_num)
{
	int ret = 0;
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	/*
	 * Even though the unregister operation is per-event (and, for private
	 * events, additionally per-PE), it has to be serialised with respect
	 * to bind/release, which are global operations. So we hold the lock
	 * throughout, unconditionally.
	 */
	sdei_map_lock(map);

	if (!can_sdei_state_trans(se, DO_UNREGISTER)) {
		/*
		 * If the call is invalid while the handler is running (for
		 * example, the client already unregistered from a running
		 * handler earlier), return the pending error code; otherwise,
		 * return deny.
		 */
		ret = GET_EV_STATE(se, RUNNING) ? SDEI_EPEND : SDEI_EDENY;

		goto finish;
	}

	map->reg_count--;
	if (is_event_private(map)) {
		/* Multiple calls to register are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only single call to register is possible for shared events */
		assert(map->reg_count == 0);
	}

	if (is_map_bound(map)) {
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Clear pending interrupt. Skip for SGIs as they may not be
		 * cleared on interrupt controllers.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		assert(plat_ic_get_interrupt_type(map->intr) == INTR_TYPE_EL3);
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_NS);
		plat_ic_set_interrupt_priority(map->intr, LOWEST_INTR_PRIORITY);
	}

	clear_event_entries(se);

	/*
	 * If the handler is running at the time of unregister, return the
	 * pending error code.
	 */
	if (GET_EV_STATE(se, RUNNING))
		ret = SDEI_EPEND;

finish:
	sdei_map_unlock(map);

	return ret;
}

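/*
 * Per the SDEI specification, EVENT_STATUS returns a bitmap: bit 0 is
 * 'registered', bit 1 'enabled' and bit 2 'running'. The dispatcher keeps
 * the event state in this layout, hence the direct return below.
 */
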
/* Query status of an SDEI event */
static int sdei_event_status(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_state_t state;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* State value directly maps to the expected return format */
	state = se->state;

	if (is_event_shared(map))
		sdei_map_unlock(map);

	return (int) state;
}

/* Bind an SDEI event to an interrupt */
static int sdei_interrupt_bind(unsigned int intr_num)
{
	sdei_ev_map_t *map;
	bool retry = true, shared_mapping;

	/* SGIs are not allowed to be bound */
	if (plat_ic_is_sgi(intr_num) != 0)
		return SDEI_EINVAL;

	shared_mapping = (plat_ic_is_spi(intr_num) != 0);
	do {
		/*
		 * Bail out if there is already an event for this interrupt,
		 * either platform-defined or dynamic.
		 */
		map = find_event_map_by_intr(intr_num, shared_mapping);
		if (map != NULL) {
			if (is_map_dynamic(map)) {
				if (is_map_bound(map)) {
					/*
					 * Dynamic event, already bound. Return
					 * event number.
					 */
					return map->ev_num;
				}
			} else {
				/* Binding a non-dynamic event */
				return SDEI_EINVAL;
			}
		}

		/*
		 * The interrupt is not bound yet. Try to find a free slot to
		 * bind it. Free dynamic mappings have their interrupt set as
		 * SDEI_DYN_IRQ.
		 */
		map = find_event_map_by_intr(SDEI_DYN_IRQ, shared_mapping);
		if (map == NULL)
			return SDEI_ENOMEM;

		/* The returned mapping must be dynamic */
		assert(is_map_dynamic(map));

		/*
		 * We cannot assert for bound maps here, as we might be racing
		 * with another bind.
		 */

		/* The requested interrupt must already belong to NS */
		if (plat_ic_get_interrupt_type(intr_num) != INTR_TYPE_NS)
			return SDEI_EDENY;

		/*
		 * Interrupt programming and ownership transfer are deferred
		 * until register.
		 */

		sdei_map_lock(map);
		if (!is_map_bound(map)) {
			map->intr = intr_num;
			set_map_bound(map);
			retry = false;
		}
		sdei_map_unlock(map);
	} while (retry);

	return map->ev_num;
}

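/*
 * Release is the inverse of bind: it returns a dynamic slot to the free
 * pool. The event must first have been unregistered on every PE, which is
 * what the reg_count check below enforces.
 */
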
/* Release an SDEI event previously bound to an interrupt */
static int sdei_interrupt_release(int ev_num)
{
	int ret = 0;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	if (!is_map_dynamic(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	/* Event must have been unregistered before release */
	if (map->reg_count != 0) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/*
	 * Interrupt release never causes the state to change. We only check
	 * whether it's permissible or not.
	 */
	if (!can_sdei_state_trans(se, DO_RELEASE)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	if (is_map_bound(map)) {
		/*
		 * Deny release if the interrupt is active, which means it's
		 * probably being acknowledged and handled elsewhere.
		 */
		if (plat_ic_get_interrupt_active(map->intr) != 0U) {
			ret = SDEI_EDENY;
			goto finish;
		}

		/*
		 * Interrupt programming and ownership transfer are already
		 * done during unregister.
		 */

		map->intr = SDEI_DYN_IRQ;
		clr_map_bound(map);
	} else {
		SDEI_LOG("Error release bound:%d cnt:%d\n", is_map_bound(map),
				map->reg_count);
		ret = SDEI_EINVAL;
	}

finish:
	sdei_map_unlock(map);

	return ret;
}

/* Perform reset of private SDEI events */
static int sdei_private_reset(void)
{
	sdei_ev_map_t *map;
	int ret = 0, final_ret = 0;
	unsigned int i;

	/* Unregister all private events */
	for_each_private_map(i, map) {
		/*
		 * The unregister can fail if the event is not registered,
		 * which is allowed, and a deny will be returned. But if the
		 * event is running or unregister-pending, the call fails.
		 */
		ret = sdei_event_unregister(map->ev_num);
		if ((ret == SDEI_EPEND) && (final_ret == 0))
			final_ret = SDEI_EDENY;
	}

	return final_ret;
}

/* Perform reset of shared SDEI events */
static int sdei_shared_reset(void)
{
	const sdei_mapping_t *mapping;
	sdei_ev_map_t *map;
	int ret = 0, final_ret = 0;
	unsigned int i, j;

	/* Unregister all shared events */
	for_each_shared_map(i, map) {
		/*
		 * The unregister can fail if the event is not registered,
		 * which is allowed, and a deny will be returned. But if the
		 * event is running or unregister-pending, the call fails.
		 */
		ret = sdei_event_unregister(map->ev_num);
		if ((ret == SDEI_EPEND) && (final_ret == 0))
			final_ret = SDEI_EDENY;
	}

	if (final_ret != 0)
		return final_ret;

	/*
	 * Loop through both private and shared mappings, and release all
	 * bindings.
	 */
	for_each_mapping_type(i, mapping) {
		iterate_mapping(mapping, j, map) {
			/*
			 * Release bindings for mappings that are dynamic and
			 * bound.
			 */
			if (is_map_dynamic(map) && is_map_bound(map)) {
				/*
				 * Any failure to release would mean there is
				 * at least a PE registered for the event.
				 */
				ret = sdei_interrupt_release(map->ev_num);
				if ((ret != 0) && (final_ret == 0))
					final_ret = ret;
			}
		}
	}

	return final_ret;
}

/* Send a signal to another SDEI client PE */
static int sdei_signal(int ev_num, uint64_t target_pe)
{
	sdei_ev_map_t *map;

	/* Only event 0 can be signalled */
	if (ev_num != SDEI_EVENT_0)
		return SDEI_EINVAL;

	/* Find mapping for event 0 */
	map = find_event_map(SDEI_EVENT_0);
	if (map == NULL)
		return SDEI_EINVAL;

	/* The event must be signalable */
	if (!is_event_signalable(map))
		return SDEI_EINVAL;

	/* Validate target */
	if (plat_core_pos_by_mpidr(target_pe) < 0)
		return SDEI_EINVAL;

	/* Raise SGI. Platform will validate target_pe */
	plat_ic_raise_el3_sgi((int) map->intr, (u_register_t) target_pe);

	return 0;
}

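/*
 * num_dyn_priv_slots and num_dyn_shrd_slots are counted once at boot in
 * sdei_class_init(). FEATURE_BIND_SLOTS (expected to come from
 * sdei_private.h) packs the two counts into the single value returned for
 * SDEI_FEATURES.
 */
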
/* Query SDEI dispatcher features */
static uint64_t sdei_features(unsigned int feature)
{
	if (feature == SDEI_FEATURE_BIND_SLOTS) {
		return FEATURE_BIND_SLOTS(num_dyn_priv_slots,
				num_dyn_shrd_slots);
	}

	return (uint64_t) SDEI_EINVAL;
}

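/*
 * All SDEI SMCs funnel through the handler below. Calls are accepted only
 * from the Non-secure world, and only from the expected client EL; each
 * call is traced with a '>'/'<' SDEI_LOG pair around the dispatch.
 */
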
/* SDEI top level handler for servicing SMCs */
uint64_t sdei_smc_handler(uint32_t smc_fid,
		uint64_t x1,
		uint64_t x2,
		uint64_t x3,
		uint64_t x4,
		void *cookie,
		void *handle,
		uint64_t flags)
{

	uint64_t x5;
	unsigned int ss = (unsigned int) get_interrupt_src_ss(flags);
	int64_t ret;
	bool resume = false;
	cpu_context_t *ctx = handle;
	int ev_num = (int) x1;

	if (ss != NON_SECURE)
		SMC_RET1(ctx, SMC_UNK);

	/* Verify the caller EL */
	if (GET_EL(read_spsr_el3()) != sdei_client_el())
		SMC_RET1(ctx, SMC_UNK);

	switch (smc_fid) {
	case SDEI_VERSION:
		SDEI_LOG("> VER\n");
		ret = (int64_t) sdei_version();
		SDEI_LOG("< VER:%llx\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_REGISTER:
		x5 = SMC_GET_GP(ctx, CTX_GPREG_X5);
		SDEI_LOG("> REG(n:%d e:%llx a:%llx f:%x m:%llx)\n", ev_num,
				x2, x3, (int) x4, x5);
		ret = sdei_event_register(ev_num, x2, x3, x4, x5);
		SDEI_LOG("< REG:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_ENABLE:
		SDEI_LOG("> ENABLE(n:%d)\n", ev_num);
		ret = sdei_event_enable(ev_num);
		SDEI_LOG("< ENABLE:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_DISABLE:
		SDEI_LOG("> DISABLE(n:%d)\n", ev_num);
		ret = sdei_event_disable(ev_num);
		SDEI_LOG("< DISABLE:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_CONTEXT:
		SDEI_LOG("> CTX(p:%d):%lx\n", (int) x1, read_mpidr_el1());
		ret = sdei_event_context(ctx, (unsigned int) x1);
		SDEI_LOG("< CTX:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_COMPLETE_AND_RESUME:
		resume = true;
		/* Fallthrough */

	case SDEI_EVENT_COMPLETE:
		SDEI_LOG("> COMPLETE(r:%u sta/ep:%llx):%lx\n",
				(unsigned int) resume, x1, read_mpidr_el1());
		ret = sdei_event_complete(resume, x1);
		SDEI_LOG("< COMPLETE:%llx\n", ret);

		/*
		 * Set error code only if the call failed. If the call
		 * succeeded, we discard the dispatched context and restore
		 * the interrupted context to a pristine condition, so it
		 * mustn't be modified. We don't return to the caller in this
		 * case anyway.
		 */
		if (ret != 0)
			SMC_RET1(ctx, ret);

		SMC_RET0(ctx);

	case SDEI_EVENT_STATUS:
		SDEI_LOG("> STAT(n:%d)\n", ev_num);
		ret = sdei_event_status(ev_num);
		SDEI_LOG("< STAT:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_GET_INFO:
		SDEI_LOG("> INFO(n:%d, %d)\n", ev_num, (int) x2);
		ret = sdei_event_get_info(ev_num, (int) x2);
		SDEI_LOG("< INFO:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_UNREGISTER:
		SDEI_LOG("> UNREG(n:%d)\n", ev_num);
		ret = sdei_event_unregister(ev_num);
		SDEI_LOG("< UNREG:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_PE_UNMASK:
		SDEI_LOG("> UNMASK:%lx\n", read_mpidr_el1());
		sdei_pe_unmask();
		SDEI_LOG("< UNMASK:%d\n", 0);
		SMC_RET1(ctx, 0);

	case SDEI_PE_MASK:
		SDEI_LOG("> MASK:%lx\n", read_mpidr_el1());
		ret = sdei_pe_mask();
		SDEI_LOG("< MASK:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_INTERRUPT_BIND:
		SDEI_LOG("> BIND(%d)\n", (int) x1);
		ret = sdei_interrupt_bind((unsigned int) x1);
		SDEI_LOG("< BIND:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_INTERRUPT_RELEASE:
		SDEI_LOG("> REL(%d)\n", ev_num);
		ret = sdei_interrupt_release(ev_num);
		SDEI_LOG("< REL:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_SHARED_RESET:
		SDEI_LOG("> S_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_shared_reset();
		SDEI_LOG("< S_RESET:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_PRIVATE_RESET:
		SDEI_LOG("> P_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_private_reset();
		SDEI_LOG("< P_RESET:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_ROUTING_SET:
		SDEI_LOG("> ROUTE_SET(n:%d f:%llx aff:%llx)\n", ev_num, x2, x3);
		ret = sdei_event_routing_set(ev_num, x2, x3);
		SDEI_LOG("< ROUTE_SET:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_FEATURES:
		SDEI_LOG("> FTRS(f:%llx)\n", x1);
		ret = (int64_t) sdei_features((unsigned int) x1);
		SDEI_LOG("< FTRS:%llx\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_SIGNAL:
		SDEI_LOG("> SIGNAL(e:%d t:%llx)\n", ev_num, x2);
		ret = sdei_signal(ev_num, x2);
		SDEI_LOG("< SIGNAL:%lld\n", ret);
		SMC_RET1(ctx, ret);

	default:
		/* Do nothing in default case */
		break;
	}

	WARN("Unimplemented SDEI Call: 0x%x\n", smc_fid);
	SMC_RET1(ctx, SMC_UNK);
}

/* Subscribe to PSCI CPU on to initialize per-CPU SDEI configuration */
SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, sdei_cpu_on_init);

/* Subscribe to PSCI CPU suspend finisher for per-CPU configuration */
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, sdei_cpu_wakeup_init);