/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bl31.h>
#include <bl_common.h>
#include <cassert.h>
#include <context.h>
#include <debug.h>
#include <ehf.h>
#include <interrupt_mgmt.h>
#include <platform.h>
#include <pubsub.h>
#include <runtime_svc.h>
#include <sdei.h>
#include <stddef.h>
#include <string.h>
#include <utils.h>
#include "sdei_private.h"

#define MAJOR_VERSION	1ULL
#define MINOR_VERSION	0ULL
#define VENDOR_VERSION	0ULL

#define MAKE_SDEI_VERSION(_major, _minor, _vendor) \
	((((_major)) << 48ULL) | (((_minor)) << 32ULL) | (_vendor))

#define LOWEST_INTR_PRIORITY		0xff

#define is_valid_affinity(_mpidr)	(plat_core_pos_by_mpidr(_mpidr) >= 0)

CASSERT(PLAT_SDEI_CRITICAL_PRI < PLAT_SDEI_NORMAL_PRI,
		sdei_critical_must_have_higher_priority);

static unsigned int num_dyn_priv_slots, num_dyn_shrd_slots;

/* Initialise SDEI map entries */
static void init_map(sdei_ev_map_t *map)
{
	map->reg_count = 0;
}

/* Convert mapping to SDEI class */
static sdei_class_t map_to_class(sdei_ev_map_t *map)
{
	return is_event_critical(map) ? SDEI_CRITICAL : SDEI_NORMAL;
}

/* Clear SDEI event entries except state */
static void clear_event_entries(sdei_entry_t *se)
{
	se->ep = 0;
	se->arg = 0;
	se->affinity = 0;
	se->reg_flags = 0;
}

/* Perform CPU-specific state initialisation */
static void *sdei_cpu_on_init(const void *arg)
{
	unsigned int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	/* Initialize private mappings on this CPU */
	for_each_private_map(i, map) {
		se = get_event_entry(map);
		clear_event_entries(se);
		se->state = 0;
	}

	SDEI_LOG("Private events initialized on %lx\n", read_mpidr_el1());

	/* All PEs start with SDEI events masked */
	(void) sdei_pe_mask();

	return NULL;
}

/* CPU initialisation after wakeup from suspend */
static void *sdei_cpu_wakeup_init(const void *arg)
{
	SDEI_LOG("Events masked on %lx\n", read_mpidr_el1());

	/* All PEs wake up with SDEI events masked */
	(void) sdei_pe_mask();

	return NULL;
}
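
/*
 * Note: the ENABLE_ASSERTIONS blocks in sdei_class_init() below only
 * validate the platform-supplied mapping tables. The functional part of
 * class initialisation is limited to counting dynamic bind slots and
 * marking platform-provided (non-dynamic) events as bound.
 */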

/* Initialise an SDEI class */
static void sdei_class_init(sdei_class_t class)
{
	unsigned int i;
	bool zero_found __unused = false;
	int ev_num_so_far __unused;
	sdei_ev_map_t *map;

	/* Sanity check and configuration of shared events */
	ev_num_so_far = -1;
	for_each_shared_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));

		ev_num_so_far = map->ev_num;

		/* Event 0 must not be shared */
		assert(map->ev_num != SDEI_EVENT_0);

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a shared event */
		assert(is_event_shared(map));

		/* No shared mapping should have signalable property */
		assert(!is_event_signalable(map));

		/* Shared mappings can't be explicit */
		assert(!is_map_explicit(map));
#endif

		/* Skip initializing the wrong priority */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (is_map_dynamic(map)) {
			assert(map->intr == SDEI_DYN_IRQ);
			assert(is_event_normal(map));
			num_dyn_shrd_slots++;
		} else {
			/* Shared mappings must be bound to shared interrupt */
			assert(plat_ic_is_spi(map->intr) != 0);
			set_map_bound(map);
		}

		init_map(map);
	}

	/* Sanity check and configuration of private events for this CPU */
	ev_num_so_far = -1;
	for_each_private_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));

		ev_num_so_far = map->ev_num;

		if (map->ev_num == SDEI_EVENT_0) {
			zero_found = true;

			/* Event 0 must be a Secure SGI */
			assert(is_secure_sgi(map->intr));

			/*
			 * Event 0 can only have the signalable flag (apart
			 * from being private).
			 */
			assert(map->map_flags == (SDEI_MAPF_SIGNALABLE |
						SDEI_MAPF_PRIVATE));
		} else {
			/* No other mapping should have signalable property */
			assert(!is_event_signalable(map));
		}

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a private event */
		assert(is_event_private(map));

		/*
		 * Other than priority, explicit events can only have explicit
		 * and private flags set.
		 */
		if (is_map_explicit(map)) {
			assert((map->map_flags | SDEI_MAPF_CRITICAL) ==
					(SDEI_MAPF_EXPLICIT | SDEI_MAPF_PRIVATE
					| SDEI_MAPF_CRITICAL));
		}
#endif

		/* Skip initializing the wrong priority */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (map->ev_num != SDEI_EVENT_0) {
			if (is_map_dynamic(map)) {
				assert(map->intr == SDEI_DYN_IRQ);
				assert(is_event_normal(map));
				num_dyn_priv_slots++;
			} else if (is_map_explicit(map)) {
				/*
				 * Explicit mappings don't have a backing
				 * SDEI interrupt, but verify that anyway.
				 */
				assert(map->intr == SDEI_DYN_IRQ);
			} else {
				/*
				 * Private mappings must be bound to private
				 * interrupt.
				 */
				assert(plat_ic_is_ppi((unsigned) map->intr) != 0);
				set_map_bound(map);
			}
		}

		init_map(map);
	}

	/* Ensure event 0 is in the mapping */
	assert(zero_found);

	(void) sdei_cpu_on_init(NULL);
}
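
/*
 * Note: sdei_init() registers handlers with the EL3 Exception Handling
 * Framework (EHF) via ehf_register_priority_handler(), so it relies on EHF
 * having been initialised by the time runtime services are set up. Critical
 * events are initialised before normal ones.
 */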

/* SDEI dispatcher initialisation */
void sdei_init(void)
{
	sdei_class_init(SDEI_CRITICAL);
	sdei_class_init(SDEI_NORMAL);

	/* Register priority level handlers */
	ehf_register_priority_handler(PLAT_SDEI_CRITICAL_PRI,
			sdei_intr_handler);
	ehf_register_priority_handler(PLAT_SDEI_NORMAL_PRI,
			sdei_intr_handler);
}

/* Populate SDEI event entry */
static void set_sdei_entry(sdei_entry_t *se, uint64_t ep, uint64_t arg,
		unsigned int flags, uint64_t affinity)
{
	assert(se != NULL);

	se->ep = ep;
	se->arg = arg;
	se->affinity = (affinity & MPIDR_AFFINITY_MASK);
	se->reg_flags = flags;
}

static uint64_t sdei_version(void)
{
	return MAKE_SDEI_VERSION(MAJOR_VERSION, MINOR_VERSION, VENDOR_VERSION);
}

/* Validate flags and MPIDR values for REGISTER and ROUTING_SET calls */
static int validate_flags(uint64_t flags, uint64_t mpidr)
{
	/* Validate flags */
	switch (flags) {
	case SDEI_REGF_RM_PE:
		if (!is_valid_affinity(mpidr))
			return SDEI_EINVAL;
		break;
	case SDEI_REGF_RM_ANY:
		break;
	default:
		/* Unknown flags */
		return SDEI_EINVAL;
	}

	return 0;
}
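
/*
 * For reference: SDEI_REGF_RM_PE routes the event to the PE identified by
 * the supplied MPIDR, which must therefore map to a valid core, while
 * SDEI_REGF_RM_ANY leaves target selection to the interrupt controller and
 * ignores the MPIDR argument.
 */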

/* Set routing of an SDEI event */
static int sdei_event_routing_set(int ev_num, uint64_t flags, uint64_t mpidr)
{
	int ret;
	unsigned int routing;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	ret = validate_flags(flags, mpidr);
	if (ret != 0)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	/* The event must not be private */
	if (is_event_private(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	if (!is_map_bound(map) || is_event_private(map)) {
		ret = SDEI_EINVAL;
		goto finish;
	}

	if (!can_sdei_state_trans(se, DO_ROUTING)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/* Choose appropriate routing */
	routing = (unsigned int) ((flags == SDEI_REGF_RM_ANY) ?
			INTR_ROUTING_MODE_ANY : INTR_ROUTING_MODE_PE);

	/* Update event registration flag */
	se->reg_flags = (unsigned int) flags;

	/*
	 * ROUTING_SET is permissible only when event composite state is
	 * 'registered, disabled, and not running'. This means that the
	 * interrupt is currently disabled, and not active.
	 */
	plat_ic_set_spi_routing(map->intr, routing, (u_register_t) mpidr);

finish:
	sdei_map_unlock(map);

	return ret;
}
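
/*
 * Note: on any failed state transition below, sdei_event_register() restores
 * the entry's composite state from backup_state before returning SDEI_EDENY,
 * so a rejected call leaves the event exactly as it found it.
 */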

/* Register handler and argument for an SDEI event */
static int64_t sdei_event_register(int ev_num, uint64_t ep, uint64_t arg,
		uint64_t flags, uint64_t mpidr)
{
	int ret;
	unsigned int routing;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	sdei_state_t backup_state;

	if ((ep == 0U) || (plat_sdei_validate_entry_point(
					ep, sdei_client_el()) != 0)) {
		return SDEI_EINVAL;
	}

	ret = validate_flags(flags, mpidr);
	if (ret != 0)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	/* Private events always target the PE */
	if (is_event_private(map))
		flags = SDEI_REGF_RM_PE;

	se = get_event_entry(map);

	/*
	 * Even though the register operation is per-event (and, for private
	 * events, additionally per-PE), it has to be serialised with respect
	 * to bind/release, which are global operations. So we hold the lock
	 * throughout, unconditionally.
	 */
	sdei_map_lock(map);

	backup_state = se->state;
	if (!can_sdei_state_trans(se, DO_REGISTER))
		goto fallback;

	/*
	 * When registering for dynamic events, make sure it's been bound
	 * already. This has to be the case as, without binding, the client
	 * can't know about the event number to register for.
	 */
	if (is_map_dynamic(map) && !is_map_bound(map))
		goto fallback;

	if (is_event_private(map)) {
		/* Multiple calls to register are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only single call to register is possible for shared events */
		assert(map->reg_count == 0);
	}

	if (is_map_bound(map)) {
		/* Meanwhile, did any PE ACK the interrupt? */
		if (plat_ic_get_interrupt_active(map->intr) != 0U)
			goto fallback;

		/* The interrupt must currently be owned by Non-secure */
		if (plat_ic_get_interrupt_type(map->intr) != INTR_TYPE_NS)
			goto fallback;

		/*
		 * Disable forwarding of new interrupt triggers to CPU
		 * interface.
		 */
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Any events that are triggered after register and before
		 * enable should remain pending. Clear any previous interrupt
		 * triggers which are pending (except for SGIs). This has no
		 * effect on level-triggered interrupts.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		/* Map interrupt to EL3 and program the correct priority */
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_EL3);

		/* Program the appropriate interrupt priority */
		plat_ic_set_interrupt_priority(map->intr, sdei_event_priority(map));

		/*
		 * Set the routing mode for shared event as requested. We
		 * already ensure that shared events get bound to SPIs.
		 */
		if (is_event_shared(map)) {
			routing = (unsigned int) ((flags == SDEI_REGF_RM_ANY) ?
					INTR_ROUTING_MODE_ANY : INTR_ROUTING_MODE_PE);
			plat_ic_set_spi_routing(map->intr, routing,
					(u_register_t) mpidr);
		}
	}

	/* Populate event entries */
	set_sdei_entry(se, ep, arg, (unsigned int) flags, mpidr);

	/* Increment register count */
	map->reg_count++;

	sdei_map_unlock(map);

	return 0;

fallback:
	/* Reinstate previous state */
	se->state = backup_state;

	sdei_map_unlock(map);

	return SDEI_EDENY;
}
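
/*
 * Note: both enable and disable below sample the ENABLED state bit before
 * and after the state transition, and touch the interrupt controller only
 * when the bit actually changed. This keeps repeated ENABLE/DISABLE calls
 * idempotent at the hardware level.
 */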

/* Enable SDEI event */
static int64_t sdei_event_enable(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	int ret;
	bool before, after;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);
	ret = SDEI_EDENY;

	if (is_event_shared(map))
		sdei_map_lock(map);

	before = GET_EV_STATE(se, ENABLED);
	if (!can_sdei_state_trans(se, DO_ENABLE))
		goto finish;
	after = GET_EV_STATE(se, ENABLED);

	/*
	 * Enable interrupt for bound events only if there's a change in
	 * enabled state.
	 */
	if (is_map_bound(map) && (!before && after))
		plat_ic_enable_interrupt(map->intr);

	ret = 0;

finish:
	if (is_event_shared(map))
		sdei_map_unlock(map);

	return ret;
}

/* Disable SDEI event */
static int sdei_event_disable(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	int ret;
	bool before, after;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);
	ret = SDEI_EDENY;

	if (is_event_shared(map))
		sdei_map_lock(map);

	before = GET_EV_STATE(se, ENABLED);
	if (!can_sdei_state_trans(se, DO_DISABLE))
		goto finish;
	after = GET_EV_STATE(se, ENABLED);

	/*
	 * Disable interrupt for bound events only if there's a change in
	 * enabled state.
	 */
	if (is_map_bound(map) && (before && !after))
		plat_ic_disable_interrupt(map->intr);

	ret = 0;

finish:
	if (is_event_shared(map))
		sdei_map_unlock(map);

	return ret;
}
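
/*
 * Note: for the boolean queries below, the return value encodes the SDEI
 * convention directly: EV_TYPE returns 1 for shared and 0 for private
 * events, EV_PRIORITY returns 1 for critical and 0 for normal, and
 * EV_NOT_SIGNALED returns 1 when the event cannot be signalled.
 */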

/* Query SDEI event information */
static int64_t sdei_event_get_info(int ev_num, int info)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	uint64_t flags;
	bool registered;
	uint64_t affinity;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Sample state under lock */
	registered = GET_EV_STATE(se, REGISTERED);
	flags = se->reg_flags;
	affinity = se->affinity;

	if (is_event_shared(map))
		sdei_map_unlock(map);

	switch (info) {
	case SDEI_INFO_EV_TYPE:
		return is_event_shared(map);

	case SDEI_INFO_EV_NOT_SIGNALED:
		return !is_event_signalable(map);

	case SDEI_INFO_EV_PRIORITY:
		return is_event_critical(map);

	case SDEI_INFO_EV_ROUTING_MODE:
		if (!is_event_shared(map))
			return SDEI_EINVAL;
		if (!registered)
			return SDEI_EDENY;
		return (flags == SDEI_REGF_RM_PE);

	case SDEI_INFO_EV_ROUTING_AFF:
		if (!is_event_shared(map))
			return SDEI_EINVAL;
		if (!registered)
			return SDEI_EDENY;
		if (flags != SDEI_REGF_RM_PE)
			return SDEI_EINVAL;
		return affinity;

	default:
		return SDEI_EINVAL;
	}
}

/* Unregister an SDEI event */
static int sdei_event_unregister(int ev_num)
{
	int ret = 0;
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	/*
	 * Even though the unregister operation is per-event (and, for private
	 * events, additionally per-PE), it has to be serialised with respect
	 * to bind/release, which are global operations. So we hold the lock
	 * throughout, unconditionally.
	 */
	sdei_map_lock(map);

	if (!can_sdei_state_trans(se, DO_UNREGISTER)) {
		/*
		 * Even if the call is invalid, and the handler is running (for
		 * example, having unregistered from a running handler earlier),
		 * return pending error code; otherwise, return deny.
		 */
		ret = GET_EV_STATE(se, RUNNING) ? SDEI_EPEND : SDEI_EDENY;

		goto finish;
	}

	map->reg_count--;
	if (is_event_private(map)) {
		/* Multiple calls to register are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only single call to register is possible for shared events */
		assert(map->reg_count == 0);
	}

	if (is_map_bound(map)) {
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Clear pending interrupt. Skip for SGIs as they may not be
		 * cleared on interrupt controllers.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		assert(plat_ic_get_interrupt_type(map->intr) == INTR_TYPE_EL3);
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_NS);
		plat_ic_set_interrupt_priority(map->intr, LOWEST_INTR_PRIORITY);
	}

	clear_event_entries(se);

	/*
	 * If the handler is running at the time of unregister, return the
	 * pending error code.
	 */
	if (GET_EV_STATE(se, RUNNING))
		ret = SDEI_EPEND;

finish:
	sdei_map_unlock(map);

	return ret;
}
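
/*
 * The composite state returned below follows the SDEI specification's
 * EVENT_STATUS encoding: bit 0 is 'registered', bit 1 is 'enabled' and
 * bit 2 is 'running'.
 */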

/* Query status of an SDEI event */
static int sdei_event_status(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_state_t state;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* State value directly maps to the expected return format */
	state = se->state;

	if (is_event_shared(map))
		sdei_map_unlock(map);

	return (int) state;
}

/* Bind an SDEI event to an interrupt */
static int sdei_interrupt_bind(unsigned int intr_num)
{
	sdei_ev_map_t *map;
	bool retry = true, shared_mapping;

	/* SGIs are not allowed to be bound */
	if (plat_ic_is_sgi(intr_num) != 0)
		return SDEI_EINVAL;

	shared_mapping = (plat_ic_is_spi(intr_num) != 0);
	do {
		/*
		 * Bail out if there is already an event for this interrupt,
		 * either platform-defined or dynamic.
		 */
		map = find_event_map_by_intr(intr_num, shared_mapping);
		if (map != NULL) {
			if (is_map_dynamic(map)) {
				if (is_map_bound(map)) {
					/*
					 * Dynamic event, already bound. Return
					 * event number.
					 */
					return map->ev_num;
				}
			} else {
				/* Binding non-dynamic event */
				return SDEI_EINVAL;
			}
		}

		/*
		 * The interrupt is not bound yet. Try to find a free slot to
		 * bind it. Free dynamic mappings have their interrupt set as
		 * SDEI_DYN_IRQ.
		 */
		map = find_event_map_by_intr(SDEI_DYN_IRQ, shared_mapping);
		if (map == NULL)
			return SDEI_ENOMEM;

		/* The returned mapping must be dynamic */
		assert(is_map_dynamic(map));

		/*
		 * We cannot assert for bound maps here, as we might be racing
		 * with another bind.
		 */

		/* The requested interrupt must already belong to NS */
		if (plat_ic_get_interrupt_type(intr_num) != INTR_TYPE_NS)
			return SDEI_EDENY;

		/*
		 * Interrupt programming and ownership transfer are deferred
		 * until register.
		 */

		sdei_map_lock(map);
		if (!is_map_bound(map)) {
			map->intr = intr_num;
			set_map_bound(map);
			retry = false;
		}
		sdei_map_unlock(map);
	} while (retry);

	return map->ev_num;
}

/* Release an SDEI event previously bound to an interrupt */
static int sdei_interrupt_release(int ev_num)
{
	int ret = 0;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	if (!is_map_dynamic(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	/* Event must have been unregistered before release */
	if (map->reg_count != 0) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/*
	 * Interrupt release never causes the state to change. We only check
	 * whether it's permissible or not.
	 */
	if (!can_sdei_state_trans(se, DO_RELEASE)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	if (is_map_bound(map)) {
		/*
		 * Deny release if the interrupt is active, which means it's
		 * probably being acknowledged and handled elsewhere.
		 */
		if (plat_ic_get_interrupt_active(map->intr) != 0U) {
			ret = SDEI_EDENY;
			goto finish;
		}

		/*
		 * Interrupt programming and ownership transfer are already done
		 * during unregister.
		 */

		map->intr = SDEI_DYN_IRQ;
		clr_map_bound(map);
	} else {
		SDEI_LOG("Error release bound:%d cnt:%d\n", is_map_bound(map),
				map->reg_count);
		ret = SDEI_EINVAL;
	}

finish:
	sdei_map_unlock(map);

	return ret;
}
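
/*
 * Note: the reset handlers below implement SDEI_PRIVATE_RESET and
 * SDEI_SHARED_RESET by unregistering every event of the respective kind;
 * an SDEI_EPEND result from any individual unregister (handler still
 * running) is folded into a single SDEI_EDENY return.
 */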

/* Perform reset of private SDEI events */
static int sdei_private_reset(void)
{
	sdei_ev_map_t *map;
	int ret = 0, final_ret = 0;
	unsigned int i;

	/* Unregister all private events */
	for_each_private_map(i, map) {
		/*
		 * The unregister can fail if the event is not registered, which
		 * is allowed, and a deny will be returned. But if the event is
		 * running or unregister pending, the call fails.
		 */
		ret = sdei_event_unregister(map->ev_num);
		if ((ret == SDEI_EPEND) && (final_ret == 0))
			final_ret = SDEI_EDENY;
	}

	return final_ret;
}

/* Perform reset of shared SDEI events */
static int sdei_shared_reset(void)
{
	const sdei_mapping_t *mapping;
	sdei_ev_map_t *map;
	int ret = 0, final_ret = 0;
	unsigned int i, j;

	/* Unregister all shared events */
	for_each_shared_map(i, map) {
		/*
		 * The unregister can fail if the event is not registered, which
		 * is allowed, and a deny will be returned. But if the event is
		 * running or unregister pending, the call fails.
		 */
		ret = sdei_event_unregister(map->ev_num);
		if ((ret == SDEI_EPEND) && (final_ret == 0))
			final_ret = SDEI_EDENY;
	}

	if (final_ret != 0)
		return final_ret;

	/*
	 * Loop through both private and shared mappings, and release all
	 * bindings.
	 */
	for_each_mapping_type(i, mapping) {
		iterate_mapping(mapping, j, map) {
			/*
			 * Release bindings for mappings that are dynamic and
			 * bound.
			 */
			if (is_map_dynamic(map) && is_map_bound(map)) {
				/*
				 * Any failure to release would mean there is at
				 * least a PE registered for the event.
				 */
				ret = sdei_interrupt_release(map->ev_num);
				if ((ret != 0) && (final_ret == 0))
					final_ret = ret;
			}
		}
	}

	return final_ret;
}

/* Send a signal to another SDEI client PE */
static int sdei_signal(int ev_num, uint64_t target_pe)
{
	sdei_ev_map_t *map;

	/* Only event 0 can be signalled */
	if (ev_num != SDEI_EVENT_0)
		return SDEI_EINVAL;

	/* Find mapping for event 0 */
	map = find_event_map(SDEI_EVENT_0);
	if (map == NULL)
		return SDEI_EINVAL;

	/* The event must be signalable */
	if (!is_event_signalable(map))
		return SDEI_EINVAL;

	/* Validate target */
	if (plat_core_pos_by_mpidr(target_pe) < 0)
		return SDEI_EINVAL;

	/* Raise SGI. Platform will validate target_pe */
	plat_ic_raise_el3_sgi((int) map->intr, (u_register_t) target_pe);

	return 0;
}

/* Query SDEI dispatcher features */
static uint64_t sdei_features(unsigned int feature)
{
	if (feature == SDEI_FEATURE_BIND_SLOTS) {
		return FEATURE_BIND_SLOTS(num_dyn_priv_slots,
				num_dyn_shrd_slots);
	}

	return (uint64_t) SDEI_EINVAL;
}
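
/*
 * For orientation, a typical Non-secure client interaction with this
 * handler, expressed as SMC calls (the smc() wrapper is hypothetical, shown
 * only to illustrate the call order; REGISTER takes x1=event, x2=entry
 * point, x3=argument, x4=routing flags, x5=affinity):
 *
 *   ev = smc(SDEI_INTERRUPT_BIND, intr_num);	// dynamic events only
 *   smc(SDEI_EVENT_REGISTER, ev, ep, arg, SDEI_REGF_RM_ANY, 0);
 *   smc(SDEI_EVENT_ENABLE, ev);
 *   smc(SDEI_PE_UNMASK);
 *   ... event dispatches; the handler ends with SDEI_EVENT_COMPLETE ...
 */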

/* SDEI top level handler for servicing SMCs */
uint64_t sdei_smc_handler(uint32_t smc_fid,
		uint64_t x1,
		uint64_t x2,
		uint64_t x3,
		uint64_t x4,
		void *cookie,
		void *handle,
		uint64_t flags)
{

	uint64_t x5;
	unsigned int ss = (unsigned int) get_interrupt_src_ss(flags);
	int64_t ret;
	bool resume = false;
	cpu_context_t *ctx = handle;
	int ev_num = (int) x1;

	if (ss != NON_SECURE)
		SMC_RET1(ctx, SMC_UNK);

	/* Verify the caller EL */
	if (GET_EL(read_spsr_el3()) != sdei_client_el())
		SMC_RET1(ctx, SMC_UNK);

	switch (smc_fid) {
	case SDEI_VERSION:
		SDEI_LOG("> VER\n");
		ret = (int64_t) sdei_version();
		SDEI_LOG("< VER:%llx\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_REGISTER:
		x5 = SMC_GET_GP(ctx, CTX_GPREG_X5);
		SDEI_LOG("> REG(n:%d e:%llx a:%llx f:%x m:%llx)\n", ev_num,
				x2, x3, (int) x4, x5);
		ret = sdei_event_register(ev_num, x2, x3, x4, x5);
		SDEI_LOG("< REG:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_ENABLE:
		SDEI_LOG("> ENABLE(n:%d)\n", (int) x1);
		ret = sdei_event_enable(ev_num);
		SDEI_LOG("< ENABLE:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_DISABLE:
		SDEI_LOG("> DISABLE(n:%d)\n", ev_num);
		ret = sdei_event_disable(ev_num);
		SDEI_LOG("< DISABLE:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_CONTEXT:
		SDEI_LOG("> CTX(p:%d):%lx\n", (int) x1, read_mpidr_el1());
		ret = sdei_event_context(ctx, (unsigned int) x1);
		SDEI_LOG("< CTX:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_COMPLETE_AND_RESUME:
		resume = true;
		/* Fallthrough */

	case SDEI_EVENT_COMPLETE:
		SDEI_LOG("> COMPLETE(r:%u sta/ep:%llx):%lx\n",
				(unsigned int) resume, x1, read_mpidr_el1());
		ret = sdei_event_complete(resume, x1);
		SDEI_LOG("< COMPLETE:%llx\n", ret);

		/*
		 * Set error code only if the call failed. If the call
		 * succeeded, we discard the dispatched context and restore the
		 * interrupted context to a pristine condition, so it shouldn't
		 * be modified. We don't return to the caller in this case
		 * anyway.
		 */
		if (ret != 0)
			SMC_RET1(ctx, ret);

		SMC_RET0(ctx);

	case SDEI_EVENT_STATUS:
		SDEI_LOG("> STAT(n:%d)\n", ev_num);
		ret = sdei_event_status(ev_num);
		SDEI_LOG("< STAT:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_GET_INFO:
		SDEI_LOG("> INFO(n:%d, %d)\n", ev_num, (int) x2);
		ret = sdei_event_get_info(ev_num, (int) x2);
		SDEI_LOG("< INFO:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_UNREGISTER:
		SDEI_LOG("> UNREG(n:%d)\n", ev_num);
		ret = sdei_event_unregister(ev_num);
		SDEI_LOG("< UNREG:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_PE_UNMASK:
		SDEI_LOG("> UNMASK:%lx\n", read_mpidr_el1());
		sdei_pe_unmask();
		SDEI_LOG("< UNMASK:%d\n", 0);
		SMC_RET1(ctx, 0);

	case SDEI_PE_MASK:
		SDEI_LOG("> MASK:%lx\n", read_mpidr_el1());
		ret = sdei_pe_mask();
		SDEI_LOG("< MASK:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_INTERRUPT_BIND:
		SDEI_LOG("> BIND(%d)\n", (int) x1);
		ret = sdei_interrupt_bind((unsigned int) x1);
		SDEI_LOG("< BIND:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_INTERRUPT_RELEASE:
		SDEI_LOG("> REL(%d)\n", ev_num);
		ret = sdei_interrupt_release(ev_num);
		SDEI_LOG("< REL:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_SHARED_RESET:
		SDEI_LOG("> S_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_shared_reset();
		SDEI_LOG("< S_RESET:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_PRIVATE_RESET:
		SDEI_LOG("> P_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_private_reset();
		SDEI_LOG("< P_RESET:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_ROUTING_SET:
		SDEI_LOG("> ROUTE_SET(n:%d f:%llx aff:%llx)\n", ev_num, x2, x3);
		ret = sdei_event_routing_set(ev_num, x2, x3);
		SDEI_LOG("< ROUTE_SET:%lld\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_FEATURES:
		SDEI_LOG("> FTRS(f:%llx)\n", x1);
		ret = (int64_t) sdei_features((unsigned int) x1);
		SDEI_LOG("< FTRS:%llx\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_SIGNAL:
		SDEI_LOG("> SIGNAL(e:%d t:%llx)\n", ev_num, x2);
		ret = sdei_signal(ev_num, x2);
		SDEI_LOG("< SIGNAL:%lld\n", ret);
		SMC_RET1(ctx, ret);

	default:
		/* Do nothing in default case */
		break;
	}

	WARN("Unimplemented SDEI Call: 0x%x\n", smc_fid);
	SMC_RET1(ctx, SMC_UNK);
}

/* Subscribe to PSCI CPU on to initialize per-CPU SDEI configuration */
SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, sdei_cpu_on_init);

/* Subscribe to PSCI CPU suspend finisher for per-CPU configuration */
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, sdei_cpu_wakeup_init);