/*
 * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <bl31/bl31.h>
#include <bl31/ehf.h>
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <lib/cassert.h>
#include <lib/el3_runtime/pubsub.h>
#include <lib/utils.h>
#include <plat/common/platform.h>
#include <services/sdei.h>

#include "sdei_private.h"

#define MAJOR_VERSION	1ULL
#define MINOR_VERSION	0ULL
#define VENDOR_VERSION	0ULL

#define MAKE_SDEI_VERSION(_major, _minor, _vendor) \
	(((_major) << 48ULL) | ((_minor) << 32ULL) | (_vendor))

#define LOWEST_INTR_PRIORITY	0xff

#define is_valid_affinity(_mpidr)	(plat_core_pos_by_mpidr(_mpidr) >= 0)

CASSERT(PLAT_SDEI_CRITICAL_PRI < PLAT_SDEI_NORMAL_PRI,
		sdei_critical_must_have_higher_priority);

static unsigned int num_dyn_priv_slots, num_dyn_shrd_slots;

/* Initialise SDEI map entries */
static void init_map(sdei_ev_map_t *map)
{
	map->reg_count = 0;
}

/* Convert mapping to SDEI class */
static sdei_class_t map_to_class(sdei_ev_map_t *map)
{
	return is_event_critical(map) ? SDEI_CRITICAL : SDEI_NORMAL;
}

/* Clear SDEI event entries except state */
static void clear_event_entries(sdei_entry_t *se)
{
	se->ep = 0;
	se->arg = 0;
	se->affinity = 0;
	se->reg_flags = 0;
}

/* Perform CPU-specific state initialisation */
static void *sdei_cpu_on_init(const void *arg)
{
	unsigned int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	/* Initialize private mappings on this CPU */
	for_each_private_map(i, map) {
		se = get_event_entry(map);
		clear_event_entries(se);
		se->state = 0;
	}

	SDEI_LOG("Private events initialized on %lx\n", read_mpidr_el1());

	/* All PEs start with SDEI events masked */
	(void) sdei_pe_mask();

	return NULL;
}

/* CPU initialisation after wakeup from suspend */
static void *sdei_cpu_wakeup_init(const void *arg)
{
	SDEI_LOG("Events masked on %lx\n", read_mpidr_el1());

	/* All PEs wake up with SDEI events masked */
	(void) sdei_pe_mask();

	return NULL;
}

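/*
 * Note: because both handlers above leave the PE with SDEI events masked, a
 * client receives no SDEI events on a core after boot or wakeup until it
 * explicitly invokes SDEI_PE_UNMASK on that core.
 */
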
/* Initialise an SDEI class */
static void sdei_class_init(sdei_class_t class)
{
	unsigned int i;
	bool zero_found __unused = false;
	int ev_num_so_far __unused;
	sdei_ev_map_t *map;

	/* Sanity check and configuration of shared events */
	ev_num_so_far = -1;
	for_each_shared_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));

		ev_num_so_far = map->ev_num;

		/* Event 0 must not be shared */
		assert(map->ev_num != SDEI_EVENT_0);

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a shared event */
		assert(is_event_shared(map));

		/* No shared mapping should have signalable property */
		assert(!is_event_signalable(map));

		/* Shared mappings can't be explicit */
		assert(!is_map_explicit(map));
#endif

		/* Skip mappings that aren't of the class being initialised */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (is_map_dynamic(map)) {
			assert(map->intr == SDEI_DYN_IRQ);
			assert(is_event_normal(map));
			num_dyn_shrd_slots++;
		} else {
			/* Shared mappings must be bound to shared interrupt */
			assert(plat_ic_is_spi(map->intr) != 0);
			set_map_bound(map);
		}

		init_map(map);
	}

	/* Sanity check and configuration of private events for this CPU */
	ev_num_so_far = -1;
	for_each_private_map(i, map) {
#if ENABLE_ASSERTIONS
		/* Ensure mappings are sorted */
		assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));

		ev_num_so_far = map->ev_num;

		if (map->ev_num == SDEI_EVENT_0) {
			zero_found = true;

			/* Event 0 must be a Secure SGI */
			assert(is_secure_sgi(map->intr));

			/*
			 * Event 0 can have only the signalable flag (apart
			 * from being private).
			 */
			assert(map->map_flags == (SDEI_MAPF_SIGNALABLE |
						SDEI_MAPF_PRIVATE));
		} else {
			/* No other mapping should have signalable property */
			assert(!is_event_signalable(map));
		}

		/* Check for valid event */
		assert(map->ev_num >= 0);

		/* Make sure it's a private event */
		assert(is_event_private(map));

		/*
		 * Other than priority, explicit events can only have explicit
		 * and private flags set.
		 */
		if (is_map_explicit(map)) {
			assert((map->map_flags | SDEI_MAPF_CRITICAL) ==
					(SDEI_MAPF_EXPLICIT | SDEI_MAPF_PRIVATE
					| SDEI_MAPF_CRITICAL));
		}
#endif

		/* Skip mappings that aren't of the class being initialised */
		if (map_to_class(map) != class)
			continue;

		/* Platform events are always bound, so set the bound flag */
		if (map->ev_num != SDEI_EVENT_0) {
			if (is_map_dynamic(map)) {
				assert(map->intr == SDEI_DYN_IRQ);
				assert(is_event_normal(map));
				num_dyn_priv_slots++;
			} else if (is_map_explicit(map)) {
				/*
				 * Explicit mappings don't have a backing
				 * SDEI interrupt, but verify that anyway.
				 */
				assert(map->intr == SDEI_DYN_IRQ);
			} else {
				/*
				 * Private mappings must be bound to private
				 * interrupt.
				 */
				assert(plat_ic_is_ppi((unsigned) map->intr) != 0);
				set_map_bound(map);
			}
		}

		init_map(map);
	}

	/* Ensure event 0 is in the mapping */
	assert(zero_found);

	(void) sdei_cpu_on_init(NULL);
}

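/*
 * Informative sketch of what sdei_class_init() validates (not part of this
 * file): a platform describes its event mappings with the macros from
 * services/sdei.h and registers the two tables with REGISTER_SDEI_MAP().
 * The tables below are hypothetical; interrupt numbers, event numbers and
 * slot counts are platform-specific.
 *
 *	static sdei_ev_map_t plat_private_sdei[] = {
 *		SDEI_DEFINE_EVENT_0(PLAT_SDEI_SGI),
 *		SDEI_PRIVATE_EVENT(100, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
 *	};
 *
 *	static sdei_ev_map_t plat_shared_sdei[] = {
 *		SDEI_SHARED_EVENT(200, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
 *	};
 *
 *	REGISTER_SDEI_MAP(plat_private_sdei, plat_shared_sdei);
 */
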
/* SDEI dispatcher initialisation */
void sdei_init(void)
{
	plat_sdei_setup();
	sdei_class_init(SDEI_CRITICAL);
	sdei_class_init(SDEI_NORMAL);

	/* Register priority level handlers */
	ehf_register_priority_handler(PLAT_SDEI_CRITICAL_PRI,
			sdei_intr_handler);
	ehf_register_priority_handler(PLAT_SDEI_NORMAL_PRI,
			sdei_intr_handler);
}

/* Populate SDEI event entry */
static void set_sdei_entry(sdei_entry_t *se, uint64_t ep, uint64_t arg,
		unsigned int flags, uint64_t affinity)
{
	assert(se != NULL);

	se->ep = ep;
	se->arg = arg;
	se->affinity = (affinity & MPIDR_AFFINITY_MASK);
	se->reg_flags = flags;
}

static uint64_t sdei_version(void)
{
	return MAKE_SDEI_VERSION(MAJOR_VERSION, MINOR_VERSION, VENDOR_VERSION);
}

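/*
 * Informative example: with major 1, minor 0 and vendor 0 as defined above,
 * MAKE_SDEI_VERSION() packs major into bits [63:48], minor into bits [47:32]
 * and vendor into bits [31:0], so sdei_version() returns
 * 0x0001000000000000ULL.
 */
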
/* Validate flags and MPIDR values for REGISTER and ROUTING_SET calls */
static int validate_flags(uint64_t flags, uint64_t mpidr)
{
	/* Validate flags */
	switch (flags) {
	case SDEI_REGF_RM_PE:
		if (!is_valid_affinity(mpidr))
			return SDEI_EINVAL;
		break;
	case SDEI_REGF_RM_ANY:
		break;
	default:
		/* Unknown flags */
		return SDEI_EINVAL;
	}

	return 0;
}

/* Set routing of an SDEI event */
static int sdei_event_routing_set(int ev_num, uint64_t flags, uint64_t mpidr)
{
	int ret;
	unsigned int routing;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	ret = validate_flags(flags, mpidr);
	if (ret != 0)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	/* The event must not be private */
	if (is_event_private(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	if (!is_map_bound(map) || is_event_private(map)) {
		ret = SDEI_EINVAL;
		goto finish;
	}

	if (!can_sdei_state_trans(se, DO_ROUTING)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/* Choose appropriate routing */
	routing = (unsigned int) ((flags == SDEI_REGF_RM_ANY) ?
			INTR_ROUTING_MODE_ANY : INTR_ROUTING_MODE_PE);

	/* Update event registration flag */
	se->reg_flags = (unsigned int) flags;
	if (flags == SDEI_REGF_RM_PE) {
		se->affinity = (mpidr & MPIDR_AFFINITY_MASK);
	}

	/*
	 * ROUTING_SET is permissible only when event composite state is
	 * 'registered, disabled, and not running'. This means that the
	 * interrupt is currently disabled, and not active.
	 */
	plat_ic_set_spi_routing(map->intr, routing, (u_register_t) mpidr);

finish:
	sdei_map_unlock(map);

	return ret;
}

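/*
 * Illustrative call (hypothetical values): to route shared event 200 to the
 * PE with MPIDR 0x80000100, a client invokes SDEI_EVENT_ROUTING_SET with
 * x1 = 200, x2 = SDEI_REGF_RM_PE and x3 = 0x80000100. With SDEI_REGF_RM_ANY,
 * the affinity argument is not used, as validate_flags() above shows.
 */
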
/* Register handler and argument for an SDEI event */
static int64_t sdei_event_register(int ev_num,
		uint64_t ep,
		uint64_t arg,
		uint64_t flags,
		uint64_t mpidr)
{
	int ret;
	unsigned int routing;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	sdei_state_t backup_state;

	if ((ep == 0U) || (plat_sdei_validate_entry_point(
			ep, sdei_client_el()) != 0)) {
		return SDEI_EINVAL;
	}

	ret = validate_flags(flags, mpidr);
	if (ret != 0)
		return ret;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	/* Private events always target the PE */
	if (is_event_private(map))
		flags = SDEI_REGF_RM_PE;

	se = get_event_entry(map);

	/*
	 * Even though the register operation is per-event (and, for private
	 * events, additionally per-PE), it has to be serialised with respect
	 * to bind/release, which are global operations. So we hold the lock
	 * throughout, unconditionally.
	 */
	sdei_map_lock(map);

	backup_state = se->state;
	if (!can_sdei_state_trans(se, DO_REGISTER))
		goto fallback;

	/*
	 * When registering for dynamic events, make sure the event has been
	 * bound already. This has to be the case as, without binding, the
	 * client can't know the event number to register for.
	 */
	if (is_map_dynamic(map) && !is_map_bound(map))
		goto fallback;

	if (is_event_private(map)) {
		/* Multiple calls to register are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only single call to register is possible for shared events */
		assert(map->reg_count == 0);
	}

	if (is_map_bound(map)) {
		/* Meanwhile, did any PE ACK the interrupt? */
		if (plat_ic_get_interrupt_active(map->intr) != 0U)
			goto fallback;

		/* The interrupt must currently be owned by Non-secure */
		if (plat_ic_get_interrupt_type(map->intr) != INTR_TYPE_NS)
			goto fallback;

		/*
		 * Disable forwarding of new interrupt triggers to CPU
		 * interface.
		 */
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Any events that are triggered after register and before
		 * enable should remain pending. Clear any previous interrupt
		 * triggers which are pending (except for SGIs). This has no
		 * effect on level-triggered interrupts.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		/* Map interrupt to EL3 and program the correct priority */
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_EL3);

		/* Program the appropriate interrupt priority */
		plat_ic_set_interrupt_priority(map->intr, sdei_event_priority(map));

		/*
		 * Set the routing mode for shared event as requested. We
		 * already ensure that shared events get bound to SPIs.
		 */
		if (is_event_shared(map)) {
			routing = (unsigned int) ((flags == SDEI_REGF_RM_ANY) ?
					INTR_ROUTING_MODE_ANY : INTR_ROUTING_MODE_PE);
			plat_ic_set_spi_routing(map->intr, routing,
					(u_register_t) mpidr);
		}
	}

	/* Populate event entries */
	set_sdei_entry(se, ep, arg, (unsigned int) flags, mpidr);

	/* Increment register count */
	map->reg_count++;

	sdei_map_unlock(map);

	return 0;

fallback:
	/* Reinstate previous state */
	se->state = backup_state;

	sdei_map_unlock(map);

	return SDEI_EDENY;
}

/* Enable SDEI event */
static int64_t sdei_event_enable(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	int ret;
	bool before, after;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);
	ret = SDEI_EDENY;

	if (is_event_shared(map))
		sdei_map_lock(map);

	before = GET_EV_STATE(se, ENABLED);
	if (!can_sdei_state_trans(se, DO_ENABLE))
		goto finish;
	after = GET_EV_STATE(se, ENABLED);

	/*
	 * Enable interrupt for bound events only if there's a change in
	 * enabled state.
	 */
	if (is_map_bound(map) && (!before && after))
		plat_ic_enable_interrupt(map->intr);

	ret = 0;

finish:
	if (is_event_shared(map))
		sdei_map_unlock(map);

	return ret;
}

/* Disable SDEI event */
static int sdei_event_disable(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	int ret;
	bool before, after;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);
	ret = SDEI_EDENY;

	if (is_event_shared(map))
		sdei_map_lock(map);

	before = GET_EV_STATE(se, ENABLED);
	if (!can_sdei_state_trans(se, DO_DISABLE))
		goto finish;
	after = GET_EV_STATE(se, ENABLED);

	/*
	 * Disable interrupt for bound events only if there's a change in
	 * enabled state.
	 */
	if (is_map_bound(map) && (before && !after))
		plat_ic_disable_interrupt(map->intr);

	ret = 0;

finish:
	if (is_event_shared(map))
		sdei_map_unlock(map);

	return ret;
}

/* Query SDEI event information */
static int64_t sdei_event_get_info(int ev_num, int info)
{
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	uint64_t flags;
	bool registered;
	uint64_t affinity;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Sample state under lock */
	registered = GET_EV_STATE(se, REGISTERED);
	flags = se->reg_flags;
	affinity = se->affinity;

	if (is_event_shared(map))
		sdei_map_unlock(map);

	switch (info) {
	case SDEI_INFO_EV_TYPE:
		return is_event_shared(map);

	case SDEI_INFO_EV_NOT_SIGNALED:
		return !is_event_signalable(map);

	case SDEI_INFO_EV_PRIORITY:
		return is_event_critical(map);

	case SDEI_INFO_EV_ROUTING_MODE:
		if (!is_event_shared(map))
			return SDEI_EINVAL;
		if (!registered)
			return SDEI_EDENY;
		return (flags == SDEI_REGF_RM_PE);

	case SDEI_INFO_EV_ROUTING_AFF:
		if (!is_event_shared(map))
			return SDEI_EINVAL;
		if (!registered)
			return SDEI_EDENY;
		if (flags != SDEI_REGF_RM_PE)
			return SDEI_EINVAL;
		return affinity;

	default:
		return SDEI_EINVAL;
	}
}

/* Unregister an SDEI event */
static int sdei_event_unregister(int ev_num)
{
	int ret = 0;
	sdei_entry_t *se;
	sdei_ev_map_t *map;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	/*
	 * Even though the unregister operation is per-event (and, for private
	 * events, additionally per-PE), it has to be serialised with respect
	 * to bind/release, which are global operations. So we hold the lock
	 * throughout, unconditionally.
	 */
	sdei_map_lock(map);

	if (!can_sdei_state_trans(se, DO_UNREGISTER)) {
		/*
		 * Even if the call is invalid, if the handler is running (for
		 * example, the caller having unregistered from a running
		 * handler earlier), return the pending error code; otherwise,
		 * return deny.
		 */
		ret = GET_EV_STATE(se, RUNNING) ? SDEI_EPEND : SDEI_EDENY;

		goto finish;
	}

	map->reg_count--;
	if (is_event_private(map)) {
		/* Multiple calls to register are possible for private events */
		assert(map->reg_count >= 0);
	} else {
		/* Only single call to register is possible for shared events */
		assert(map->reg_count == 0);
	}

	if (is_map_bound(map)) {
		plat_ic_disable_interrupt(map->intr);

		/*
		 * Clear pending interrupt. Skip this for SGIs, as pending
		 * SGIs may not be cleared on all interrupt controllers.
		 */
		if (ev_num != SDEI_EVENT_0)
			plat_ic_clear_interrupt_pending(map->intr);

		assert(plat_ic_get_interrupt_type(map->intr) == INTR_TYPE_EL3);
		plat_ic_set_interrupt_type(map->intr, INTR_TYPE_NS);
		plat_ic_set_interrupt_priority(map->intr, LOWEST_INTR_PRIORITY);
	}

	clear_event_entries(se);

	/*
	 * If the handler is running at the time of unregister, return the
	 * pending error code.
	 */
	if (GET_EV_STATE(se, RUNNING))
		ret = SDEI_EPEND;

finish:
	sdei_map_unlock(map);

	return ret;
}

/* Query status of an SDEI event */
static int sdei_event_status(int ev_num)
{
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_state_t state;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	se = get_event_entry(map);

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* State value directly maps to the expected return format */
	state = se->state;

	if (is_event_shared(map))
		sdei_map_unlock(map);

	return (int) state;
}

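/*
 * Informative: the composite state returned above follows the SDEI
 * specification's EVENT_STATUS encoding: bit 0 = registered, bit 1 =
 * enabled, bit 2 = handler running. For example, a return value of 3
 * denotes an event that is registered and enabled but not currently
 * running.
 */
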
/* Bind an SDEI event to an interrupt */
static int sdei_interrupt_bind(unsigned int intr_num)
{
	sdei_ev_map_t *map;
	bool retry = true, shared_mapping;

	/* SGIs are not allowed to be bound */
	if (plat_ic_is_sgi(intr_num) != 0)
		return SDEI_EINVAL;

	shared_mapping = (plat_ic_is_spi(intr_num) != 0);
	do {
		/*
		 * Bail out if there is already an event for this interrupt,
		 * either platform-defined or dynamic.
		 */
		map = find_event_map_by_intr(intr_num, shared_mapping);
		if (map != NULL) {
			if (is_map_dynamic(map)) {
				if (is_map_bound(map)) {
					/*
					 * Dynamic event, already bound. Return
					 * event number.
					 */
					return map->ev_num;
				}
			} else {
				/* Binding non-dynamic event */
				return SDEI_EINVAL;
			}
		}

		/*
		 * The interrupt is not bound yet. Try to find a free slot to
		 * bind it. Free dynamic mappings have their interrupt set as
		 * SDEI_DYN_IRQ.
		 */
		map = find_event_map_by_intr(SDEI_DYN_IRQ, shared_mapping);
		if (map == NULL)
			return SDEI_ENOMEM;

		/* The returned mapping must be dynamic */
		assert(is_map_dynamic(map));

		/*
		 * We cannot assert for bound maps here, as we might be racing
		 * with another bind.
		 */

		/* The requested interrupt must already belong to NS */
		if (plat_ic_get_interrupt_type(intr_num) != INTR_TYPE_NS)
			return SDEI_EDENY;

		/*
		 * Interrupt programming and ownership transfer are deferred
		 * until register.
		 */

		sdei_map_lock(map);
		if (!is_map_bound(map)) {
			map->intr = intr_num;
			set_map_bound(map);
			retry = false;
		}
		sdei_map_unlock(map);
	} while (retry);

	return map->ev_num;
}

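/*
 * Illustrative client flow for a dynamic event, with hypothetical values:
 * SDEI_INTERRUPT_BIND(intr) returns an event number ev, after which the
 * client calls SDEI_EVENT_REGISTER(ev, ep, arg, flags, mpidr),
 * SDEI_EVENT_ENABLE(ev) and SDEI_PE_UNMASK() to start receiving the event.
 * Teardown reverses this: SDEI_EVENT_DISABLE(ev), SDEI_EVENT_UNREGISTER(ev),
 * then SDEI_INTERRUPT_RELEASE(ev) below.
 */
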
/* Release an SDEI event previously bound to an interrupt */
static int sdei_interrupt_release(int ev_num)
{
	int ret = 0;
	sdei_ev_map_t *map;
	sdei_entry_t *se;

	/* Check if valid event number */
	map = find_event_map(ev_num);
	if (map == NULL)
		return SDEI_EINVAL;

	if (!is_map_dynamic(map))
		return SDEI_EINVAL;

	se = get_event_entry(map);

	sdei_map_lock(map);

	/* Event must have been unregistered before release */
	if (map->reg_count != 0) {
		ret = SDEI_EDENY;
		goto finish;
	}

	/*
	 * Interrupt release never causes the state to change. We only check
	 * whether it's permissible or not.
	 */
	if (!can_sdei_state_trans(se, DO_RELEASE)) {
		ret = SDEI_EDENY;
		goto finish;
	}

	if (is_map_bound(map)) {
		/*
		 * Deny release if the interrupt is active, which means it's
		 * probably being acknowledged and handled elsewhere.
		 */
		if (plat_ic_get_interrupt_active(map->intr) != 0U) {
			ret = SDEI_EDENY;
			goto finish;
		}

		/*
		 * Interrupt programming and ownership transfer are already
		 * done during unregister.
		 */

		map->intr = SDEI_DYN_IRQ;
		clr_map_bound(map);
	} else {
		SDEI_LOG("Error release bound:%d cnt:%d\n", is_map_bound(map),
				map->reg_count);
		ret = SDEI_EINVAL;
	}

finish:
	sdei_map_unlock(map);

	return ret;
}

/* Perform reset of private SDEI events */
static int sdei_private_reset(void)
{
	sdei_ev_map_t *map;
	int ret = 0, final_ret = 0;
	unsigned int i;

	/* Unregister all private events */
	for_each_private_map(i, map) {
		/*
		 * The unregister can fail if the event is not registered,
		 * which is allowed, and a deny will be returned. But if the
		 * event is running or has unregister pending, the call fails.
		 */
		ret = sdei_event_unregister(map->ev_num);
		if ((ret == SDEI_EPEND) && (final_ret == 0))
			final_ret = SDEI_EDENY;
	}

	return final_ret;
}

/* Perform reset of shared SDEI events */
static int sdei_shared_reset(void)
{
	const sdei_mapping_t *mapping;
	sdei_ev_map_t *map;
	int ret = 0, final_ret = 0;
	unsigned int i, j;

	/* Unregister all shared events */
	for_each_shared_map(i, map) {
		/*
		 * The unregister can fail if the event is not registered,
		 * which is allowed, and a deny will be returned. But if the
		 * event is running or has unregister pending, the call fails.
		 */
		ret = sdei_event_unregister(map->ev_num);
		if ((ret == SDEI_EPEND) && (final_ret == 0))
			final_ret = SDEI_EDENY;
	}

	if (final_ret != 0)
		return final_ret;

	/*
	 * Loop through both private and shared mappings, and release all
	 * bindings.
	 */
	for_each_mapping_type(i, mapping) {
		iterate_mapping(mapping, j, map) {
			/*
			 * Release bindings for mappings that are dynamic and
			 * bound.
			 */
			if (is_map_dynamic(map) && is_map_bound(map)) {
				/*
				 * Any failure to release would mean there is
				 * at least a PE registered for the event.
				 */
				ret = sdei_interrupt_release(map->ev_num);
				if ((ret != 0) && (final_ret == 0))
					final_ret = ret;
			}
		}
	}

	return final_ret;
}

/* Send a signal to another SDEI client PE */
static int sdei_signal(int ev_num, uint64_t target_pe)
{
	sdei_ev_map_t *map;

	/* Only event 0 can be signalled */
	if (ev_num != SDEI_EVENT_0)
		return SDEI_EINVAL;

	/* Find mapping for event 0 */
	map = find_event_map(SDEI_EVENT_0);
	if (map == NULL)
		return SDEI_EINVAL;

	/* The event must be signalable */
	if (!is_event_signalable(map))
		return SDEI_EINVAL;

	/* Validate target */
	if (plat_core_pos_by_mpidr(target_pe) < 0)
		return SDEI_EINVAL;

	/* Raise SGI. Platform will validate target_pe */
	plat_ic_raise_el3_sgi((int) map->intr, (u_register_t) target_pe);

	return 0;
}

/* Query SDEI dispatcher features */
static uint64_t sdei_features(unsigned int feature)
{
	if (feature == SDEI_FEATURE_BIND_SLOTS) {
		return FEATURE_BIND_SLOTS(num_dyn_priv_slots,
				num_dyn_shrd_slots);
	}

	return (uint64_t) SDEI_EINVAL;
}

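/*
 * Informative: per the SDEI specification, the BIND_SLOTS feature value
 * packs the number of private dynamic slots into the upper halfword and the
 * number of shared dynamic slots into the lower halfword of the 32-bit
 * result, which FEATURE_BIND_SLOTS() composes from the two counters
 * maintained by sdei_class_init().
 */
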
/* SDEI top level handler for servicing SMCs */
uint64_t sdei_smc_handler(uint32_t smc_fid,
		uint64_t x1,
		uint64_t x2,
		uint64_t x3,
		uint64_t x4,
		void *cookie,
		void *handle,
		uint64_t flags)
{

	uint64_t x5;
	unsigned int ss = (unsigned int) get_interrupt_src_ss(flags);
	int64_t ret;
	bool resume = false;
	cpu_context_t *ctx = handle;
	int ev_num = (int) x1;

	if (ss != NON_SECURE)
		SMC_RET1(ctx, SMC_UNK);

	/* Verify the caller EL */
	if (GET_EL(read_spsr_el3()) != sdei_client_el())
		SMC_RET1(ctx, SMC_UNK);

	switch (smc_fid) {
	case SDEI_VERSION:
		SDEI_LOG("> VER\n");
		ret = (int64_t) sdei_version();
		SDEI_LOG("< VER:%" PRIx64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_REGISTER:
		x5 = SMC_GET_GP(ctx, CTX_GPREG_X5);
		SDEI_LOG("> REG(n:%d e:%" PRIx64 " a:%" PRIx64 " f:%x m:%" PRIx64 ")\n",
				ev_num, x2, x3, (int) x4, x5);
		ret = sdei_event_register(ev_num, x2, x3, x4, x5);
		SDEI_LOG("< REG:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_ENABLE:
		SDEI_LOG("> ENABLE(n:%d)\n", ev_num);
		ret = sdei_event_enable(ev_num);
		SDEI_LOG("< ENABLE:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_DISABLE:
		SDEI_LOG("> DISABLE(n:0x%x)\n", ev_num);
		ret = sdei_event_disable(ev_num);
		SDEI_LOG("< DISABLE:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_CONTEXT:
		SDEI_LOG("> CTX(p:%d):%lx\n", (int) x1, read_mpidr_el1());
		ret = sdei_event_context(ctx, (unsigned int) x1);
		SDEI_LOG("< CTX:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_COMPLETE_AND_RESUME:
		resume = true;
		/* Fallthrough */

	case SDEI_EVENT_COMPLETE:
		SDEI_LOG("> COMPLETE(r:%u sta/ep:%" PRIx64 "):%lx\n",
				(unsigned int) resume, x1, read_mpidr_el1());
		ret = sdei_event_complete(resume, x1);
		SDEI_LOG("< COMPLETE:%" PRIx64 "\n", ret);

		/*
		 * Set the error code only if the call failed. If the call
		 * succeeded, we discard the dispatched context and restore
		 * the interrupted context to a pristine condition, which
		 * therefore shouldn't be modified. We don't return to the
		 * caller in this case anyway.
		 */
		if (ret != 0)
			SMC_RET1(ctx, ret);

		SMC_RET0(ctx);

	case SDEI_EVENT_STATUS:
		SDEI_LOG("> STAT(n:0x%x)\n", ev_num);
		ret = sdei_event_status(ev_num);
		SDEI_LOG("< STAT:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_GET_INFO:
		SDEI_LOG("> INFO(n:0x%x, %d)\n", ev_num, (int) x2);
		ret = sdei_event_get_info(ev_num, (int) x2);
		SDEI_LOG("< INFO:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_UNREGISTER:
		SDEI_LOG("> UNREG(n:0x%x)\n", ev_num);
		ret = sdei_event_unregister(ev_num);
		SDEI_LOG("< UNREG:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_PE_UNMASK:
		SDEI_LOG("> UNMASK:%lx\n", read_mpidr_el1());
		sdei_pe_unmask();
		SDEI_LOG("< UNMASK:%d\n", 0);
		SMC_RET1(ctx, 0);

	case SDEI_PE_MASK:
		SDEI_LOG("> MASK:%lx\n", read_mpidr_el1());
		ret = sdei_pe_mask();
		SDEI_LOG("< MASK:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_INTERRUPT_BIND:
		SDEI_LOG("> BIND(%d)\n", (int) x1);
		ret = sdei_interrupt_bind((unsigned int) x1);
		SDEI_LOG("< BIND:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_INTERRUPT_RELEASE:
		SDEI_LOG("> REL(0x%x)\n", ev_num);
		ret = sdei_interrupt_release(ev_num);
		SDEI_LOG("< REL:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_SHARED_RESET:
		SDEI_LOG("> S_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_shared_reset();
		SDEI_LOG("< S_RESET:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_PRIVATE_RESET:
		SDEI_LOG("> P_RESET():%lx\n", read_mpidr_el1());
		ret = sdei_private_reset();
		SDEI_LOG("< P_RESET:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_ROUTING_SET:
		SDEI_LOG("> ROUTE_SET(n:%d f:%" PRIx64 " aff:%" PRIx64 ")\n",
				ev_num, x2, x3);
		ret = sdei_event_routing_set(ev_num, x2, x3);
		SDEI_LOG("< ROUTE_SET:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_FEATURES:
		SDEI_LOG("> FTRS(f:%" PRIx64 ")\n", x1);
		ret = (int64_t) sdei_features((unsigned int) x1);
		SDEI_LOG("< FTRS:%" PRIx64 "\n", ret);
		SMC_RET1(ctx, ret);

	case SDEI_EVENT_SIGNAL:
		SDEI_LOG("> SIGNAL(e:%d t:%" PRIx64 ")\n", ev_num, x2);
		ret = sdei_signal(ev_num, x2);
		SDEI_LOG("< SIGNAL:%" PRId64 "\n", ret);
		SMC_RET1(ctx, ret);

	default:
		/* Do nothing in default case */
		break;
	}

	WARN("Unimplemented SDEI Call: 0x%x\n", smc_fid);
	SMC_RET1(ctx, SMC_UNK);
}

/* Subscribe to the PSCI CPU_ON finisher to initialise per-CPU SDEI configuration */
SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, sdei_cpu_on_init);

/* Subscribe to the PSCI suspend power-down finisher for per-CPU configuration */
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, sdei_cpu_wakeup_init);