// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2017, 2023-2024 Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <compiler.h>
#include <config.h>
#include <drivers/gic.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <initcall.h>
#include <io.h>
#include <keep.h>
#include <kernel/dt.h>
#include <kernel/dt_driver.h>
#include <kernel/interrupt.h>
#include <kernel/misc.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <libfdt.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <trace.h>
#include <util.h>

/* Offsets from gic.gicc_base */
#define GICC_CTLR		(0x000)
#define GICC_PMR		(0x004)
#define GICC_IAR		(0x00C)
#define GICC_EOIR		(0x010)

#define GICC_CTLR_ENABLEGRP0	(1 << 0)
#define GICC_CTLR_ENABLEGRP1	(1 << 1)
#define GICC_CTLR_FIQEN		(1 << 3)

/* Offsets from gic.gicd_base */
#define GICD_CTLR		(0x000)
#define GICD_TYPER		(0x004)
#define GICD_IGROUPR(n)		(0x080 + (n) * 4)
#define GICD_ISENABLER(n)	(0x100 + (n) * 4)
#define GICD_ICENABLER(n)	(0x180 + (n) * 4)
#define GICD_ISPENDR(n)		(0x200 + (n) * 4)
#define GICD_ICPENDR(n)		(0x280 + (n) * 4)
#define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
#define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
#define GICD_ICFGR(n)		(0xc00 + (n) * 4)
#define GICD_IGROUPMODR(n)	(0xd00 + (n) * 4)
#define GICD_SGIR		(0xF00)

#ifdef CFG_ARM_GICV3
#define GICD_PIDR2		(0xFFE8)
#else
/* Called ICPIDR2 in the GICv2 specification */
#define GICD_PIDR2		(0xFE8)
#endif

#define GICD_CTLR_ENABLEGRP0	BIT32(0)
#define GICD_CTLR_ENABLEGRP1NS	BIT32(1)
#define GICD_CTLR_ENABLEGRP1S	BIT32(2)
#define GICD_CTLR_ARE_S		BIT32(4)
#define GICD_CTLR_ARE_NS	BIT32(5)

/* Offsets from gic.gicr_base[core_pos] */
#define GICR_V3_PCPUBASE_SIZE	(2 * 64 * 1024)
#define GICR_SGI_BASE_OFFSET	(64 * 1024)
#define GICR_CTLR		(0x00)
#define GICR_TYPER		(0x08)

#define GICR_IGROUPR0		(GICR_SGI_BASE_OFFSET + 0x080)
#define GICR_IGRPMODR0		(GICR_SGI_BASE_OFFSET + 0xD00)
#define GICR_ICENABLER0		(GICR_SGI_BASE_OFFSET + 0x180)
#define GICR_ICPENDR0		(GICR_SGI_BASE_OFFSET + 0x280)
#define GICR_ISENABLER0		(GICR_SGI_BASE_OFFSET + 0x100)
#define GICR_ICFGR0		(GICR_SGI_BASE_OFFSET + 0xC00)
#define GICR_ICFGR1		(GICR_SGI_BASE_OFFSET + 0xC04)
#define GICR_IPRIORITYR(n)	(GICR_SGI_BASE_OFFSET + 0x400 + (n) * 4)

#define GICR_CTLR_RWP		BIT32(3)

#define GICR_TYPER_LAST		BIT64(4)
#define GICR_TYPER_AFF3_SHIFT	56
#define GICR_TYPER_AFF2_SHIFT	48
#define GICR_TYPER_AFF1_SHIFT	40
#define GICR_TYPER_AFF0_SHIFT	32

/*
 * The register is named GICD_PIDR2 in the GICv3 specification and ICPIDR2
 * in the GICv2 specification, but the bit layout is the same.
 */
#define GICD_PIDR2_ARCHREV_SHIFT	4
#define GICD_PIDR2_ARCHREV_MASK		0xF

/* Number of Private Peripheral Interrupts */
#define NUM_PPI			32

/* Number of Software Generated Interrupts */
#define NUM_SGI			16

/* Number of Non-secure Software Generated Interrupts */
#define NUM_NS_SGI		8

/* Number of interrupts in one register */
#define NUM_INTS_PER_REG	32

/* Number of targets in one register */
#define NUM_TARGETS_PER_REG	4

/* Accessors for the fields of ITARGETSRn */
#define ITARGETSR_FIELD_BITS	8
#define ITARGETSR_FIELD_MASK	0xff

#define GICD_TYPER_IT_LINES_NUM_MASK	0x1f
#define GICC_IAR_IT_ID_MASK	0x3ff
#define GICC_IAR_CPU_ID_MASK	0x7
#define GICC_IAR_CPU_ID_SHIFT	10

#define GICC_SGI_IRM_BIT	40
#define GICC_SGI_AFF1_SHIFT	16
#define GICC_SGI_AFF2_SHIFT	32
#define GICC_SGI_AFF3_SHIFT	48

#define GICD_SGIR_SIGINTID_MASK			0xf
#define GICD_SGIR_TO_OTHER_CPUS			0x1
#define GICD_SGIR_TO_THIS_CPU			0x2
#define GICD_SGIR_TARGET_LIST_FILTER_SHIFT	24
#define GICD_SGIR_NSATT_SHIFT			15
#define GICD_SGIR_CPU_TARGET_LIST_SHIFT		16

/* GICD_ICFGR bit fields */
#define GICD_ICFGR_TYPE_EDGE	2
#define GICD_ICFGR_TYPE_LEVEL	0
#define GICD_ICFGR_FIELD_BITS	2
#define GICD_ICFGR_FIELD_MASK	0x3
#define GICD_ICFGR_NUM_INTS_PER_REG	(NUM_INTS_PER_REG / \
					 GICD_ICFGR_FIELD_BITS)

struct gic_data {
	vaddr_t gicc_base;
	vaddr_t gicd_base;
#if defined(CFG_ARM_GICV3)
	vaddr_t gicr_base[CFG_TEE_CORE_NB_CORE];
#endif
	size_t max_it;
	uint32_t per_cpu_group_status;
	uint32_t per_cpu_group_modifier;
	uint32_t per_cpu_enable;
	struct itr_chip chip;
};

static bool gic_primary_done __nex_bss;
static struct gic_data gic_data __nex_bss;
static struct mutex gic_mutex = MUTEX_INITIALIZER;

static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t type,
		       uint32_t prio);
static void gic_op_enable(struct itr_chip *chip, size_t it);
static void gic_op_disable(struct itr_chip *chip, size_t it);
static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			     uint32_t cpu_mask);
static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
				uint8_t cpu_mask);

static const struct itr_ops gic_ops = {
	.add = gic_op_add,
	.mask = gic_op_disable,
	.unmask = gic_op_enable,
	.enable = gic_op_enable,
	.disable = gic_op_disable,
	.raise_pi = gic_op_raise_pi,
	.raise_sgi = gic_op_raise_sgi,
	.set_affinity = gic_op_set_affinity,
};
DECLARE_KEEP_PAGER(gic_ops);

static vaddr_t __maybe_unused get_gicr_base(struct gic_data *gd __maybe_unused)
{
#if defined(CFG_ARM_GICV3)
	return gd->gicr_base[get_core_pos()];
#else
	return 0;
#endif
}

static bool affinity_routing_is_enabled(struct gic_data *gd)
{
	return IS_ENABLED(CFG_ARM_GICV3) &&
	       io_read32(gd->gicd_base + GICD_CTLR) & GICD_CTLR_ARE_S;
}
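/*
 * Probe the largest implemented interrupt number: GICD_TYPER.ITLinesNumber
 * tells how many GICD_ISENABLERn registers are implemented, and writing
 * all ones to one of them only sets the bits of implemented interrupts,
 * so the highest bit that reads back as set identifies the largest
 * interrupt number.
 */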
static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
{
	int i = 0;
	uint32_t old_ctlr = 0;
	size_t ret = 0;
	size_t max_regs = io_read32(gicd_base + GICD_TYPER) &
			  GICD_TYPER_IT_LINES_NUM_MASK;

	/*
	 * Probe which interrupt number is the largest.
	 */
#if defined(CFG_ARM_GICV3)
	old_ctlr = read_icc_ctlr();
	write_icc_ctlr(0);
#else
	old_ctlr = io_read32(gicc_base + GICC_CTLR);
	io_write32(gicc_base + GICC_CTLR, 0);
#endif
	for (i = max_regs; i >= 0; i--) {
		uint32_t old_reg = 0;
		uint32_t reg = 0;
		int b = 0;

		old_reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ISENABLER(i), 0xffffffff);
		reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ICENABLER(i), ~old_reg);
		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
			if (BIT32(b) & reg) {
				ret = i * NUM_INTS_PER_REG + b;
				goto out;
			}
		}
	}
out:
#if defined(CFG_ARM_GICV3)
	write_icc_ctlr(old_ctlr);
#else
	io_write32(gicc_base + GICC_CTLR, old_ctlr);
#endif
	return ret;
}

static void gicr_wait_for_pending_write(vaddr_t gicr_base)
{
	/*
	 * Wait for changes to
	 * - GICR_ICENABLER0
	 * - GICR_CTLR.DPG1S
	 * - GICR_CTLR.DPG1NS
	 * - GICR_CTLR.DPG0
	 * to be visible to all agents in the system.
	 */
	while (io_read32(gicr_base + GICR_CTLR) & GICR_CTLR_RWP)
		;
}

static void gicv3_sync_redist_config(struct gic_data *gd)
{
	vaddr_t gicr_base = get_gicr_base(gd);
	bool need_sync = false;
	uint32_t gmod0 = 0;
	uint32_t grp0 = 0;
	size_t n = 0;

	/*
	 * If gicr_base isn't available there's no need to synchronize SGI
	 * configuration since gic_init_donate_sgi_to_ns() would panic.
	 */
	if (!gicr_base)
		return;

	grp0 = io_read32(gicr_base + GICR_IGROUPR0);
	gmod0 = io_read32(gicr_base + GICR_IGRPMODR0);
	for (n = GIC_SGI_SEC_BASE; n < GIC_SPI_BASE; n++) {
		/* Ignore matching bits */
		if (!(BIT32(n) & (grp0 ^ gd->per_cpu_group_status)) &&
		    !(BIT32(n) & (gmod0 ^ gd->per_cpu_group_modifier)))
			continue;
		/*
		 * SGI/PPI-n differs from primary CPU configuration,
		 * let's sync up.
		 */
		need_sync = true;

		/* Disable interrupt */
		io_write32(gicr_base + GICR_ICENABLER0, BIT32(n));

		/* Wait for the write to GICR_ICENABLER0 to propagate */
		gicr_wait_for_pending_write(gicr_base);

		/* Make interrupt non-pending */
		io_write32(gicr_base + GICR_ICPENDR0, BIT32(n));

		if (BIT32(n) & gd->per_cpu_group_status)
			grp0 |= BIT32(n);
		else
			grp0 &= ~BIT32(n);
		if (BIT32(n) & gd->per_cpu_group_modifier)
			gmod0 |= BIT32(n);
		else
			gmod0 &= ~BIT32(n);
	}

	if (need_sync) {
		io_write32(gicr_base + GICR_IGROUPR0, grp0);
		io_write32(gicr_base + GICR_IGRPMODR0, gmod0);
		io_write32(gicr_base + GICR_ISENABLER0, gd->per_cpu_enable);
	}
}
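/*
 * Legacy (no affinity routing) counterpart of gicv3_sync_redist_config():
 * align this CPU's banked SGI/PPI group configuration in the distributor
 * with what the primary CPU recorded in struct gic_data.
 */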
static void gic_legacy_sync_dist_config(struct gic_data *gd)
{
	bool need_sync = false;
	uint32_t grp0 = 0;
	size_t n = 0;

	grp0 = io_read32(gd->gicd_base + GICD_IGROUPR(0));
	for (n = GIC_SGI_SEC_BASE; n < GIC_SPI_BASE; n++) {
		/* Ignore matching bits */
		if (!(BIT32(n) & (grp0 ^ gd->per_cpu_group_status)))
			continue;
		/*
		 * SGI/PPI-n differs from primary CPU configuration,
		 * let's sync up.
		 */
		need_sync = true;

		/* Disable interrupt */
		io_write32(gd->gicd_base + GICD_ICENABLER(0), BIT(n));

		/* Make interrupt non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(0), BIT(n));

		if (BIT32(n) & gd->per_cpu_group_status)
			grp0 |= BIT32(n);
		else
			grp0 &= ~BIT32(n);
	}

	if (need_sync) {
		io_write32(gd->gicd_base + GICD_IGROUPR(0), grp0);
		io_write32(gd->gicd_base + GICD_ISENABLER(0),
			   gd->per_cpu_enable);
	}
}

static void init_gic_per_cpu(struct gic_data *gd)
{
	io_write32(gd->gicd_base + GICD_IGROUPR(0), gd->per_cpu_group_status);

	/*
	 * Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself.
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR,
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		   GICC_CTLR_FIQEN);
#endif
}

void gic_init_per_cpu(void)
{
	struct gic_data *gd = &gic_data;

#if defined(CFG_ARM_GICV3)
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif

	if (IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW)) {
		/*
		 * The GIC is already initialized by TF-A, we only need to
		 * handle any SGI or PPI configuration changes.
		 */
		if (affinity_routing_is_enabled(gd))
			gicv3_sync_redist_config(gd);
		else
			gic_legacy_sync_dist_config(gd);
	} else {
		/*
		 * Non-TF-A case where all CPU specific configuration
		 * of the GIC must be done here.
		 */
		init_gic_per_cpu(gd);
	}
}
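/*
 * Donate one of the secure SGIs to the Non-secure world. Intended for boot
 * time: the updated per-CPU group status/modifier is recorded in struct
 * gic_data so that the other cores apply the same configuration in
 * gic_init_per_cpu().
 */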
void gic_init_donate_sgi_to_ns(size_t it)
{
	struct gic_data *gd = &gic_data;

	assert(it >= GIC_SGI_SEC_BASE && it <= GIC_SGI_SEC_MAX);

	/* Assert it's secure to start with */
	assert(!(gd->per_cpu_group_status & BIT32(it)) &&
	       (gd->per_cpu_group_modifier & BIT32(it)));

	gd->per_cpu_group_modifier &= ~BIT32(it);
	gd->per_cpu_group_status |= BIT32(it);

	if (affinity_routing_is_enabled(gd)) {
		vaddr_t gicr_base = get_gicr_base(gd);

		if (!gicr_base)
			panic("GICR_BASE missing");

		/* Disable interrupt */
		io_write32(gicr_base + GICR_ICENABLER0, BIT32(it));

		/* Wait for the write to GICR_ICENABLER0 to propagate */
		gicr_wait_for_pending_write(gicr_base);

		/* Make interrupt non-pending */
		io_write32(gicr_base + GICR_ICPENDR0, BIT32(it));

		/* Make it Non-secure */
		io_write32(gicr_base + GICR_IGROUPR0, gd->per_cpu_group_status);
		io_write32(gicr_base + GICR_IGRPMODR0,
			   gd->per_cpu_group_modifier);
	} else {
		/* Disable interrupt */
		io_write32(gd->gicd_base + GICD_ICENABLER(0), BIT(it));

		/* Make interrupt non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(0), BIT(it));

		/* Make it Non-secure */
		io_write32(gd->gicd_base + GICD_IGROUPR(0),
			   gd->per_cpu_group_status);
	}
}

static int gic_dt_get_irq(const uint32_t *properties, int count, uint32_t *type,
			  uint32_t *prio)
{
	int it_num = DT_INFO_INVALID_INTERRUPT;
	uint32_t detection_type = IRQ_TYPE_NONE;
	uint32_t interrupt_type = GIC_PPI;

	if (!properties || count < 2 || count > 3)
		return DT_INFO_INVALID_INTERRUPT;

	interrupt_type = fdt32_to_cpu(properties[0]);
	it_num = (int)fdt32_to_cpu(properties[1]);

	if (count == 3) {
		detection_type = fdt32_to_cpu(properties[2]) & GENMASK_32(3, 0);
		if (interrupt_type == GIC_PPI &&
		    detection_type != IRQ_TYPE_EDGE_RISING) {
			EMSG("PPI must be edge rising");
			return DT_INFO_INVALID_INTERRUPT;
		}

		if (interrupt_type == GIC_SPI &&
		    (detection_type != IRQ_TYPE_EDGE_RISING &&
		     detection_type != IRQ_TYPE_LEVEL_HIGH)) {
			EMSG("SPI must be edge rising or level high");
			return DT_INFO_INVALID_INTERRUPT;
		}
	}

	switch (interrupt_type) {
	case GIC_PPI:
		it_num += 16;
		detection_type = IRQ_TYPE_EDGE_RISING;
		break;
	case GIC_SPI:
		it_num += 32;
		break;
	default:
		return DT_INFO_INVALID_INTERRUPT;
	}

	if (type)
		*type = detection_type;

	if (prio)
		*prio = 0;

	return it_num;
}
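/*
 * For reference, a matching device tree "interrupts" entry could look like
 * this (illustrative example, using the macros from
 * dt-bindings/interrupt-controller/arm-gic.h):
 *
 *	interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
 *
 * which gic_dt_get_irq() maps to interrupt number 23 + 32 = 55 with a
 * level-high trigger.
 */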
static void __maybe_unused probe_redist_base_addrs(vaddr_t *gicr_base_addrs,
						   paddr_t gicr_base_pa)
{
	size_t sz = GICR_V3_PCPUBASE_SIZE;
	paddr_t pa = gicr_base_pa;
	size_t core_pos = 0;
	uint64_t mt_bit = 0;
	uint64_t mpidr = 0;
	uint64_t tv = 0;
	vaddr_t va = 0;

#ifdef ARM64
	mt_bit = read_mpidr_el1() & MPIDR_MT_MASK;
#endif
	do {
		va = core_mmu_get_va(pa, MEM_AREA_IO_SEC, sz);
		if (!va)
			panic();
		tv = io_read64(va + GICR_TYPER);

		/*
		 * Extract an MPIDR from the Type register to calculate the
		 * core position of this redistributor instance.
		 */
		mpidr = mt_bit;
		mpidr |= SHIFT_U64((tv >> GICR_TYPER_AFF3_SHIFT) &
				   MPIDR_AFFLVL_MASK, MPIDR_AFF3_SHIFT);
		mpidr |= (tv >> GICR_TYPER_AFF0_SHIFT) &
			 (MPIDR_AFF0_MASK | MPIDR_AFF1_MASK | MPIDR_AFF2_MASK);
		core_pos = get_core_pos_mpidr(mpidr);
		if (core_pos < CFG_TEE_CORE_NB_CORE) {
			DMSG("GICR_BASE[%zu] at %#"PRIxVA, core_pos, va);
			gicr_base_addrs[core_pos] = va;
		} else {
			EMSG("Skipping too large core_pos %zu from GICR_TYPER",
			     core_pos);
		}
		pa += sz;
	} while (!(tv & GICR_TYPER_LAST));
}

static void gic_init_base_addr(paddr_t gicc_base_pa, paddr_t gicd_base_pa,
			       paddr_t gicr_base_pa __maybe_unused)
{
	struct gic_data *gd = &gic_data;
	vaddr_t gicc_base = 0;
	vaddr_t gicd_base = 0;
	uint32_t vers __maybe_unused = 0;

	assert(cpu_mmu_enabled());

	gicd_base = core_mmu_get_va(gicd_base_pa, MEM_AREA_IO_SEC,
				    GIC_DIST_REG_SIZE);
	if (!gicd_base)
		panic();

	vers = io_read32(gicd_base + GICD_PIDR2);
	vers >>= GICD_PIDR2_ARCHREV_SHIFT;
	vers &= GICD_PIDR2_ARCHREV_MASK;

	if (IS_ENABLED(CFG_ARM_GICV3)) {
		assert(vers == 4 || vers == 3);
	} else {
		assert(vers == 2 || vers == 1);
		gicc_base = core_mmu_get_va(gicc_base_pa, MEM_AREA_IO_SEC,
					    GIC_CPU_REG_SIZE);
		if (!gicc_base)
			panic();
	}

	gd->gicc_base = gicc_base;
	gd->gicd_base = gicd_base;
	gd->max_it = probe_max_it(gicc_base, gicd_base);
#if defined(CFG_ARM_GICV3)
	if (affinity_routing_is_enabled(gd) && gicr_base_pa)
		probe_redist_base_addrs(gd->gicr_base, gicr_base_pa);
#endif
	gd->chip.ops = &gic_ops;

	if (IS_ENABLED(CFG_DT))
		gd->chip.dt_get_irq = gic_dt_get_irq;
}
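/*
 * Primary-core driver initialization. With TF-A the GIC is already
 * configured, so only the current group configuration is recorded; without
 * TF-A the distributor and CPU interface are fully configured here.
 * Secondary cores are expected to call gic_init_per_cpu() to apply the
 * per-CPU (SGI/PPI) configuration recorded here by the primary core.
 */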
void gic_init_v3(paddr_t gicc_base_pa, paddr_t gicd_base_pa,
		 paddr_t gicr_base_pa)
{
	struct gic_data __maybe_unused *gd = &gic_data;
	size_t __maybe_unused n = 0;

	gic_init_base_addr(gicc_base_pa, gicd_base_pa, gicr_base_pa);

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	/* GIC configuration is initialized from TF-A when embedded */
	if (affinity_routing_is_enabled(gd)) {
		/* Secure affinity routing enabled */
		vaddr_t gicr_base = get_gicr_base(gd);

		if (gicr_base) {
			gd->per_cpu_group_status = io_read32(gicr_base +
							     GICR_IGROUPR0);
			gd->per_cpu_group_modifier = io_read32(gicr_base +
							       GICR_IGRPMODR0);
		} else {
			IMSG("GIC redistributor base address not provided");
			IMSG("Assuming default GIC group status and modifier");
			gd->per_cpu_group_status = 0xffff00ff;
			gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
		}
	} else {
		/* Legacy operation with secure affinity routing disabled */
		gd->per_cpu_group_status = io_read32(gd->gicd_base +
						     GICD_IGROUPR(0));
		gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
	}
#else /*!CFG_WITH_ARM_TRUSTED_FW*/
	/*
	 * Without TF-A, the GIC is always configured for legacy operation
	 * with secure affinity routing disabled.
	 */
	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
		/* Disable interrupts */
		io_write32(gd->gicd_base + GICD_ICENABLER(n), 0xffffffff);

		/* Make interrupts non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(n), 0xffffffff);

		/* Mark interrupts non-secure */
		if (n == 0) {
			/*
			 * Per-CPU interrupt configuration:
			 * ID0-ID7 (SGI) for Non-secure interrupts
			 * ID8-ID15 (SGI) for Secure interrupts
			 * All PPIs are configured as Non-secure interrupts.
			 */
			gd->per_cpu_group_status = 0xffff00ff;
			gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
			io_write32(gd->gicd_base + GICD_IGROUPR(n),
				   gd->per_cpu_group_status);
		} else {
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffffffff);
		}
	}

	/*
	 * Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself.
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
	io_setbits32(gd->gicd_base + GICD_CTLR, GICD_CTLR_ENABLEGRP1S);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR, GICC_CTLR_FIQEN |
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1);
	io_setbits32(gd->gicd_base + GICD_CTLR,
		     GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1NS);
#endif
#endif /*!CFG_WITH_ARM_TRUSTED_FW*/

	interrupt_main_init(&gic_data.chip);
}

static void gic_it_add(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Disable the interrupt */
	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
	/* Make it non-pending */
	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
	/* Assign it to group0 */
	io_clrbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
#if defined(CFG_ARM_GICV3)
	/* Assign it to group1S */
	io_setbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
#endif
}

static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
				uint8_t cpu_mask)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
	uint32_t target = 0;
	uint32_t target_shift = 0;
	vaddr_t itargetsr = gd->gicd_base +
			    GICD_ITARGETSR(it / NUM_TARGETS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Route it to selected CPUs */
	target = io_read32(itargetsr);
	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
	target |= cpu_mask << target_shift;
	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA, target, itargetsr);
	io_write32(itargetsr, target);
	DMSG("cpu_mask: 0x%x", io_read32(itargetsr));
}

static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Set the priority; GICD_IPRIORITYR has one byte per interrupt */
	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
	     prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
	io_write8(gd->gicd_base + GICD_IPRIORITYR(0) + it, prio);
}
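/*
 * Set the trigger mode (edge or level) of an interrupt. GICD_ICFGR uses a
 * 2-bit field per interrupt, so each 32-bit register covers 16 interrupts.
 */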
static void gic_it_set_type(struct gic_data *gd, size_t it, uint32_t type)
{
	size_t index = it / GICD_ICFGR_NUM_INTS_PER_REG;
	uint32_t shift = (it % GICD_ICFGR_NUM_INTS_PER_REG) *
			 GICD_ICFGR_FIELD_BITS;
	uint32_t icfg = 0;

	assert(type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH);

	if (type == IRQ_TYPE_EDGE_RISING)
		icfg = GICD_ICFGR_TYPE_EDGE;
	else
		icfg = GICD_ICFGR_TYPE_LEVEL;

	io_mask32(gd->gicd_base + GICD_ICFGR(index),
		  SHIFT_U32(icfg, shift),
		  SHIFT_U32(GICD_ICFGR_FIELD_MASK, shift));
}

static void gic_it_enable(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
	vaddr_t base = gd->gicd_base;

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(base + GICD_IGROUPR(idx)) & mask));

	/* Enable the interrupt */
	io_write32(base + GICD_ISENABLER(idx), mask);
}

static void gic_it_disable(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Disable the interrupt */
	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
}

static void gic_it_set_pending(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Should be a peripheral interrupt */
	assert(it >= NUM_SGI);

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_ISPENDR(idx), mask);
}

static void assert_cpu_mask_is_valid(uint32_t cpu_mask)
{
	bool __maybe_unused to_others = cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS;
	bool __maybe_unused to_current = cpu_mask & ITR_CPU_MASK_TO_THIS_CPU;
	bool __maybe_unused to_list = cpu_mask & 0xff;

	/* One and only one of the bit fields shall be non-zero */
	assert(to_others + to_current + to_list == 1);
}
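/*
 * Raise an SGI in software. On GICv3 this goes through the ICC_SGI1R (or
 * ICC_ASGI1R for a Non-secure SGI) system register, encoding the target
 * affinity fields; on GICv2 it goes through GICD_SGIR with a target list
 * filter. Note that with GICv3 a CPU mask can only target cores in the
 * cluster of the requesting core, unless ITR_CPU_MASK_TO_OTHER_CPUS is
 * used to broadcast.
 */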
static void gic_it_raise_sgi(struct gic_data *gd __maybe_unused, size_t it,
			     uint32_t cpu_mask, bool ns)
{
#if defined(CFG_ARM_GICV3)
	uint32_t mask_id = it & 0xf;
	uint64_t mask = SHIFT_U64(mask_id, 24);

	assert_cpu_mask_is_valid(cpu_mask);

	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
		mask |= BIT64(GICC_SGI_IRM_BIT);
	} else {
		uint64_t mpidr = read_mpidr();
		uint64_t mask_aff1 = (mpidr & MPIDR_AFF1_MASK) >>
				     MPIDR_AFF1_SHIFT;
		uint64_t mask_aff2 = (mpidr & MPIDR_AFF2_MASK) >>
				     MPIDR_AFF2_SHIFT;
		uint64_t mask_aff3 = (mpidr & MPIDR_AFF3_MASK) >>
				     MPIDR_AFF3_SHIFT;

		mask |= SHIFT_U64(mask_aff1, GICC_SGI_AFF1_SHIFT);
		mask |= SHIFT_U64(mask_aff2, GICC_SGI_AFF2_SHIFT);
		mask |= SHIFT_U64(mask_aff3, GICC_SGI_AFF3_SHIFT);

		if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
			mask |= BIT32(mpidr & 0xf);
		} else {
			/*
			 * Only support sending SGI to the cores in the
			 * same cluster now.
			 */
			mask |= cpu_mask & 0xff;
		}
	}

	/* Raise the interrupt */
	if (ns)
		write_icc_asgi1r(mask);
	else
		write_icc_sgi1r(mask);
#else
	uint32_t mask_id = it & GICD_SGIR_SIGINTID_MASK;
	uint32_t mask_group = ns;
	uint32_t mask = mask_id;

	assert_cpu_mask_is_valid(cpu_mask);

	mask |= SHIFT_U32(mask_group, GICD_SGIR_NSATT_SHIFT);
	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
		mask |= SHIFT_U32(GICD_SGIR_TO_OTHER_CPUS,
				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
	} else if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
		mask |= SHIFT_U32(GICD_SGIR_TO_THIS_CPU,
				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
	} else {
		mask |= SHIFT_U32(cpu_mask & 0xff,
				  GICD_SGIR_CPU_TARGET_LIST_SHIFT);
	}

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_SGIR, mask);
#endif
}

static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	return read_icc_iar1();
#else
	return io_read32(gd->gicc_base + GICC_IAR);
#endif
}

static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	write_icc_eoir1(eoir);
#else
	io_write32(gd->gicc_base + GICC_EOIR, eoir);
#endif
}

static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);
	return !!(io_read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
}

static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);
	return !!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
}

static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
{
	size_t reg_idx = it / NUM_TARGETS_PER_REG;
	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
				ITARGETSR_FIELD_BITS;
	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
	uint32_t target = io_read32(gd->gicd_base + GICD_ITARGETSR(reg_idx));

	assert(gd == &gic_data);
	return (target & target_mask) >> target_shift;
}

void gic_dump_state(void)
{
	struct gic_data *gd = &gic_data;
	int i = 0;

#if defined(CFG_ARM_GICV3)
	DMSG("GICC_CTLR: 0x%x", read_icc_ctlr());
#else
	DMSG("GICC_CTLR: 0x%x", io_read32(gd->gicc_base + GICC_CTLR));
#endif
	DMSG("GICD_CTLR: 0x%x", io_read32(gd->gicd_base + GICD_CTLR));

	for (i = 0; i <= (int)gd->max_it; i++) {
		if (gic_it_is_enabled(gd, i)) {
			DMSG("irq%d: enabled, group:%d, target:%x", i,
			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
		}
	}
}
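/*
 * Release a secure SPI to the Non-secure world at runtime. The interrupt
 * must already be disabled and assigned to the secure group; gic_mutex
 * serializes the group reconfiguration against concurrent callers.
 */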
TEE_Result gic_spi_release_to_ns(size_t it)
{
	struct gic_data *gd = &gic_data;
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);

	if (it > gd->max_it || it < GIC_SPI_BASE)
		return TEE_ERROR_BAD_PARAMETERS;
	/* Make sure it's already disabled */
	if (gic_it_is_enabled(gd, it))
		return TEE_ERROR_BAD_STATE;
	/* Assert it's secure to start with */
	if (gic_it_get_group(gd, it))
		return TEE_ERROR_BAD_STATE;

	mutex_lock(&gic_mutex);
	gic_it_set_cpu_mask(gd, it, 0);
	gic_it_set_prio(gd, it, GIC_SPI_PRI_NS_EL1);

	/* Clear pending status */
	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
	/* Assign it to NS Group1 */
	io_setbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
#if defined(CFG_ARM_GICV3)
	io_clrbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
#endif
	mutex_unlock(&gic_mutex);
	return TEE_SUCCESS;
}

static void __maybe_unused gic_native_itr_handler(void)
{
	struct gic_data *gd = &gic_data;
	uint32_t iar = 0;
	uint32_t id = 0;

	iar = gic_read_iar(gd);
	id = iar & GICC_IAR_IT_ID_MASK;

	if (id <= gd->max_it)
		interrupt_call_handlers(&gd->chip, id);
	else
		DMSG("ignoring interrupt %" PRIu32, id);

	gic_write_eoir(gd, iar);
}

#ifndef CFG_CORE_WORKAROUND_ARM_NMFI
/* Override interrupt_main_handler() with the driver implementation */
void interrupt_main_handler(void)
{
	gic_native_itr_handler();
}
#endif /*CFG_CORE_WORKAROUND_ARM_NMFI*/

static void gic_op_add(struct itr_chip *chip, size_t it,
		       uint32_t type, uint32_t prio __unused)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	if (it < GIC_SPI_BASE) {
		if (gic_primary_done)
			panic("Cannot add SGI or PPI after boot");

		/* Assign it to Secure Group 1, G1S */
		gd->per_cpu_group_modifier |= BIT32(it);
		gd->per_cpu_group_status &= ~BIT32(it);
	}

	if (it < GIC_SPI_BASE && affinity_routing_is_enabled(gd)) {
		vaddr_t gicr_base = get_gicr_base(gd);

		if (!gicr_base)
			panic("GICR_BASE missing");

		/* Disable interrupt */
		io_write32(gicr_base + GICR_ICENABLER0, BIT32(it));

		/* Wait for the write to GICR_ICENABLER0 to propagate */
		gicr_wait_for_pending_write(gicr_base);

		/* Make interrupt non-pending */
		io_write32(gicr_base + GICR_ICPENDR0, BIT32(it));

		/* Make it Secure */
		io_write32(gicr_base + GICR_IGROUPR0, gd->per_cpu_group_status);
		io_write32(gicr_base + GICR_IGRPMODR0,
			   gd->per_cpu_group_modifier);
	} else {
		gic_it_add(gd, it);
		/* Set the CPU mask to deliver interrupts to any online core */
		gic_it_set_cpu_mask(gd, it, 0xff);
		gic_it_set_prio(gd, it, 0x1);
		if (type != IRQ_TYPE_NONE)
			gic_it_set_type(gd, it, type);
	}
}

static void gic_op_enable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	if (it < GIC_SPI_BASE)
		gd->per_cpu_enable |= BIT(it);

	if (it < GIC_SPI_BASE && affinity_routing_is_enabled(gd)) {
		vaddr_t gicr_base = get_gicr_base(gd);

		if (!gicr_base)
			panic("GICR_BASE missing");

		/* Assigned to G1S */
		assert(gd->per_cpu_group_modifier & BIT(it) &&
		       !(gd->per_cpu_group_status & BIT(it)));
		io_write32(gicr_base + GICR_ISENABLER0, gd->per_cpu_enable);
	} else {
		gic_it_enable(gd, it);
	}
}

static void gic_op_disable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_disable(gd, it);
}
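/* Raise a peripheral interrupt in software by setting its GICD_ISPENDR bit */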
static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_set_pending(gd, it);
}

static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			     uint32_t cpu_mask)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);
	bool ns = false;

	assert(gd == &gic_data);

	/* Should be a Software Generated Interrupt */
	assert(it < NUM_SGI);

	ns = BIT32(it) & gd->per_cpu_group_status;
	gic_it_raise_sgi(gd, it, cpu_mask, ns);
}

static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
				uint8_t cpu_mask)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_set_cpu_mask(gd, it, cpu_mask);
}

#ifdef CFG_DT
/* Callback for "interrupts" and "interrupts-extended" DT node properties */
static TEE_Result dt_get_gic_chip_cb(struct dt_pargs *arg, void *priv_data,
				     struct itr_desc *itr_desc)
{
	int itr_num = DT_INFO_INVALID_INTERRUPT;
	struct itr_chip *chip = priv_data;
	uint32_t phandle_args[3] = { };
	uint32_t type = 0;
	uint32_t prio = 0;

	assert(arg && itr_desc);

	/*
	 * gic_dt_get_irq() expects phandle arguments that are still in DT
	 * format (big-endian) whereas struct dt_pargs carries converted
	 * values. Therefore swap the phandle arguments back to DT format.
	 * gic_dt_get_irq() consumes only the first 2 arguments.
	 */
	if (arg->args_count < 2)
		return TEE_ERROR_GENERIC;

	phandle_args[0] = cpu_to_fdt32(arg->args[0]);
	phandle_args[1] = cpu_to_fdt32(arg->args[1]);
	if (arg->args_count >= 3)
		phandle_args[2] = cpu_to_fdt32(arg->args[2]);

	itr_num = gic_dt_get_irq((const void *)phandle_args, arg->args_count,
				 &type, &prio);
	if (itr_num == DT_INFO_INVALID_INTERRUPT)
		return TEE_ERROR_GENERIC;

	gic_op_add(chip, itr_num, type, prio);

	itr_desc->chip = chip;
	itr_desc->itr_num = itr_num;

	return TEE_SUCCESS;
}

static TEE_Result gic_probe(const void *fdt, int offs, const void *cd __unused)
{
	if (interrupt_register_provider(fdt, offs, dt_get_gic_chip_cb,
					&gic_data.chip))
		panic();

	return TEE_SUCCESS;
}

static const struct dt_device_match gic_match_table[] = {
	{ .compatible = "arm,cortex-a15-gic" },
	{ .compatible = "arm,cortex-a7-gic" },
	{ .compatible = "arm,cortex-a5-gic" },
	{ .compatible = "arm,cortex-a9-gic" },
	{ .compatible = "arm,gic-400" },
	{ }
};

DEFINE_DT_DRIVER(gic_dt_driver) = {
	.name = "gic",
	.match_table = gic_match_table,
	.probe = gic_probe,
};
#endif /*CFG_DT*/

static TEE_Result gic_set_primary_done(void)
{
	gic_primary_done = true;

	return TEE_SUCCESS;
}

nex_release_init_resource(gic_set_primary_done);