// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2017, 2023 Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <compiler.h>
#include <config.h>
#include <drivers/gic.h>
#include <io.h>
#include <keep.h>
#include <kernel/dt.h>
#include <kernel/dt_driver.h>
#include <kernel/interrupt.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <libfdt.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <trace.h>
#include <util.h>

/* Offsets from gic.gicc_base */
#define GICC_CTLR		(0x000)
#define GICC_PMR		(0x004)
#define GICC_IAR		(0x00C)
#define GICC_EOIR		(0x010)

#define GICC_CTLR_ENABLEGRP0	(1 << 0)
#define GICC_CTLR_ENABLEGRP1	(1 << 1)
#define GICC_CTLR_FIQEN		(1 << 3)

/* Offsets from gic.gicd_base */
#define GICD_CTLR		(0x000)
#define GICD_TYPER		(0x004)
#define GICD_IGROUPR(n)		(0x080 + (n) * 4)
#define GICD_ISENABLER(n)	(0x100 + (n) * 4)
#define GICD_ICENABLER(n)	(0x180 + (n) * 4)
#define GICD_ISPENDR(n)		(0x200 + (n) * 4)
#define GICD_ICPENDR(n)		(0x280 + (n) * 4)
#define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
#define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
#define GICD_IGROUPMODR(n)	(0xd00 + (n) * 4)
#define GICD_SGIR		(0xF00)

#ifdef CFG_ARM_GICV3
#define GICD_PIDR2		(0xFFE8)
#else
/* Called ICPIDR2 in GICv2 specification */
#define GICD_PIDR2		(0xFE8)
#endif

#define GICD_CTLR_ENABLEGRP0	BIT32(0)
#define GICD_CTLR_ENABLEGRP1NS	BIT32(1)
#define GICD_CTLR_ENABLEGRP1S	BIT32(2)
#define GICD_CTLR_ARE_S		BIT32(4)
#define GICD_CTLR_ARE_NS	BIT32(5)

/* Offsets from gic.gicr_base[core_pos] */
#define GICR_V3_PCPUBASE_SIZE	(2 * 64 * 1024)
#define GICR_SGI_BASE_OFFSET	(64 * 1024)
#define GICR_CTLR		(0x00)
#define GICR_TYPER		(0x08)

#define GICR_IGROUPR0		(GICR_SGI_BASE_OFFSET + 0x080)
#define GICR_IGRPMODR0		(GICR_SGI_BASE_OFFSET + 0xD00)

#define GICR_TYPER_LAST		BIT64(4)
#define GICR_TYPER_AFF3_SHIFT	56
#define GICR_TYPER_AFF2_SHIFT	48
#define GICR_TYPER_AFF1_SHIFT	40
#define GICR_TYPER_AFF0_SHIFT	32

/* GICD_PIDR2 register name differs on GICv3 and GICv2 but uses same bit map */
#define GICD_PIDR2_ARCHREV_SHIFT	4
#define GICD_PIDR2_ARCHREV_MASK		0xF

/* Number of Private Peripheral Interrupts */
#define NUM_PPI			32

/* Number of Software Generated Interrupts */
#define NUM_SGI			16

/* Number of Non-secure Software Generated Interrupts */
#define NUM_NS_SGI		8

/* Number of interrupts in one register */
#define NUM_INTS_PER_REG	32

/* Number of targets in one register */
#define NUM_TARGETS_PER_REG	4

/* Accessors for the per-interrupt fields of ITARGETSRn */
#define ITARGETSR_FIELD_BITS	8
#define ITARGETSR_FIELD_MASK	0xff

#define GICD_TYPER_IT_LINES_NUM_MASK	0x1f
#define GICC_IAR_IT_ID_MASK	0x3ff
#define GICC_IAR_CPU_ID_MASK	0x7
#define GICC_IAR_CPU_ID_SHIFT	10

#define GICC_SGI_IRM_BIT	40
#define GICC_SGI_AFF1_SHIFT	16
#define GICC_SGI_AFF2_SHIFT	32
#define GICC_SGI_AFF3_SHIFT	48

#define GICD_SGIR_SIGINTID_MASK			0xf
#define GICD_SGIR_TO_OTHER_CPUS			0x1
#define GICD_SGIR_TO_THIS_CPU			0x2
#define GICD_SGIR_TARGET_LIST_FILTER_SHIFT	24
#define GICD_SGIR_NSATT_SHIFT			15
#define GICD_SGIR_CPU_TARGET_LIST_SHIFT		16
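
/*
 * Worked example of the register indexing used throughout this file: the
 * bitmap registers (IGROUPR, ISENABLER, ICENABLER, ISPENDR, ICPENDR) pack
 * 32 interrupt IDs per 32-bit word, so for instance SPI 45 is bit
 * 45 % 32 = 13 of GICD_ISENABLER(45 / 32) = GICD_ISENABLER(1). ITARGETSR
 * and IPRIORITYR instead hold one byte per interrupt ID, i.e. 4 IDs per
 * word, which is what NUM_TARGETS_PER_REG and ITARGETSR_FIELD_BITS encode.
 */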

struct gic_data {
	vaddr_t gicc_base;
	vaddr_t gicd_base;
#if defined(CFG_ARM_GICV3)
	vaddr_t gicr_base[CFG_TEE_CORE_NB_CORE];
#endif
	size_t max_it;
	uint32_t per_cpu_group_status;
	uint32_t per_cpu_group_modifier;
	struct itr_chip chip;
};

static struct gic_data gic_data __nex_bss;

static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t type,
		       uint32_t prio);
static void gic_op_enable(struct itr_chip *chip, size_t it);
static void gic_op_disable(struct itr_chip *chip, size_t it);
static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			     uint32_t cpu_mask);
static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
				uint8_t cpu_mask);

static const struct itr_ops gic_ops = {
	.add = gic_op_add,
	.mask = gic_op_disable,
	.unmask = gic_op_enable,
	.enable = gic_op_enable,
	.disable = gic_op_disable,
	.raise_pi = gic_op_raise_pi,
	.raise_sgi = gic_op_raise_sgi,
	.set_affinity = gic_op_set_affinity,
};
DECLARE_KEEP_PAGER(gic_ops);

static vaddr_t __maybe_unused get_gicr_base(struct gic_data *gd __maybe_unused)
{
#if defined(CFG_ARM_GICV3)
	return gd->gicr_base[get_core_pos()];
#else
	return 0;
#endif
}

static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
{
	int i;
	uint32_t old_ctlr;
	size_t ret = 0;
	size_t max_regs = io_read32(gicd_base + GICD_TYPER) &
			  GICD_TYPER_IT_LINES_NUM_MASK;

	/*
	 * Probe which interrupt number is the largest.
	 */
#if defined(CFG_ARM_GICV3)
	old_ctlr = read_icc_ctlr();
	write_icc_ctlr(0);
#else
	old_ctlr = io_read32(gicc_base + GICC_CTLR);
	io_write32(gicc_base + GICC_CTLR, 0);
#endif
	for (i = max_regs; i >= 0; i--) {
		uint32_t old_reg;
		uint32_t reg;
		int b;

		old_reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ISENABLER(i), 0xffffffff);
		reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ICENABLER(i), ~old_reg);
		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
			if (BIT32(b) & reg) {
				ret = i * NUM_INTS_PER_REG + b;
				goto out;
			}
		}
	}
out:
#if defined(CFG_ARM_GICV3)
	write_icc_ctlr(old_ctlr);
#else
	io_write32(gicc_base + GICC_CTLR, old_ctlr);
#endif
	return ret;
}
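
/*
 * GICD_IGROUPR(0) covers interrupt IDs 0-31 (SGIs and PPIs) and is banked
 * per CPU on GICv2 (and on GICv3 while affinity routing is disabled), so
 * the group assignment recorded in per_cpu_group_status has to be
 * re-applied by each core: gic_cpu_init() below does exactly that when a
 * secondary core comes up.
 */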

void gic_cpu_init(void)
{
	struct gic_data *gd = &gic_data;

#if defined(CFG_ARM_GICV3)
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif

	io_write32(gd->gicd_base + GICD_IGROUPR(0), gd->per_cpu_group_status);

	/*
	 * Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR,
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		   GICC_CTLR_FIQEN);
#endif
}

static int gic_dt_get_irq(const uint32_t *properties, int count,
			  uint32_t *type, uint32_t *prio)
{
	int it_num = DT_INFO_INVALID_INTERRUPT;

	if (type)
		*type = IRQ_TYPE_NONE;

	if (prio)
		*prio = 0;

	if (!properties || count < 2)
		return DT_INFO_INVALID_INTERRUPT;

	it_num = fdt32_to_cpu(properties[1]);

	switch (fdt32_to_cpu(properties[0])) {
	case GIC_PPI:
		it_num += 16;
		break;
	case GIC_SPI:
		it_num += 32;
		break;
	default:
		it_num = DT_INFO_INVALID_INTERRUPT;
	}

	return it_num;
}
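
/*
 * Worked example for the translation above: a DT property
 * "interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>" resolves to interrupt
 * ID 13 + 32 = 45, and "<GIC_PPI 9 ...>" resolves to 9 + 16 = 25,
 * matching the architectural ID ranges of SGIs (0-15), PPIs (16-31) and
 * SPIs (32 and up).
 */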

static void __maybe_unused probe_redist_base_addrs(vaddr_t *gicr_base_addrs,
						   paddr_t gicr_base_pa)
{
	size_t sz = GICR_V3_PCPUBASE_SIZE;
	paddr_t pa = gicr_base_pa;
	size_t core_pos = 0;
	uint64_t mt_bit = 0;
	uint64_t mpidr = 0;
	uint64_t tv = 0;
	vaddr_t va = 0;

#ifdef ARM64
	mt_bit = read_mpidr_el1() & MPIDR_MT_MASK;
#endif
	do {
		va = core_mmu_get_va(pa, MEM_AREA_IO_SEC, sz);
		if (!va)
			panic();
		tv = io_read64(va + GICR_TYPER);

		/*
		 * Extract an MPIDR from the Type register to calculate the
		 * core position of this redistributor instance.
		 */
		mpidr = mt_bit;
		mpidr |= SHIFT_U64((tv >> GICR_TYPER_AFF3_SHIFT) &
				   MPIDR_AFFLVL_MASK, MPIDR_AFF3_SHIFT);
		mpidr |= (tv >> GICR_TYPER_AFF0_SHIFT) &
			 (MPIDR_AFF0_MASK | MPIDR_AFF1_MASK | MPIDR_AFF2_MASK);
		core_pos = get_core_pos_mpidr(mpidr);
		if (core_pos < CFG_TEE_CORE_NB_CORE) {
			DMSG("GICR_BASE[%zu] at %#"PRIxVA, core_pos, va);
			gicr_base_addrs[core_pos] = va;
		} else {
			EMSG("Skipping too large core_pos %zu from GICR_TYPER",
			     core_pos);
		}
		pa += sz;
	} while (!(tv & GICR_TYPER_LAST));
}

static void gic_init_base_addr(paddr_t gicc_base_pa, paddr_t gicd_base_pa,
			       paddr_t gicr_base_pa __maybe_unused)
{
	struct gic_data *gd = &gic_data;
	vaddr_t gicc_base = 0;
	vaddr_t gicd_base = 0;
	uint32_t vers __maybe_unused = 0;

	assert(cpu_mmu_enabled());

	gicd_base = core_mmu_get_va(gicd_base_pa, MEM_AREA_IO_SEC,
				    GIC_DIST_REG_SIZE);
	if (!gicd_base)
		panic();

	vers = io_read32(gicd_base + GICD_PIDR2);
	vers >>= GICD_PIDR2_ARCHREV_SHIFT;
	vers &= GICD_PIDR2_ARCHREV_MASK;

	if (IS_ENABLED(CFG_ARM_GICV3)) {
		assert(vers == 3);
	} else {
		assert(vers == 2);
		gicc_base = core_mmu_get_va(gicc_base_pa, MEM_AREA_IO_SEC,
					    GIC_CPU_REG_SIZE);
		if (!gicc_base)
			panic();
	}

	gd->gicc_base = gicc_base;
	gd->gicd_base = gicd_base;
	gd->max_it = probe_max_it(gicc_base, gicd_base);
#if defined(CFG_ARM_GICV3)
	probe_redist_base_addrs(gd->gicr_base, gicr_base_pa);
#endif
	gd->chip.ops = &gic_ops;

	if (IS_ENABLED(CFG_DT))
		gd->chip.dt_get_irq = gic_dt_get_irq;
}

void gic_init_v3(paddr_t gicc_base_pa, paddr_t gicd_base_pa,
		 paddr_t gicr_base_pa)
{
	struct gic_data __maybe_unused *gd = &gic_data;
	size_t __maybe_unused n = 0;

	gic_init_base_addr(gicc_base_pa, gicd_base_pa, gicr_base_pa);

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	/* GIC configuration is initialized from TF-A when embedded */
	if (io_read32(gd->gicd_base + GICD_CTLR) & GICD_CTLR_ARE_S) {
		vaddr_t gicr_base = get_gicr_base(gd);

		if (!gicr_base)
			panic("GICR_BASE missing for affinity routing");
		/* Secure affinity routing enabled */
		gd->per_cpu_group_status = io_read32(gicr_base + GICR_IGROUPR0);
		gd->per_cpu_group_modifier = io_read32(gicr_base +
						       GICR_IGRPMODR0);
	} else {
		/* Legacy operation with secure affinity routing disabled */
		gd->per_cpu_group_status = io_read32(gd->gicd_base +
						     GICD_IGROUPR(0));
		gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
	}
#else /*!CFG_WITH_ARM_TRUSTED_FW*/
	/*
	 * Without TF-A, GIC is always configured for legacy operation
	 * with secure affinity routing disabled.
	 */
	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
		/* Disable interrupts */
		io_write32(gd->gicd_base + GICD_ICENABLER(n), 0xffffffff);

		/* Make interrupts non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(n), 0xffffffff);

		/* Mark interrupts non-secure */
		if (n == 0) {
			/*
			 * Per-CPU interrupt configuration:
			 * ID0-ID7 (SGI) for Non-secure interrupts
			 * ID8-ID15 (SGI) for Secure interrupts
			 * All PPIs configured as Non-secure interrupts
			 */
			gd->per_cpu_group_status = 0xffff00ff;
			gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
			io_write32(gd->gicd_base + GICD_IGROUPR(n),
				   gd->per_cpu_group_status);
		} else {
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffffffff);
		}
	}

	/*
	 * Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
	io_setbits32(gd->gicd_base + GICD_CTLR, GICD_CTLR_ENABLEGRP1S);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR, GICC_CTLR_FIQEN |
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1);
	io_setbits32(gd->gicd_base + GICD_CTLR,
		     GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1NS);
#endif
#endif /*!CFG_WITH_ARM_TRUSTED_FW*/

	interrupt_main_init(&gic_data.chip);
}

static void gic_it_add(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Disable the interrupt */
	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
	/* Make it non-pending */
	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
	/* Assign it to group0 */
	io_clrbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
#if defined(CFG_ARM_GICV3)
	/* Assign it to group1S */
	io_setbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
#endif
}

static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
				uint8_t cpu_mask)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
	uint32_t target, target_shift;
	vaddr_t itargetsr = gd->gicd_base +
			    GICD_ITARGETSR(it / NUM_TARGETS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Route it to selected CPUs */
	target = io_read32(itargetsr);
	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
	target |= cpu_mask << target_shift;
	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA, target, itargetsr);
	io_write32(itargetsr, target);
	DMSG("cpu_mask: 0x%x", io_read32(itargetsr));
}

static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Set the priority of the interrupt */
	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
	     prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
	io_write8(gd->gicd_base + GICD_IPRIORITYR(0) + it, prio);
}
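
/*
 * Note on the io_write8() above: GICD_IPRIORITYR holds one priority byte
 * per interrupt ID and the GIC architecture allows byte accesses to it,
 * so the priority of interrupt @it can be written directly at byte offset
 * GICD_IPRIORITYR(0) + it without a read-modify-write of the whole word.
 */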

static void gic_it_enable(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
	vaddr_t base = gd->gicd_base;

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(base + GICD_IGROUPR(idx)) & mask));

	/* Enable the interrupt */
	io_write32(base + GICD_ISENABLER(idx), mask);
}

static void gic_it_disable(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Disable the interrupt */
	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
}

static void gic_it_set_pending(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Should be Peripheral Interrupt */
	assert(it >= NUM_SGI);

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_ISPENDR(idx), mask);
}

static void assert_cpu_mask_is_valid(uint32_t cpu_mask)
{
	bool __maybe_unused to_others = cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS;
	bool __maybe_unused to_current = cpu_mask & ITR_CPU_MASK_TO_THIS_CPU;
	bool __maybe_unused to_list = cpu_mask & 0xff;

	/* One and only one of the bit fields shall be non-zero */
	assert(to_others + to_current + to_list == 1);
}
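
/*
 * Examples of valid @cpu_mask values for the SGI helper below:
 * ITR_CPU_MASK_TO_OTHER_CPUS (all cores but the caller),
 * ITR_CPU_MASK_TO_THIS_CPU (the caller only), or a plain 8-bit target
 * list such as 0x3 (cores 0 and 1). Exactly one of the three forms may
 * be used at a time, as checked by assert_cpu_mask_is_valid() above.
 */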

static void gic_it_raise_sgi(struct gic_data *gd __maybe_unused, size_t it,
			     uint32_t cpu_mask, uint8_t group)
{
#if defined(CFG_ARM_GICV3)
	uint32_t mask_id = it & 0xf;
	uint64_t mask = SHIFT_U64(mask_id, 24);

	assert_cpu_mask_is_valid(cpu_mask);

	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
		mask |= BIT64(GICC_SGI_IRM_BIT);
	} else {
		uint64_t mpidr = read_mpidr();
		uint64_t mask_aff1 = (mpidr & MPIDR_AFF1_MASK) >>
				     MPIDR_AFF1_SHIFT;
		uint64_t mask_aff2 = (mpidr & MPIDR_AFF2_MASK) >>
				     MPIDR_AFF2_SHIFT;
		uint64_t mask_aff3 = (mpidr & MPIDR_AFF3_MASK) >>
				     MPIDR_AFF3_SHIFT;

		mask |= SHIFT_U64(mask_aff1, GICC_SGI_AFF1_SHIFT);
		mask |= SHIFT_U64(mask_aff2, GICC_SGI_AFF2_SHIFT);
		mask |= SHIFT_U64(mask_aff3, GICC_SGI_AFF3_SHIFT);

		if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
			mask |= BIT32(mpidr & 0xf);
		} else {
			/*
			 * Only sending SGIs to cores in the same cluster
			 * as the caller is supported for now.
			 */
			mask |= cpu_mask & 0xff;
		}
	}

	/* Raise the interrupt */
	if (group)
		write_icc_asgi1r(mask);
	else
		write_icc_sgi1r(mask);
#else
	uint32_t mask_id = it & GICD_SGIR_SIGINTID_MASK;
	uint32_t mask_group = group & 0x1;
	uint32_t mask = mask_id;

	assert_cpu_mask_is_valid(cpu_mask);

	mask |= SHIFT_U32(mask_group, GICD_SGIR_NSATT_SHIFT);
	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
		mask |= SHIFT_U32(GICD_SGIR_TO_OTHER_CPUS,
				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
	} else if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
		mask |= SHIFT_U32(GICD_SGIR_TO_THIS_CPU,
				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
	} else {
		mask |= SHIFT_U32(cpu_mask & 0xff,
				  GICD_SGIR_CPU_TARGET_LIST_SHIFT);
	}

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_SGIR, mask);
#endif
}

static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	return read_icc_iar1();
#else
	return io_read32(gd->gicc_base + GICC_IAR);
#endif
}

static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	write_icc_eoir1(eoir);
#else
	io_write32(gd->gicc_base + GICC_EOIR, eoir);
#endif
}

static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);
	return !!(io_read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
}

static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);
	return !!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
}

static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
{
	size_t reg_idx = it / NUM_TARGETS_PER_REG;
	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
				ITARGETSR_FIELD_BITS;
	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
	uint32_t target = io_read32(gd->gicd_base + GICD_ITARGETSR(reg_idx));

	assert(gd == &gic_data);
	return (target & target_mask) >> target_shift;
}

void gic_dump_state(void)
{
	struct gic_data *gd = &gic_data;
	int i = 0;

#if defined(CFG_ARM_GICV3)
	DMSG("GICC_CTLR: 0x%x", read_icc_ctlr());
#else
	DMSG("GICC_CTLR: 0x%x", io_read32(gd->gicc_base + GICC_CTLR));
#endif
	DMSG("GICD_CTLR: 0x%x", io_read32(gd->gicd_base + GICD_CTLR));

	for (i = 0; i <= (int)gd->max_it; i++) {
		if (gic_it_is_enabled(gd, i)) {
			DMSG("irq%d: enabled, group:%d, target:%x", i,
			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
		}
	}
}

static void __maybe_unused gic_native_itr_handler(void)
{
	struct gic_data *gd = &gic_data;
	uint32_t iar = 0;
	uint32_t id = 0;

	iar = gic_read_iar(gd);
	id = iar & GICC_IAR_IT_ID_MASK;

	if (id <= gd->max_it)
		interrupt_call_handlers(&gd->chip, id);
	else
		DMSG("ignoring interrupt %" PRIu32, id);

	gic_write_eoir(gd, iar);
}

#ifndef CFG_CORE_WORKAROUND_ARM_NMFI
/* Override interrupt_main_handler() with driver implementation */
void interrupt_main_handler(void)
{
	gic_native_itr_handler();
}
#endif /*CFG_CORE_WORKAROUND_ARM_NMFI*/
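
/*
 * The handler above follows the usual GIC acknowledge/end-of-interrupt
 * sequence: reading IAR acknowledges the highest priority pending
 * interrupt, and the same value must be written back to EOIR once the
 * interrupt has been serviced. IDs above max_it, notably the special
 * "spurious interrupt" ID 1023 (0x3ff), have no handler and are only
 * logged before the EOI.
 */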

static void gic_op_add(struct itr_chip *chip, size_t it,
		       uint32_t type __unused,
		       uint32_t prio __unused)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_add(gd, it);
	/* Set the CPU mask to deliver interrupts to any online core */
	gic_it_set_cpu_mask(gd, it, 0xff);
	gic_it_set_prio(gd, it, 0x1);
}

static void gic_op_enable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_enable(gd, it);
}

static void gic_op_disable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_disable(gd, it);
}

static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_set_pending(gd, it);
}

static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			     uint32_t cpu_mask)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	/* Should be Software Generated Interrupt */
	assert(it < NUM_SGI);

	if (it < NUM_NS_SGI)
		gic_it_raise_sgi(gd, it, cpu_mask, 1);
	else
		gic_it_raise_sgi(gd, it, cpu_mask, 0);
}

static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
				uint8_t cpu_mask)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_set_cpu_mask(gd, it, cpu_mask);
}
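
/*
 * The group argument chosen by gic_op_raise_sgi() above mirrors the
 * non-TF-A init code: SGIs 0-7 are Non-secure, so they are raised with
 * group set to 1 (NSATT on GICv2, ICC_ASGI1R on GICv3), while SGIs 8-15
 * stay Secure with group 0. Raising SGI 3, for instance, targets the
 * Non-secure world, whereas SGI 8 remains within the secure world.
 */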

#ifdef CFG_DT
/* Callback for "interrupts" and "interrupts-extended" DT node properties */
static TEE_Result dt_get_gic_chip_cb(struct dt_pargs *arg, void *priv_data,
				     struct itr_desc *itr_desc)
{
	int itr_num = DT_INFO_INVALID_INTERRUPT;
	struct itr_chip *chip = priv_data;
	uint32_t phandle_args[2] = { };
	uint32_t type = 0;
	uint32_t prio = 0;

	assert(arg && itr_desc);

	/*
	 * gic_dt_get_irq() expects phandle arguments in DT format
	 * (big-endian) whereas struct dt_pargs carries CPU-endian values,
	 * so convert the arguments back. gic_dt_get_irq() consumes only
	 * the first 2 arguments.
	 */
	if (arg->args_count < 2)
		return TEE_ERROR_GENERIC;
	phandle_args[0] = cpu_to_fdt32(arg->args[0]);
	phandle_args[1] = cpu_to_fdt32(arg->args[1]);

	itr_num = gic_dt_get_irq((const void *)phandle_args, 2, &type, &prio);
	if (itr_num == DT_INFO_INVALID_INTERRUPT)
		return TEE_ERROR_GENERIC;

	gic_op_add(chip, itr_num, type, prio);

	itr_desc->chip = chip;
	itr_desc->itr_num = itr_num;

	return TEE_SUCCESS;
}

static TEE_Result gic_probe(const void *fdt, int offs, const void *cd __unused)
{
	if (interrupt_register_provider(fdt, offs, dt_get_gic_chip_cb,
					&gic_data.chip))
		panic();

	return TEE_SUCCESS;
}

static const struct dt_device_match gic_match_table[] = {
	{ .compatible = "arm,cortex-a15-gic" },
	{ .compatible = "arm,cortex-a7-gic" },
	{ .compatible = "arm,cortex-a5-gic" },
	{ .compatible = "arm,cortex-a9-gic" },
	{ .compatible = "arm,gic-400" },
	{ }
};

DEFINE_DT_DRIVER(gic_dt_driver) = {
	.name = "gic",
	.match_table = gic_match_table,
	.probe = gic_probe,
};
#endif /*CFG_DT*/
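
/*
 * Illustrative usage (hypothetical platform code, not part of this file):
 * a non-DT platform would typically wire the driver in from its boot
 * path, e.g.
 *
 *	void plat_primary_init(void)
 *	{
 *		gic_init_v3(GICC_BASE_PA, GICD_BASE_PA, GICR_BASE_PA);
 *	}
 *
 *	void plat_secondary_init(void)
 *	{
 *		gic_cpu_init();
 *	}
 *
 * where GICC_BASE_PA, GICD_BASE_PA and GICR_BASE_PA stand for
 * platform-specific physical addresses. DT-enabled platforms instead get
 * the chip registered through gic_probe() via the match table above.
 */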