1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2016-2017, Linaro Limited 4 * Copyright (c) 2014, STMicroelectronics International N.V. 5 */ 6 7 #include <arm.h> 8 #include <assert.h> 9 #include <config.h> 10 #include <drivers/gic.h> 11 #include <keep.h> 12 #include <kernel/dt.h> 13 #include <kernel/interrupt.h> 14 #include <kernel/panic.h> 15 #include <libfdt.h> 16 #include <util.h> 17 #include <io.h> 18 #include <trace.h> 19 20 /* Offsets from gic.gicc_base */ 21 #define GICC_CTLR (0x000) 22 #define GICC_PMR (0x004) 23 #define GICC_IAR (0x00C) 24 #define GICC_EOIR (0x010) 25 26 #define GICC_CTLR_ENABLEGRP0 (1 << 0) 27 #define GICC_CTLR_ENABLEGRP1 (1 << 1) 28 #define GICD_CTLR_ENABLEGRP1S (1 << 2) 29 #define GICC_CTLR_FIQEN (1 << 3) 30 31 /* Offsets from gic.gicd_base */ 32 #define GICD_CTLR (0x000) 33 #define GICD_TYPER (0x004) 34 #define GICD_IGROUPR(n) (0x080 + (n) * 4) 35 #define GICD_ISENABLER(n) (0x100 + (n) * 4) 36 #define GICD_ICENABLER(n) (0x180 + (n) * 4) 37 #define GICD_ISPENDR(n) (0x200 + (n) * 4) 38 #define GICD_ICPENDR(n) (0x280 + (n) * 4) 39 #define GICD_IPRIORITYR(n) (0x400 + (n) * 4) 40 #define GICD_ITARGETSR(n) (0x800 + (n) * 4) 41 #define GICD_IGROUPMODR(n) (0xd00 + (n) * 4) 42 #define GICD_SGIR (0xF00) 43 44 #define GICD_CTLR_ENABLEGRP0 (1 << 0) 45 #define GICD_CTLR_ENABLEGRP1 (1 << 1) 46 47 /* Number of Private Peripheral Interrupt */ 48 #define NUM_PPI 32 49 50 /* Number of Software Generated Interrupt */ 51 #define NUM_SGI 16 52 53 /* Number of Non-secure Software Generated Interrupt */ 54 #define NUM_NS_SGI 8 55 56 /* Number of interrupts in one register */ 57 #define NUM_INTS_PER_REG 32 58 59 /* Number of targets in one register */ 60 #define NUM_TARGETS_PER_REG 4 61 62 /* Accessors to access ITARGETSRn */ 63 #define ITARGETSR_FIELD_BITS 8 64 #define ITARGETSR_FIELD_MASK 0xff 65 66 /* Maximum number of interrups a GIC can support */ 67 #define GIC_MAX_INTS 1020 68 69 #define GICC_IAR_IT_ID_MASK 0x3ff 70 #define GICC_IAR_CPU_ID_MASK 0x7 71 #define GICC_IAR_CPU_ID_SHIFT 10 72 73 static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t flags); 74 static void gic_op_enable(struct itr_chip *chip, size_t it); 75 static void gic_op_disable(struct itr_chip *chip, size_t it); 76 static void gic_op_raise_pi(struct itr_chip *chip, size_t it); 77 static void gic_op_raise_sgi(struct itr_chip *chip, size_t it, 78 uint8_t cpu_mask); 79 static void gic_op_set_affinity(struct itr_chip *chip, size_t it, 80 uint8_t cpu_mask); 81 82 static const struct itr_ops gic_ops = { 83 .add = gic_op_add, 84 .enable = gic_op_enable, 85 .disable = gic_op_disable, 86 .raise_pi = gic_op_raise_pi, 87 .raise_sgi = gic_op_raise_sgi, 88 .set_affinity = gic_op_set_affinity, 89 }; 90 DECLARE_KEEP_PAGER(gic_ops); 91 92 static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base) 93 { 94 int i; 95 uint32_t old_ctlr; 96 size_t ret = 0; 97 const size_t max_regs = ((GIC_MAX_INTS + NUM_INTS_PER_REG - 1) / 98 NUM_INTS_PER_REG) - 1; 99 100 /* 101 * Probe which interrupt number is the largest. 102 */ 103 #if defined(CFG_ARM_GICV3) 104 old_ctlr = read_icc_ctlr(); 105 write_icc_ctlr(0); 106 #else 107 old_ctlr = io_read32(gicc_base + GICC_CTLR); 108 io_write32(gicc_base + GICC_CTLR, 0); 109 #endif 110 for (i = max_regs; i >= 0; i--) { 111 uint32_t old_reg; 112 uint32_t reg; 113 int b; 114 115 old_reg = io_read32(gicd_base + GICD_ISENABLER(i)); 116 io_write32(gicd_base + GICD_ISENABLER(i), 0xffffffff); 117 reg = io_read32(gicd_base + GICD_ISENABLER(i)); 118 io_write32(gicd_base + GICD_ICENABLER(i), ~old_reg); 119 for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) { 120 if (BIT32(b) & reg) { 121 ret = i * NUM_INTS_PER_REG + b; 122 goto out; 123 } 124 } 125 } 126 out: 127 #if defined(CFG_ARM_GICV3) 128 write_icc_ctlr(old_ctlr); 129 #else 130 io_write32(gicc_base + GICC_CTLR, old_ctlr); 131 #endif 132 return ret; 133 } 134 135 void gic_cpu_init(struct gic_data *gd) 136 { 137 #if defined(CFG_ARM_GICV3) 138 assert(gd->gicd_base); 139 #else 140 assert(gd->gicd_base && gd->gicc_base); 141 #endif 142 143 /* per-CPU interrupts config: 144 * ID0-ID7(SGI) for Non-secure interrupts 145 * ID8-ID15(SGI) for Secure interrupts. 146 * All PPI config as Non-secure interrupts. 147 */ 148 io_write32(gd->gicd_base + GICD_IGROUPR(0), 0xffff00ff); 149 150 /* Set the priority mask to permit Non-secure interrupts, and to 151 * allow the Non-secure world to adjust the priority mask itself 152 */ 153 #if defined(CFG_ARM_GICV3) 154 write_icc_pmr(0x80); 155 write_icc_igrpen1(1); 156 #else 157 io_write32(gd->gicc_base + GICC_PMR, 0x80); 158 159 /* Enable GIC */ 160 io_write32(gd->gicc_base + GICC_CTLR, 161 GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 | 162 GICC_CTLR_FIQEN); 163 #endif 164 } 165 166 void gic_init(struct gic_data *gd, vaddr_t gicc_base __maybe_unused, 167 vaddr_t gicd_base) 168 { 169 size_t n; 170 171 gic_init_base_addr(gd, gicc_base, gicd_base); 172 173 for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) { 174 /* Disable interrupts */ 175 io_write32(gd->gicd_base + GICD_ICENABLER(n), 0xffffffff); 176 177 /* Make interrupts non-pending */ 178 io_write32(gd->gicd_base + GICD_ICPENDR(n), 0xffffffff); 179 180 /* Mark interrupts non-secure */ 181 if (n == 0) { 182 /* per-CPU inerrupts config: 183 * ID0-ID7(SGI) for Non-secure interrupts 184 * ID8-ID15(SGI) for Secure interrupts. 185 * All PPI config as Non-secure interrupts. 186 */ 187 io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffff00ff); 188 } else { 189 io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffffffff); 190 } 191 } 192 193 /* Set the priority mask to permit Non-secure interrupts, and to 194 * allow the Non-secure world to adjust the priority mask itself 195 */ 196 #if defined(CFG_ARM_GICV3) 197 write_icc_pmr(0x80); 198 write_icc_igrpen1(1); 199 io_setbits32(gd->gicd_base + GICD_CTLR, GICD_CTLR_ENABLEGRP1S); 200 #else 201 io_write32(gd->gicc_base + GICC_PMR, 0x80); 202 203 /* Enable GIC */ 204 io_write32(gd->gicc_base + GICC_CTLR, GICC_CTLR_FIQEN | 205 GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1); 206 io_setbits32(gd->gicd_base + GICD_CTLR, 207 GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1); 208 #endif 209 } 210 211 static int gic_dt_get_irq(const uint32_t *properties, int len) 212 { 213 int it_num = DT_INFO_INVALID_INTERRUPT; 214 215 if (!properties || len < 2) 216 return DT_INFO_INVALID_INTERRUPT; 217 218 it_num = fdt32_to_cpu(properties[1]); 219 220 switch (fdt32_to_cpu(properties[0])) { 221 case 1: 222 it_num += 16; 223 break; 224 case 0: 225 it_num += 32; 226 break; 227 default: 228 it_num = DT_INFO_INVALID_INTERRUPT; 229 } 230 231 return it_num; 232 } 233 234 void gic_init_base_addr(struct gic_data *gd, vaddr_t gicc_base __maybe_unused, 235 vaddr_t gicd_base) 236 { 237 gd->gicc_base = gicc_base; 238 gd->gicd_base = gicd_base; 239 gd->max_it = probe_max_it(gicc_base, gicd_base); 240 gd->chip.ops = &gic_ops; 241 242 if (IS_ENABLED(CFG_DT)) 243 gd->chip.dt_get_irq = gic_dt_get_irq; 244 } 245 246 static void gic_it_add(struct gic_data *gd, size_t it) 247 { 248 size_t idx = it / NUM_INTS_PER_REG; 249 uint32_t mask = 1 << (it % NUM_INTS_PER_REG); 250 251 /* Disable the interrupt */ 252 io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask); 253 /* Make it non-pending */ 254 io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask); 255 /* Assign it to group0 */ 256 io_clrbits32(gd->gicd_base + GICD_IGROUPR(idx), mask); 257 #if defined(CFG_ARM_GICV3) 258 /* Assign it to group1S */ 259 io_setbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask); 260 #endif 261 } 262 263 static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it, 264 uint8_t cpu_mask) 265 { 266 size_t idx __maybe_unused = it / NUM_INTS_PER_REG; 267 uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG); 268 uint32_t target, target_shift; 269 vaddr_t itargetsr = gd->gicd_base + 270 GICD_ITARGETSR(it / NUM_TARGETS_PER_REG); 271 272 /* Assigned to group0 */ 273 assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask)); 274 275 /* Route it to selected CPUs */ 276 target = io_read32(itargetsr); 277 target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS; 278 target &= ~(ITARGETSR_FIELD_MASK << target_shift); 279 target |= cpu_mask << target_shift; 280 DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA, target, itargetsr); 281 io_write32(itargetsr, target); 282 DMSG("cpu_mask: 0x%x", io_read32(itargetsr)); 283 } 284 285 static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio) 286 { 287 size_t idx __maybe_unused = it / NUM_INTS_PER_REG; 288 uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG); 289 290 /* Assigned to group0 */ 291 assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask)); 292 293 /* Set prio it to selected CPUs */ 294 DMSG("prio: writing 0x%x to 0x%" PRIxVA, 295 prio, gd->gicd_base + GICD_IPRIORITYR(0) + it); 296 io_write8(gd->gicd_base + GICD_IPRIORITYR(0) + it, prio); 297 } 298 299 static void gic_it_enable(struct gic_data *gd, size_t it) 300 { 301 size_t idx = it / NUM_INTS_PER_REG; 302 uint32_t mask = 1 << (it % NUM_INTS_PER_REG); 303 vaddr_t base = gd->gicd_base; 304 305 /* Assigned to group0 */ 306 assert(!(io_read32(base + GICD_IGROUPR(idx)) & mask)); 307 308 /* Enable the interrupt */ 309 io_write32(base + GICD_ISENABLER(idx), mask); 310 } 311 312 static void gic_it_disable(struct gic_data *gd, size_t it) 313 { 314 size_t idx = it / NUM_INTS_PER_REG; 315 uint32_t mask = 1 << (it % NUM_INTS_PER_REG); 316 317 /* Assigned to group0 */ 318 assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask)); 319 320 /* Disable the interrupt */ 321 io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask); 322 } 323 324 static void gic_it_set_pending(struct gic_data *gd, size_t it) 325 { 326 size_t idx = it / NUM_INTS_PER_REG; 327 uint32_t mask = BIT32(it % NUM_INTS_PER_REG); 328 329 /* Should be Peripheral Interrupt */ 330 assert(it >= NUM_SGI); 331 332 /* Raise the interrupt */ 333 io_write32(gd->gicd_base + GICD_ISPENDR(idx), mask); 334 } 335 336 static void gic_it_raise_sgi(struct gic_data *gd, size_t it, 337 uint8_t cpu_mask, uint8_t group) 338 { 339 uint32_t mask_id = it & 0xf; 340 uint32_t mask_group = group & 0x1; 341 uint32_t mask_cpu = cpu_mask & 0xff; 342 uint32_t mask = (mask_id | SHIFT_U32(mask_group, 15) | 343 SHIFT_U32(mask_cpu, 16)); 344 345 /* Should be Software Generated Interrupt */ 346 assert(it < NUM_SGI); 347 348 /* Raise the interrupt */ 349 io_write32(gd->gicd_base + GICD_SGIR, mask); 350 } 351 352 static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused) 353 { 354 #if defined(CFG_ARM_GICV3) 355 return read_icc_iar1(); 356 #else 357 return io_read32(gd->gicc_base + GICC_IAR); 358 #endif 359 } 360 361 static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir) 362 { 363 #if defined(CFG_ARM_GICV3) 364 write_icc_eoir1(eoir); 365 #else 366 io_write32(gd->gicc_base + GICC_EOIR, eoir); 367 #endif 368 } 369 370 static bool gic_it_is_enabled(struct gic_data *gd, size_t it) 371 { 372 size_t idx = it / NUM_INTS_PER_REG; 373 uint32_t mask = 1 << (it % NUM_INTS_PER_REG); 374 return !!(io_read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask); 375 } 376 377 static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it) 378 { 379 size_t idx = it / NUM_INTS_PER_REG; 380 uint32_t mask = 1 << (it % NUM_INTS_PER_REG); 381 return !!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask); 382 } 383 384 static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it) 385 { 386 size_t reg_idx = it / NUM_TARGETS_PER_REG; 387 uint32_t target_shift = (it % NUM_TARGETS_PER_REG) * 388 ITARGETSR_FIELD_BITS; 389 uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift; 390 uint32_t target = io_read32(gd->gicd_base + GICD_ITARGETSR(reg_idx)); 391 392 return (target & target_mask) >> target_shift; 393 } 394 395 void gic_dump_state(struct gic_data *gd) 396 { 397 int i; 398 399 #if defined(CFG_ARM_GICV3) 400 DMSG("GICC_CTLR: 0x%x", read_icc_ctlr()); 401 #else 402 DMSG("GICC_CTLR: 0x%x", io_read32(gd->gicc_base + GICC_CTLR)); 403 #endif 404 DMSG("GICD_CTLR: 0x%x", io_read32(gd->gicd_base + GICD_CTLR)); 405 406 for (i = 0; i <= (int)gd->max_it; i++) { 407 if (gic_it_is_enabled(gd, i)) { 408 DMSG("irq%d: enabled, group:%d, target:%x", i, 409 gic_it_get_group(gd, i), gic_it_get_target(gd, i)); 410 } 411 } 412 } 413 414 void gic_it_handle(struct gic_data *gd) 415 { 416 uint32_t iar; 417 uint32_t id; 418 419 iar = gic_read_iar(gd); 420 id = iar & GICC_IAR_IT_ID_MASK; 421 422 if (id <= gd->max_it) 423 itr_handle(id); 424 else 425 DMSG("ignoring interrupt %" PRIu32, id); 426 427 gic_write_eoir(gd, iar); 428 } 429 430 static void gic_op_add(struct itr_chip *chip, size_t it, 431 uint32_t flags __unused) 432 { 433 struct gic_data *gd = container_of(chip, struct gic_data, chip); 434 435 if (it > gd->max_it) 436 panic(); 437 438 gic_it_add(gd, it); 439 /* Set the CPU mask to deliver interrupts to any online core */ 440 gic_it_set_cpu_mask(gd, it, 0xff); 441 gic_it_set_prio(gd, it, 0x1); 442 } 443 444 static void gic_op_enable(struct itr_chip *chip, size_t it) 445 { 446 struct gic_data *gd = container_of(chip, struct gic_data, chip); 447 448 if (it > gd->max_it) 449 panic(); 450 451 gic_it_enable(gd, it); 452 } 453 454 static void gic_op_disable(struct itr_chip *chip, size_t it) 455 { 456 struct gic_data *gd = container_of(chip, struct gic_data, chip); 457 458 if (it > gd->max_it) 459 panic(); 460 461 gic_it_disable(gd, it); 462 } 463 464 static void gic_op_raise_pi(struct itr_chip *chip, size_t it) 465 { 466 struct gic_data *gd = container_of(chip, struct gic_data, chip); 467 468 if (it > gd->max_it) 469 panic(); 470 471 gic_it_set_pending(gd, it); 472 } 473 474 static void gic_op_raise_sgi(struct itr_chip *chip, size_t it, 475 uint8_t cpu_mask) 476 { 477 struct gic_data *gd = container_of(chip, struct gic_data, chip); 478 479 if (it > gd->max_it) 480 panic(); 481 482 if (it < NUM_NS_SGI) 483 gic_it_raise_sgi(gd, it, cpu_mask, 1); 484 else 485 gic_it_raise_sgi(gd, it, cpu_mask, 0); 486 } 487 static void gic_op_set_affinity(struct itr_chip *chip, size_t it, 488 uint8_t cpu_mask) 489 { 490 struct gic_data *gd = container_of(chip, struct gic_data, chip); 491 492 if (it > gd->max_it) 493 panic(); 494 495 gic_it_set_cpu_mask(gd, it, cpu_mask); 496 } 497