/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <drivers/gic.h>
#include <kernel/interrupt.h>
#include <kernel/panic.h>
#include <util.h>
#include <io.h>
#include <trace.h>

/* Offsets from gic.gicc_base */
#define GICC_CTLR		(0x000)
#define GICC_PMR		(0x004)
#define GICC_IAR		(0x00C)
#define GICC_EOIR		(0x010)

#define GICC_CTLR_ENABLEGRP0	(1 << 0)
#define GICC_CTLR_ENABLEGRP1	(1 << 1)
#define GICC_CTLR_FIQEN		(1 << 3)

/* Offsets from gic.gicd_base */
#define GICD_CTLR		(0x000)
#define GICD_TYPER		(0x004)
#define GICD_IGROUPR(n)		(0x080 + (n) * 4)
#define GICD_ISENABLER(n)	(0x100 + (n) * 4)
#define GICD_ICENABLER(n)	(0x180 + (n) * 4)
#define GICD_ISPENDR(n)		(0x200 + (n) * 4)
#define GICD_ICPENDR(n)		(0x280 + (n) * 4)
#define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
#define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
#define GICD_SGIR		(0xF00)

#define GICD_CTLR_ENABLEGRP0	(1 << 0)
#define GICD_CTLR_ENABLEGRP1	(1 << 1)

/* Number of Private Peripheral Interrupts */
#define NUM_PPI			32

/* Number of Software Generated Interrupts */
#define NUM_SGI			16

/* Number of Non-secure Software Generated Interrupts */
#define NUM_NS_SGI		8

/* Number of interrupts in one register */
#define NUM_INTS_PER_REG	32

/* Number of targets in one register */
#define NUM_TARGETS_PER_REG	4

/* Accessors to access ITARGETSRn */
#define ITARGETSR_FIELD_BITS	8
#define ITARGETSR_FIELD_MASK	0xff

/* Maximum number of interrupts a GIC can support */
#define GIC_MAX_INTS		1020

#define GICC_IAR_IT_ID_MASK	0x3ff
#define GICC_IAR_CPU_ID_MASK	0x7
#define GICC_IAR_CPU_ID_SHIFT	10

static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t flags);
static void gic_op_enable(struct itr_chip *chip, size_t it);
static void gic_op_disable(struct itr_chip *chip, size_t it);
static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);
static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);

static const struct itr_ops gic_ops = {
	.add = gic_op_add,
	.enable = gic_op_enable,
	.disable = gic_op_disable,
	.raise_pi = gic_op_raise_pi,
	.raise_sgi = gic_op_raise_sgi,
	.set_affinity = gic_op_set_affinity,
};
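/*
 * Probe the largest implemented interrupt ID. The probe relies on the GIC
 * behaviour that enable bits of unimplemented interrupts read back as
 * zero: each GICD_ISENABLERn is written with all ones and read back,
 * walking from the highest register downwards, and the first bit that
 * sticks gives the result. The CPU interface is disabled around the probe
 * so that none of the temporarily enabled interrupts can be taken, and the
 * previous enable state is restored via GICD_ICENABLERn.
 */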
static size_t probe_max_it(vaddr_t gicc_base, vaddr_t gicd_base)
{
	int i;
	uint32_t old_ctlr;
	size_t ret = 0;
	const size_t max_regs = ((GIC_MAX_INTS + NUM_INTS_PER_REG - 1) /
					NUM_INTS_PER_REG) - 1;

	/*
	 * Probe which interrupt number is the largest.
	 */
	old_ctlr = read32(gicc_base + GICC_CTLR);
	write32(0, gicc_base + GICC_CTLR);
	for (i = max_regs; i >= 0; i--) {
		uint32_t old_reg;
		uint32_t reg;
		int b;

		old_reg = read32(gicd_base + GICD_ISENABLER(i));
		write32(0xffffffff, gicd_base + GICD_ISENABLER(i));
		reg = read32(gicd_base + GICD_ISENABLER(i));
		/* Restore the original enable state */
		write32(~old_reg, gicd_base + GICD_ICENABLER(i));
		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
			if (BIT32(b) & reg) {
				ret = i * NUM_INTS_PER_REG + b;
				goto out;
			}
		}
	}
out:
	write32(old_ctlr, gicc_base + GICC_CTLR);
	return ret;
}

void gic_cpu_init(struct gic_data *gd)
{
	assert(gd->gicd_base && gd->gicc_base);

	/*
	 * Per-CPU interrupt configuration:
	 * ID0-ID7(SGI) for Non-secure interrupts
	 * ID8-ID15(SGI) for Secure interrupts.
	 * All PPIs are configured as Non-secure interrupts.
	 */
	write32(0xffff00ff, gd->gicd_base + GICD_IGROUPR(0));

	/*
	 * Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
	write32(0x80, gd->gicc_base + GICC_PMR);

	/* Enable GIC */
	write32(GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 | GICC_CTLR_FIQEN,
		gd->gicc_base + GICC_CTLR);
}
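/*
 * gic_init() initializes the shared distributor state and enables both the
 * distributor and this CPU interface; it is intended to be called once
 * during boot. gic_cpu_init() above only touches state that is banked per
 * CPU interface (GICD_IGROUPR(0), GICC_PMR and GICC_CTLR), so additional
 * cores can call it to get the same SGI/PPI grouping, priority mask and
 * CPU interface enables.
 */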
void gic_init(struct gic_data *gd, vaddr_t gicc_base, vaddr_t gicd_base)
{
	size_t n;

	gic_init_base_addr(gd, gicc_base, gicd_base);

	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
		/* Disable interrupts */
		write32(0xffffffff, gd->gicd_base + GICD_ICENABLER(n));

		/* Make interrupts non-pending */
		write32(0xffffffff, gd->gicd_base + GICD_ICPENDR(n));

		/* Mark interrupts non-secure */
		if (n == 0) {
			/*
			 * Per-CPU interrupt configuration:
			 * ID0-ID7(SGI) for Non-secure interrupts
			 * ID8-ID15(SGI) for Secure interrupts.
			 * All PPIs are configured as Non-secure interrupts.
			 */
			write32(0xffff00ff, gd->gicd_base + GICD_IGROUPR(n));
		} else {
			write32(0xffffffff, gd->gicd_base + GICD_IGROUPR(n));
		}
	}

	/*
	 * Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
	write32(0x80, gd->gicc_base + GICC_PMR);

	/* Enable GIC */
	write32(GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 | GICC_CTLR_FIQEN,
		gd->gicc_base + GICC_CTLR);
	write32(GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1,
		gd->gicd_base + GICD_CTLR);
}

void gic_init_base_addr(struct gic_data *gd, vaddr_t gicc_base,
			vaddr_t gicd_base)
{
	gd->gicc_base = gicc_base;
	gd->gicd_base = gicd_base;
	gd->max_it = probe_max_it(gicc_base, gicd_base);
	gd->chip.ops = &gic_ops;
}

static void gic_it_add(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	/* Disable the interrupt */
	write32(mask, gd->gicd_base + GICD_ICENABLER(idx));
	/* Make it non-pending */
	write32(mask, gd->gicd_base + GICD_ICPENDR(idx));
	/* Assign it to group0 */
	write32(read32(gd->gicd_base + GICD_IGROUPR(idx)) & ~mask,
		gd->gicd_base + GICD_IGROUPR(idx));
}

static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
				uint8_t cpu_mask)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
	uint32_t target, target_shift;

	/* Assigned to group0 */
	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Route it to selected CPUs */
	target = read32(gd->gicd_base +
			GICD_ITARGETSR(it / NUM_TARGETS_PER_REG));
	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
	target |= cpu_mask << target_shift;
	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA,
	     target, gd->gicd_base + GICD_ITARGETSR(it / NUM_TARGETS_PER_REG));
	write32(target,
		gd->gicd_base + GICD_ITARGETSR(it / NUM_TARGETS_PER_REG));
	DMSG("cpu_mask: 0x%x\n",
	     read32(gd->gicd_base + GICD_ITARGETSR(it / NUM_TARGETS_PER_REG)));
}

static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);

	/* Assigned to group0 */
	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Set the priority of the interrupt */
	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
	     prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
	write8(prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
}

static void gic_it_enable(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	/* Assigned to group0 */
	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
	if (it >= NUM_SGI) {
		/*
		 * Not enabled yet, except Software Generated Interrupt
		 * which is implementation defined
		 */
		assert(!(read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask));
	}

	/* Enable the interrupt */
	write32(mask, gd->gicd_base + GICD_ISENABLER(idx));
}

static void gic_it_disable(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	/* Assigned to group0 */
	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Disable the interrupt */
	write32(mask, gd->gicd_base + GICD_ICENABLER(idx));
}

static void gic_it_set_pending(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);

	/* Should be a Peripheral Interrupt */
	assert(it >= NUM_SGI);
	/* Assigned to group0 */
	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Raise the interrupt */
	write32(mask, gd->gicd_base + GICD_ISPENDR(idx));
}
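/*
 * GICD_SGIR layout, as used below: bits [3:0] hold the SGI ID, bit [15]
 * (NSATT) selects the interrupt group the SGI is raised for, and bits
 * [23:16] form the CPU target list. The target list filter field is left
 * at zero, so the SGI is forwarded only to the CPUs named in cpu_mask.
 */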
static void gic_it_raise_sgi(struct gic_data *gd, size_t it,
			     uint8_t cpu_mask, uint8_t group)
{
	uint32_t mask_id = it & 0xf;
	uint32_t mask_group = group & 0x1;
	uint32_t mask_cpu = cpu_mask & 0xff;
	uint32_t mask = (mask_id | SHIFT_U32(mask_group, 15) |
			 SHIFT_U32(mask_cpu, 16));

	/* Should be Software Generated Interrupt */
	assert(it < NUM_SGI);

	/* Raise the interrupt */
	write32(mask, gd->gicd_base + GICD_SGIR);
}

static uint32_t gic_read_iar(struct gic_data *gd)
{
	return read32(gd->gicc_base + GICC_IAR);
}

static void gic_write_eoir(struct gic_data *gd, uint32_t eoir)
{
	write32(eoir, gd->gicc_base + GICC_EOIR);
}

static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	return !!(read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
}

static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	return !!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
}

static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
{
	size_t reg_idx = it / NUM_TARGETS_PER_REG;
	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
				ITARGETSR_FIELD_BITS;
	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
	uint32_t target = read32(gd->gicd_base + GICD_ITARGETSR(reg_idx)) &
			  target_mask;

	return target >> target_shift;
}

void gic_dump_state(struct gic_data *gd)
{
	int i;

	DMSG("GICC_CTLR: 0x%x", read32(gd->gicc_base + GICC_CTLR));
	DMSG("GICD_CTLR: 0x%x", read32(gd->gicd_base + GICD_CTLR));

	for (i = 0; i < (int)gd->max_it; i++) {
		if (gic_it_is_enabled(gd, i)) {
			DMSG("irq%d: enabled, group:%d, target:%x", i,
			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
		}
	}
}
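/*
 * Reading GICC_IAR returns and acknowledges the highest priority pending
 * interrupt for this CPU interface; the interrupt is completed by writing
 * the same value back to GICC_EOIR. IDs at or above max_it (which covers
 * the spurious IDs 1022 and 1023) are not dispatched to a handler.
 */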
void gic_it_handle(struct gic_data *gd)
{
	uint32_t iar;
	uint32_t id;

	iar = gic_read_iar(gd);
	id = iar & GICC_IAR_IT_ID_MASK;

	if (id < gd->max_it)
		itr_handle(id);
	else
		DMSG("ignoring interrupt %" PRIu32, id);

	gic_write_eoir(gd, iar);
}

static void gic_op_add(struct itr_chip *chip, size_t it,
		       uint32_t flags __unused)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	if (it >= gd->max_it)
		panic();

	gic_it_add(gd, it);
	/* Set the CPU mask to deliver interrupts to any online core */
	gic_it_set_cpu_mask(gd, it, 0xff);
	gic_it_set_prio(gd, it, 0x1);
}

static void gic_op_enable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	if (it >= gd->max_it)
		panic();

	gic_it_enable(gd, it);
}

static void gic_op_disable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	if (it >= gd->max_it)
		panic();

	gic_it_disable(gd, it);
}

static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	if (it >= gd->max_it)
		panic();

	gic_it_set_pending(gd, it);
}

static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			     uint8_t cpu_mask)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	if (it >= gd->max_it)
		panic();

	if (it < NUM_NS_SGI)
		gic_it_raise_sgi(gd, it, cpu_mask, 1);
	else
		gic_it_raise_sgi(gd, it, cpu_mask, 0);
}

static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
				uint8_t cpu_mask)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	if (it >= gd->max_it)
		panic();

	gic_it_set_cpu_mask(gd, it, cpu_mask);
}