xref: /optee_os/core/drivers/gic.c (revision 67e55c51c9149ea549664b3981ad9032dcf4ce7f)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2017, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <config.h>
10 #include <compiler.h>
11 #include <drivers/gic.h>
12 #include <keep.h>
13 #include <kernel/dt.h>
14 #include <kernel/interrupt.h>
15 #include <kernel/panic.h>
16 #include <mm/core_memprot.h>
17 #include <mm/core_mmu.h>
18 #include <libfdt.h>
19 #include <util.h>
20 #include <io.h>
21 #include <trace.h>
22 
23 /* Offsets from gic.gicc_base */
24 #define GICC_CTLR		(0x000)
25 #define GICC_PMR		(0x004)
26 #define GICC_IAR		(0x00C)
27 #define GICC_EOIR		(0x010)
28 
29 #define GICC_CTLR_ENABLEGRP0	(1 << 0)
30 #define GICC_CTLR_ENABLEGRP1	(1 << 1)
31 #define GICD_CTLR_ENABLEGRP1S	(1 << 2)
32 #define GICC_CTLR_FIQEN		(1 << 3)
33 
34 /* Offsets from gic.gicd_base */
35 #define GICD_CTLR		(0x000)
36 #define GICD_TYPER		(0x004)
37 #define GICD_IGROUPR(n)		(0x080 + (n) * 4)
38 #define GICD_ISENABLER(n)	(0x100 + (n) * 4)
39 #define GICD_ICENABLER(n)	(0x180 + (n) * 4)
40 #define GICD_ISPENDR(n)		(0x200 + (n) * 4)
41 #define GICD_ICPENDR(n)		(0x280 + (n) * 4)
42 #define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
43 #define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
44 #define GICD_IGROUPMODR(n)	(0xd00 + (n) * 4)
45 #define GICD_SGIR		(0xF00)
46 
47 #define GICD_CTLR_ENABLEGRP0	(1 << 0)
48 #define GICD_CTLR_ENABLEGRP1	(1 << 1)
49 
50 /* Number of Private Peripheral Interrupt */
51 #define NUM_PPI	32
52 
53 /* Number of Software Generated Interrupt */
54 #define NUM_SGI			16
55 
56 /* Number of Non-secure Software Generated Interrupt */
57 #define NUM_NS_SGI		8
58 
59 /* Number of interrupts in one register */
60 #define NUM_INTS_PER_REG	32
61 
62 /* Number of targets in one register */
63 #define NUM_TARGETS_PER_REG	4
64 
65 /* Accessors to access ITARGETSRn */
66 #define ITARGETSR_FIELD_BITS	8
67 #define ITARGETSR_FIELD_MASK	0xff
68 
/* Maximum number of interrupts a GIC can support */
70 #define GIC_MAX_INTS		1020
71 
72 #define GICC_IAR_IT_ID_MASK	0x3ff
73 #define GICC_IAR_CPU_ID_MASK	0x7
74 #define GICC_IAR_CPU_ID_SHIFT	10
75 
/* Driver state: register base addresses plus the itr_chip handle */
struct gic_data {
	vaddr_t gicc_base;	/* CPU interface base; 0 with CFG_ARM_GICV3 (sysregs used) */
	vaddr_t gicd_base;	/* Distributor base */
	size_t max_it;		/* Highest implemented interrupt ID (probed at init) */
	struct itr_chip chip;	/* Chip handle registered via itr_init() */
};

/* Single GIC instance; __nex_bss places it in the nexus BSS section */
static struct gic_data gic_data __nex_bss;
84 
85 static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t type,
86 		       uint32_t prio);
87 static void gic_op_enable(struct itr_chip *chip, size_t it);
88 static void gic_op_disable(struct itr_chip *chip, size_t it);
89 static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
90 static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
91 			uint8_t cpu_mask);
92 static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
93 			uint8_t cpu_mask);
94 
/* Callbacks exposed to the generic interrupt framework */
static const struct itr_ops gic_ops = {
	.add = gic_op_add,
	.enable = gic_op_enable,
	.disable = gic_op_disable,
	.raise_pi = gic_op_raise_pi,
	.raise_sgi = gic_op_raise_sgi,
	.set_affinity = gic_op_set_affinity,
};
/* Keep the ops table resident when the pager is enabled */
DECLARE_KEEP_PAGER(gic_ops);
104 
/*
 * Probe the highest implemented interrupt ID.
 *
 * The CPU interface is temporarily disabled so that enabling lines in the
 * distributor cannot forward anything to this core. Each GICD_ISENABLERn is
 * then written with all ones and read back: bits for unimplemented interrupt
 * lines do not stick, so the highest bit that reads back set identifies the
 * largest interrupt ID. The previous enable state and the CPU interface
 * control register are restored before returning.
 */
static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
{
	int i;
	uint32_t old_ctlr;
	size_t ret = 0;
	/* Index of the last ISENABLER register covering GIC_MAX_INTS lines */
	const size_t max_regs = ((GIC_MAX_INTS + NUM_INTS_PER_REG - 1) /
					NUM_INTS_PER_REG) - 1;

	/*
	 * Probe which interrupt number is the largest.
	 */
#if defined(CFG_ARM_GICV3)
	old_ctlr = read_icc_ctlr();
	write_icc_ctlr(0);
#else
	old_ctlr = io_read32(gicc_base + GICC_CTLR);
	io_write32(gicc_base + GICC_CTLR, 0);
#endif
	/* Scan from the top register down: first set bit found is the max */
	for (i = max_regs; i >= 0; i--) {
		uint32_t old_reg;
		uint32_t reg;
		int b;

		old_reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ISENABLER(i), 0xffffffff);
		reg = io_read32(gicd_base + GICD_ISENABLER(i));
		/* Restore: clear every bit that was not enabled before */
		io_write32(gicd_base + GICD_ICENABLER(i), ~old_reg);
		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
			if (BIT32(b) & reg) {
				ret = i * NUM_INTS_PER_REG + b;
				goto out;
			}
		}
	}
out:
#if defined(CFG_ARM_GICV3)
	write_icc_ctlr(old_ctlr);
#else
	io_write32(gicc_base + GICC_CTLR, old_ctlr);
#endif
	return ret;
}
147 
/*
 * Per-CPU GIC setup, run on each core: configures the banked SGI/PPI group
 * assignments, opens the priority mask for non-secure interrupts and enables
 * the CPU interface. gic_init_base_addr() must have run first so the base
 * addresses are valid.
 */
void gic_cpu_init(void)
{
	struct gic_data *gd = &gic_data;

#if defined(CFG_ARM_GICV3)
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif

	/* per-CPU interrupts config:
	 * ID0-ID7(SGI)   for Non-secure interrupts
	 * ID8-ID15(SGI)  for Secure interrupts.
	 * All PPI config as Non-secure interrupts.
	 */
	io_write32(gd->gicd_base + GICD_IGROUPR(0), 0xffff00ff);

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR,
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		   GICC_CTLR_FIQEN);
#endif
}
180 
/*
 * One-time GIC initialization: maps the GIC registers, disables and clears
 * every interrupt, assigns all interrupts to the non-secure group except the
 * secure SGIs (ID8-ID15), sets the priority mask, enables the distributor
 * and CPU interface, and finally registers the chip with the interrupt
 * framework.
 */
void gic_init(paddr_t gicc_base_pa, paddr_t gicd_base_pa)
{
	struct gic_data *gd = &gic_data;
	size_t n;

	gic_init_base_addr(gicc_base_pa, gicd_base_pa);

	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
		/* Disable interrupts */
		io_write32(gd->gicd_base + GICD_ICENABLER(n), 0xffffffff);

		/* Make interrupts non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(n), 0xffffffff);

		/* Mark interrupts non-secure */
		if (n == 0) {
			/* per-CPU interrupts config:
			 * ID0-ID7(SGI)	  for Non-secure interrupts
			 * ID8-ID15(SGI)  for Secure interrupts.
			 * All PPI config as Non-secure interrupts.
			 */
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffff00ff);
		} else {
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffffffff);
		}
	}

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
	io_setbits32(gd->gicd_base + GICD_CTLR, GICD_CTLR_ENABLEGRP1S);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR, GICC_CTLR_FIQEN |
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1);
	io_setbits32(gd->gicd_base + GICD_CTLR,
		     GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1);
#endif

	itr_init(&gic_data.chip);
}
227 
228 static int gic_dt_get_irq(const uint32_t *properties, int count, uint32_t *type,
229 			  uint32_t *prio)
230 {
231 	int it_num = DT_INFO_INVALID_INTERRUPT;
232 
233 	if (type)
234 		*type = IRQ_TYPE_NONE;
235 
236 	if (prio)
237 		*prio = 0;
238 
239 	if (!properties || count < 2)
240 		return DT_INFO_INVALID_INTERRUPT;
241 
242 	it_num = fdt32_to_cpu(properties[1]);
243 
244 	switch (fdt32_to_cpu(properties[0])) {
245 	case 1:
246 		it_num += 16;
247 		break;
248 	case 0:
249 		it_num += 32;
250 		break;
251 	default:
252 		it_num = DT_INFO_INVALID_INTERRUPT;
253 	}
254 
255 	return it_num;
256 }
257 
/*
 * Map the GIC register banks and populate the driver state: translates the
 * physical distributor (and, for GICv2, CPU interface) base addresses to
 * virtual ones, probes the highest implemented interrupt and hooks up the
 * itr_chip ops. Panics if a mapping is missing. Requires the MMU.
 */
void gic_init_base_addr(paddr_t gicc_base_pa, paddr_t gicd_base_pa)
{
	struct gic_data *gd = &gic_data;
	vaddr_t gicc_base = 0;
	vaddr_t gicd_base = 0;

	assert(cpu_mmu_enabled());

	gicd_base = core_mmu_get_va(gicd_base_pa, MEM_AREA_IO_SEC,
				    GIC_DIST_REG_SIZE);
	if (!gicd_base)
		panic();

	/* GICv3 uses the ICC system registers instead of a mapped GICC */
	if (!IS_ENABLED(CFG_ARM_GICV3)) {
		gicc_base = core_mmu_get_va(gicc_base_pa, MEM_AREA_IO_SEC,
					    GIC_CPU_REG_SIZE);
		if (!gicc_base)
			panic();
	}

	gd->gicc_base = gicc_base;
	gd->gicd_base = gicd_base;
	gd->max_it = probe_max_it(gicc_base, gicd_base);
	gd->chip.ops = &gic_ops;

	if (IS_ENABLED(CFG_DT))
		gd->chip.dt_get_irq = gic_dt_get_irq;
}
286 
287 static void gic_it_add(struct gic_data *gd, size_t it)
288 {
289 	size_t idx = it / NUM_INTS_PER_REG;
290 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
291 
292 	assert(gd == &gic_data);
293 
294 	/* Disable the interrupt */
295 	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
296 	/* Make it non-pending */
297 	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
298 	/* Assign it to group0 */
299 	io_clrbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
300 #if defined(CFG_ARM_GICV3)
301 	/* Assign it to group1S */
302 	io_setbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
303 #endif
304 }
305 
306 static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
307 				uint8_t cpu_mask)
308 {
309 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
310 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
311 	uint32_t target, target_shift;
312 	vaddr_t itargetsr = gd->gicd_base +
313 			    GICD_ITARGETSR(it / NUM_TARGETS_PER_REG);
314 
315 	assert(gd == &gic_data);
316 
317 	/* Assigned to group0 */
318 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
319 
320 	/* Route it to selected CPUs */
321 	target = io_read32(itargetsr);
322 	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
323 	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
324 	target |= cpu_mask << target_shift;
325 	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA, target, itargetsr);
326 	io_write32(itargetsr, target);
327 	DMSG("cpu_mask: 0x%x", io_read32(itargetsr));
328 }
329 
330 static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
331 {
332 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
333 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
334 
335 	assert(gd == &gic_data);
336 
337 	/* Assigned to group0 */
338 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
339 
340 	/* Set prio it to selected CPUs */
341 	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
342 		prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
343 	io_write8(gd->gicd_base + GICD_IPRIORITYR(0) + it, prio);
344 }
345 
346 static void gic_it_enable(struct gic_data *gd, size_t it)
347 {
348 	size_t idx = it / NUM_INTS_PER_REG;
349 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
350 	vaddr_t base = gd->gicd_base;
351 
352 	assert(gd == &gic_data);
353 
354 	/* Assigned to group0 */
355 	assert(!(io_read32(base + GICD_IGROUPR(idx)) & mask));
356 
357 	/* Enable the interrupt */
358 	io_write32(base + GICD_ISENABLER(idx), mask);
359 }
360 
361 static void gic_it_disable(struct gic_data *gd, size_t it)
362 {
363 	size_t idx = it / NUM_INTS_PER_REG;
364 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
365 
366 	assert(gd == &gic_data);
367 
368 	/* Assigned to group0 */
369 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
370 
371 	/* Disable the interrupt */
372 	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
373 }
374 
375 static void gic_it_set_pending(struct gic_data *gd, size_t it)
376 {
377 	size_t idx = it / NUM_INTS_PER_REG;
378 	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);
379 
380 	assert(gd == &gic_data);
381 
382 	/* Should be Peripheral Interrupt */
383 	assert(it >= NUM_SGI);
384 
385 	/* Raise the interrupt */
386 	io_write32(gd->gicd_base + GICD_ISPENDR(idx), mask);
387 }
388 
389 static void gic_it_raise_sgi(struct gic_data *gd, size_t it,
390 		uint8_t cpu_mask, uint8_t group)
391 {
392 	uint32_t mask_id = it & 0xf;
393 	uint32_t mask_group = group & 0x1;
394 	uint32_t mask_cpu = cpu_mask & 0xff;
395 	uint32_t mask = (mask_id | SHIFT_U32(mask_group, 15) |
396 		SHIFT_U32(mask_cpu, 16));
397 
398 	assert(gd == &gic_data);
399 
400 	/* Should be Software Generated Interrupt */
401 	assert(it < NUM_SGI);
402 
403 	/* Raise the interrupt */
404 	io_write32(gd->gicd_base + GICD_SGIR, mask);
405 }
406 
/*
 * Acknowledge the highest-priority pending interrupt and return the raw
 * interrupt acknowledge value (ID plus, on GICv2, the source CPU bits).
 */
static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	/* GICv3: group 1 system register interface */
	return read_icc_iar1();
#else
	return io_read32(gd->gicc_base + GICC_IAR);
#endif
}
417 
/*
 * Signal end of interrupt. @eoir must be the unmodified value previously
 * returned by gic_read_iar() for this interrupt.
 */
static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	/* GICv3: group 1 system register interface */
	write_icc_eoir1(eoir);
#else
	io_write32(gd->gicc_base + GICC_EOIR, eoir);
#endif
}
428 
429 static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
430 {
431 	size_t idx = it / NUM_INTS_PER_REG;
432 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
433 
434 	assert(gd == &gic_data);
435 	return !!(io_read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
436 }
437 
438 static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
439 {
440 	size_t idx = it / NUM_INTS_PER_REG;
441 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
442 
443 	assert(gd == &gic_data);
444 	return !!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
445 }
446 
447 static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
448 {
449 	size_t reg_idx = it / NUM_TARGETS_PER_REG;
450 	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
451 				ITARGETSR_FIELD_BITS;
452 	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
453 	uint32_t target = io_read32(gd->gicd_base + GICD_ITARGETSR(reg_idx));
454 
455 	assert(gd == &gic_data);
456 	return (target & target_mask) >> target_shift;
457 }
458 
/* Debug aid: log the GIC control registers and every enabled interrupt. */
void gic_dump_state(void)
{
	struct gic_data *gd = &gic_data;
	int i = 0;

#if defined(CFG_ARM_GICV3)
	DMSG("GICC_CTLR: 0x%x", read_icc_ctlr());
#else
	DMSG("GICC_CTLR: 0x%x", io_read32(gd->gicc_base + GICC_CTLR));
#endif
	DMSG("GICD_CTLR: 0x%x", io_read32(gd->gicd_base + GICD_CTLR));

	/* Walk every implemented interrupt, report those enabled */
	for (i = 0; i <= (int)gd->max_it; i++) {
		if (gic_it_is_enabled(gd, i)) {
			DMSG("irq%d: enabled, group:%d, target:%x", i,
			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
		}
	}
}
478 
/*
 * Acknowledge the pending interrupt, dispatch it to the registered handler
 * when the ID is within the implemented range, then signal end of interrupt.
 * Out-of-range IDs (e.g. spurious interrupt IDs) are only logged; the raw
 * IAR value is still written back to EOIR.
 */
static void __maybe_unused gic_native_itr_handler(void)
{
	struct gic_data *gd = &gic_data;
	uint32_t iar = 0;
	uint32_t id = 0;

	iar = gic_read_iar(gd);
	id = iar & GICC_IAR_IT_ID_MASK;

	if (id <= gd->max_it)
		itr_handle(id);
	else
		DMSG("ignoring interrupt %" PRIu32, id);

	gic_write_eoir(gd, iar);
}
495 
#ifndef CFG_CORE_WORKAROUND_ARM_NMFI
/*
 * Override itr_core_handler() with core interrupt controller implementation.
 * Omitted when CFG_CORE_WORKAROUND_ARM_NMFI is set; the handler is then
 * provided elsewhere.
 */
void itr_core_handler(void)
{
	gic_native_itr_handler();
}
#endif /*CFG_CORE_WORKAROUND_ARM_NMFI*/
503 
/*
 * itr_ops.add: configure @it as a secure interrupt, routed to all CPUs with
 * a fixed priority of 0x1. The @type and @prio arguments from the generic
 * layer are ignored by this driver. Panics on an out-of-range ID.
 */
static void gic_op_add(struct itr_chip *chip, size_t it,
		       uint32_t type __unused,
		       uint32_t prio __unused)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_add(gd, it);
	/* Set the CPU mask to deliver interrupts to any online core */
	gic_it_set_cpu_mask(gd, it, 0xff);
	gic_it_set_prio(gd, it, 0x1);
}
520 
521 static void gic_op_enable(struct itr_chip *chip, size_t it)
522 {
523 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
524 
525 	assert(gd == &gic_data);
526 
527 	if (it > gd->max_it)
528 		panic();
529 
530 	gic_it_enable(gd, it);
531 }
532 
533 static void gic_op_disable(struct itr_chip *chip, size_t it)
534 {
535 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
536 
537 	assert(gd == &gic_data);
538 
539 	if (it > gd->max_it)
540 		panic();
541 
542 	gic_it_disable(gd, it);
543 }
544 
545 static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
546 {
547 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
548 
549 	assert(gd == &gic_data);
550 
551 	if (it > gd->max_it)
552 		panic();
553 
554 	gic_it_set_pending(gd, it);
555 }
556 
557 static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
558 			uint8_t cpu_mask)
559 {
560 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
561 
562 	assert(gd == &gic_data);
563 
564 	if (it > gd->max_it)
565 		panic();
566 
567 	if (it < NUM_NS_SGI)
568 		gic_it_raise_sgi(gd, it, cpu_mask, 1);
569 	else
570 		gic_it_raise_sgi(gd, it, cpu_mask, 0);
571 }
572 
573 static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
574 			uint8_t cpu_mask)
575 {
576 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
577 
578 	assert(gd == &gic_data);
579 
580 	if (it > gd->max_it)
581 		panic();
582 
583 	gic_it_set_cpu_mask(gd, it, cpu_mask);
584 }
585