xref: /optee_os/core/drivers/gic.c (revision b357d34fe91f4e7f6e0eacea17a7fbe5f6c01e7e)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2017, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <config.h>
10 #include <compiler.h>
11 #include <drivers/gic.h>
12 #include <keep.h>
13 #include <kernel/dt.h>
14 #include <kernel/interrupt.h>
15 #include <kernel/panic.h>
16 #include <mm/core_memprot.h>
17 #include <mm/core_mmu.h>
18 #include <libfdt.h>
19 #include <util.h>
20 #include <io.h>
21 #include <trace.h>
22 
/* Offsets from gic.gicc_base */
#define GICC_CTLR		(0x000)	/* CPU interface control */
#define GICC_PMR		(0x004)	/* Priority mask */
#define GICC_IAR		(0x00C)	/* Interrupt acknowledge */
#define GICC_EOIR		(0x010)	/* End of interrupt */

#define GICC_CTLR_ENABLEGRP0	(1 << 0)
#define GICC_CTLR_ENABLEGRP1	(1 << 1)
/* NOTE(review): a GICv3 GICD_CTLR bit (EnableGrp1S), despite sitting here */
#define GICD_CTLR_ENABLEGRP1S	(1 << 2)
#define GICC_CTLR_FIQEN		(1 << 3)	/* Signal group 0 as FIQ */

/* Offsets from gic.gicd_base */
#define GICD_CTLR		(0x000)	/* Distributor control */
#define GICD_TYPER		(0x004)	/* Interrupt controller type */
#define GICD_IGROUPR(n)		(0x080 + (n) * 4)	/* Interrupt group */
#define GICD_ISENABLER(n)	(0x100 + (n) * 4)	/* Set-enable */
#define GICD_ICENABLER(n)	(0x180 + (n) * 4)	/* Clear-enable */
#define GICD_ISPENDR(n)		(0x200 + (n) * 4)	/* Set-pending */
#define GICD_ICPENDR(n)		(0x280 + (n) * 4)	/* Clear-pending */
#define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)	/* Priority, 1 byte per ID */
#define GICD_ITARGETSR(n)	(0x800 + (n) * 4)	/* CPU targets, 1 byte per ID */
#define GICD_IGROUPMODR(n)	(0xd00 + (n) * 4)	/* Group modifier (GICv3) */
#define GICD_SGIR		(0xF00)	/* Software generated interrupt */

#define GICD_CTLR_ENABLEGRP0	(1 << 0)
#define GICD_CTLR_ENABLEGRP1	(1 << 1)

/* Number of Private Peripheral Interrupt */
#define NUM_PPI	32

/* Number of Software Generated Interrupt */
#define NUM_SGI			16

/* Number of Non-secure Software Generated Interrupt */
#define NUM_NS_SGI		8

/* Number of interrupts in one register */
#define NUM_INTS_PER_REG	32

/* Number of targets in one register */
#define NUM_TARGETS_PER_REG	4

/* Accessors to access ITARGETSRn */
#define ITARGETSR_FIELD_BITS	8
#define ITARGETSR_FIELD_MASK	0xff

/* GICD_TYPER.ITLinesNumber: number of 32-interrupt blocks, minus one */
#define GICD_TYPER_IT_LINES_NUM_MASK	0x1f
/* GICC_IAR fields: interrupt ID, and source CPU ID for SGIs */
#define GICC_IAR_IT_ID_MASK	0x3ff
#define GICC_IAR_CPU_ID_MASK	0x7
#define GICC_IAR_CPU_ID_SHIFT	10
/*
 * Driver state: virtual bases of the CPU interface (GICC) and distributor
 * (GICD) register banks, the largest implemented interrupt ID, and the
 * itr_chip handle registered with the interrupt framework.
 */
struct gic_data {
	vaddr_t gicc_base;	/* 0 with CFG_ARM_GICV3 (sysreg access) */
	vaddr_t gicd_base;
	size_t max_it;		/* Highest valid interrupt ID */
	struct itr_chip chip;
};

/* Single system-wide instance, kept in non-paged (nexus) BSS */
static struct gic_data gic_data __nex_bss;
82 
/* Handlers implementing the itr_chip interface, defined further down */
static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t type,
		       uint32_t prio);
static void gic_op_enable(struct itr_chip *chip, size_t it);
static void gic_op_disable(struct itr_chip *chip, size_t it);
static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);
static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);

/* Operation table exposed through gic_data.chip */
static const struct itr_ops gic_ops = {
	.add = gic_op_add,
	.enable = gic_op_enable,
	.disable = gic_op_disable,
	.raise_pi = gic_op_raise_pi,
	.raise_sgi = gic_op_raise_sgi,
	.set_affinity = gic_op_set_affinity,
};
/* Keep the ops table resident when the pager is enabled */
DECLARE_KEEP_PAGER(gic_ops);
102 
/*
 * Return the largest implemented interrupt ID on this GIC.
 *
 * The CPU interface is disabled (CTLR written to 0) during the probe and
 * restored on exit.  Each GICD_ISENABLER register, from the highest index
 * reported by GICD_TYPER.ITLinesNumber downwards, is written with all-ones
 * and read back: bits that stick correspond to implemented interrupt lines
 * (per the GIC architecture, unimplemented bits read as zero).  The probe
 * restores the previous enable state via GICD_ICENABLER and returns the
 * highest set bit found (0 if none).
 *
 * @gicc_base is unused with CFG_ARM_GICV3, where the CPU interface is
 * reached through ICC system registers instead of MMIO.
 */
static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
{
	int i;
	uint32_t old_ctlr;
	size_t ret = 0;
	size_t max_regs = io_read32(gicd_base + GICD_TYPER) &
			  GICD_TYPER_IT_LINES_NUM_MASK;

	/*
	 * Probe which interrupt number is the largest.
	 */
#if defined(CFG_ARM_GICV3)
	old_ctlr = read_icc_ctlr();
	write_icc_ctlr(0);
#else
	old_ctlr = io_read32(gicc_base + GICC_CTLR);
	io_write32(gicc_base + GICC_CTLR, 0);
#endif
	for (i = max_regs; i >= 0; i--) {
		uint32_t old_reg;
		uint32_t reg;
		int b;

		old_reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ISENABLER(i), 0xffffffff);
		reg = io_read32(gicd_base + GICD_ISENABLER(i));
		/* Clear only the bits that were not enabled before */
		io_write32(gicd_base + GICD_ICENABLER(i), ~old_reg);
		/* Scan from the top: first set bit is the answer */
		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
			if (BIT32(b) & reg) {
				ret = i * NUM_INTS_PER_REG + b;
				goto out;
			}
		}
	}
out:
#if defined(CFG_ARM_GICV3)
	write_icc_ctlr(old_ctlr);
#else
	io_write32(gicc_base + GICC_CTLR, old_ctlr);
#endif
	return ret;
}
145 
/*
 * Per-CPU GIC setup, run on each core.
 *
 * Assigns SGIs 0-7 and all PPIs to the non-secure group and keeps SGIs
 * 8-15 secure, then programs the priority mask and enables the CPU
 * interface (ICC system registers with CFG_ARM_GICV3, GICC MMIO
 * otherwise).  Requires gic_init_base_addr() to have run first.
 */
void gic_cpu_init(void)
{
	struct gic_data *gd = &gic_data;

#if defined(CFG_ARM_GICV3)
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif

	/* per-CPU interrupts config:
	 * ID0-ID7(SGI)   for Non-secure interrupts
	 * ID8-ID15(SGI)  for Secure interrupts.
	 * All PPI config as Non-secure interrupts.
	 */
	io_write32(gd->gicd_base + GICD_IGROUPR(0), 0xffff00ff);

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR,
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		   GICC_CTLR_FIQEN);
#endif
}
178 
179 static int gic_dt_get_irq(const uint32_t *properties, int count, uint32_t *type,
180 			  uint32_t *prio)
181 {
182 	int it_num = DT_INFO_INVALID_INTERRUPT;
183 
184 	if (type)
185 		*type = IRQ_TYPE_NONE;
186 
187 	if (prio)
188 		*prio = 0;
189 
190 	if (!properties || count < 2)
191 		return DT_INFO_INVALID_INTERRUPT;
192 
193 	it_num = fdt32_to_cpu(properties[1]);
194 
195 	switch (fdt32_to_cpu(properties[0])) {
196 	case 1:
197 		it_num += 16;
198 		break;
199 	case 0:
200 		it_num += 32;
201 		break;
202 	default:
203 		it_num = DT_INFO_INVALID_INTERRUPT;
204 	}
205 
206 	return it_num;
207 }
208 
/*
 * Map the distributor (and, for GICv2, the CPU interface) registers and
 * populate the driver state: register bases, probed max interrupt ID and
 * the itr_chip ops (plus the DT translation hook when CFG_DT is enabled).
 *
 * Panics if a required register bank cannot be mapped.  With
 * CFG_ARM_GICV3 the CPU interface uses system registers, so @gicc_base_pa
 * is not mapped and gd->gicc_base stays 0.
 */
static void gic_init_base_addr(paddr_t gicc_base_pa, paddr_t gicd_base_pa)
{
	struct gic_data *gd = &gic_data;
	vaddr_t gicc_base = 0;
	vaddr_t gicd_base = 0;

	/* core_mmu_get_va() is only meaningful once the MMU is enabled */
	assert(cpu_mmu_enabled());

	gicd_base = core_mmu_get_va(gicd_base_pa, MEM_AREA_IO_SEC,
				    GIC_DIST_REG_SIZE);
	if (!gicd_base)
		panic();

	if (!IS_ENABLED(CFG_ARM_GICV3)) {
		gicc_base = core_mmu_get_va(gicc_base_pa, MEM_AREA_IO_SEC,
					    GIC_CPU_REG_SIZE);
		if (!gicc_base)
			panic();
	}

	gd->gicc_base = gicc_base;
	gd->gicd_base = gicd_base;
	gd->max_it = probe_max_it(gicc_base, gicd_base);
	gd->chip.ops = &gic_ops;

	if (IS_ENABLED(CFG_DT))
		gd->chip.dt_get_irq = gic_dt_get_irq;
}
237 
/*
 * One-time GIC initialization: map the register banks and register the
 * itr_chip with the interrupt framework.
 *
 * Unless TF-A already configured the GIC (CFG_WITH_ARM_TRUSTED_FW), this
 * also resets the distributor state — all interrupts disabled and
 * non-pending, everything non-secure except SGIs 8-15 — and enables the
 * distributor and this CPU's interface.
 */
void gic_init(paddr_t gicc_base_pa, paddr_t gicd_base_pa)
{
	struct gic_data __maybe_unused *gd = &gic_data;
	size_t __maybe_unused n = 0;

	gic_init_base_addr(gicc_base_pa, gicd_base_pa);

	/* GIC configuration is initialized from TF-A when embedded */
#ifndef CFG_WITH_ARM_TRUSTED_FW
	/* One iteration per 32-interrupt register, up to max_it */
	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
		/* Disable interrupts */
		io_write32(gd->gicd_base + GICD_ICENABLER(n), 0xffffffff);

		/* Make interrupts non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(n), 0xffffffff);

		/* Mark interrupts non-secure */
		if (n == 0) {
			/* per-CPU interrupts config:
			 * ID0-ID7(SGI)	  for Non-secure interrupts
			 * ID8-ID15(SGI)  for Secure interrupts.
			 * All PPI config as Non-secure interrupts.
			 */
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffff00ff);
		} else {
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffffffff);
		}
	}

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
	io_setbits32(gd->gicd_base + GICD_CTLR, GICD_CTLR_ENABLEGRP1S);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR, GICC_CTLR_FIQEN |
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1);
	io_setbits32(gd->gicd_base + GICD_CTLR,
		     GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1);
#endif
#endif /*CFG_WITH_ARM_TRUSTED_FW*/

	interrupt_main_init(&gic_data.chip);
}
287 
288 static void gic_it_add(struct gic_data *gd, size_t it)
289 {
290 	size_t idx = it / NUM_INTS_PER_REG;
291 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
292 
293 	assert(gd == &gic_data);
294 
295 	/* Disable the interrupt */
296 	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
297 	/* Make it non-pending */
298 	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
299 	/* Assign it to group0 */
300 	io_clrbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
301 #if defined(CFG_ARM_GICV3)
302 	/* Assign it to group1S */
303 	io_setbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
304 #endif
305 }
306 
307 static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
308 				uint8_t cpu_mask)
309 {
310 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
311 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
312 	uint32_t target, target_shift;
313 	vaddr_t itargetsr = gd->gicd_base +
314 			    GICD_ITARGETSR(it / NUM_TARGETS_PER_REG);
315 
316 	assert(gd == &gic_data);
317 
318 	/* Assigned to group0 */
319 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
320 
321 	/* Route it to selected CPUs */
322 	target = io_read32(itargetsr);
323 	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
324 	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
325 	target |= cpu_mask << target_shift;
326 	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA, target, itargetsr);
327 	io_write32(itargetsr, target);
328 	DMSG("cpu_mask: 0x%x", io_read32(itargetsr));
329 }
330 
331 static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
332 {
333 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
334 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
335 
336 	assert(gd == &gic_data);
337 
338 	/* Assigned to group0 */
339 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
340 
341 	/* Set prio it to selected CPUs */
342 	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
343 		prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
344 	io_write8(gd->gicd_base + GICD_IPRIORITYR(0) + it, prio);
345 }
346 
347 static void gic_it_enable(struct gic_data *gd, size_t it)
348 {
349 	size_t idx = it / NUM_INTS_PER_REG;
350 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
351 	vaddr_t base = gd->gicd_base;
352 
353 	assert(gd == &gic_data);
354 
355 	/* Assigned to group0 */
356 	assert(!(io_read32(base + GICD_IGROUPR(idx)) & mask));
357 
358 	/* Enable the interrupt */
359 	io_write32(base + GICD_ISENABLER(idx), mask);
360 }
361 
362 static void gic_it_disable(struct gic_data *gd, size_t it)
363 {
364 	size_t idx = it / NUM_INTS_PER_REG;
365 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
366 
367 	assert(gd == &gic_data);
368 
369 	/* Assigned to group0 */
370 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
371 
372 	/* Disable the interrupt */
373 	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
374 }
375 
376 static void gic_it_set_pending(struct gic_data *gd, size_t it)
377 {
378 	size_t idx = it / NUM_INTS_PER_REG;
379 	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);
380 
381 	assert(gd == &gic_data);
382 
383 	/* Should be Peripheral Interrupt */
384 	assert(it >= NUM_SGI);
385 
386 	/* Raise the interrupt */
387 	io_write32(gd->gicd_base + GICD_ISPENDR(idx), mask);
388 }
389 
390 static void gic_it_raise_sgi(struct gic_data *gd, size_t it,
391 		uint8_t cpu_mask, uint8_t group)
392 {
393 	uint32_t mask_id = it & 0xf;
394 	uint32_t mask_group = group & 0x1;
395 	uint32_t mask_cpu = cpu_mask & 0xff;
396 	uint32_t mask = (mask_id | SHIFT_U32(mask_group, 15) |
397 		SHIFT_U32(mask_cpu, 16));
398 
399 	assert(gd == &gic_data);
400 
401 	/* Should be Software Generated Interrupt */
402 	assert(it < NUM_SGI);
403 
404 	/* Raise the interrupt */
405 	io_write32(gd->gicd_base + GICD_SGIR, mask);
406 }
407 
/*
 * Read the interrupt acknowledge register and return its value
 * (ICC_IAR1 with CFG_ARM_GICV3, GICC_IAR otherwise).  The low bits hold
 * the interrupt ID; see GICC_IAR_IT_ID_MASK.
 */
static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	return read_icc_iar1();
#else
	return io_read32(gd->gicc_base + GICC_IAR);
#endif
}
418 
/*
 * Signal end-of-interrupt for @eoir, which must be a value previously
 * returned by gic_read_iar() (ICC_EOIR1 with CFG_ARM_GICV3, GICC_EOIR
 * otherwise).
 */
static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	write_icc_eoir1(eoir);
#else
	io_write32(gd->gicc_base + GICC_EOIR, eoir);
#endif
}
429 
430 static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
431 {
432 	size_t idx = it / NUM_INTS_PER_REG;
433 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
434 
435 	assert(gd == &gic_data);
436 	return !!(io_read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
437 }
438 
439 static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
440 {
441 	size_t idx = it / NUM_INTS_PER_REG;
442 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
443 
444 	assert(gd == &gic_data);
445 	return !!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
446 }
447 
448 static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
449 {
450 	size_t reg_idx = it / NUM_TARGETS_PER_REG;
451 	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
452 				ITARGETSR_FIELD_BITS;
453 	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
454 	uint32_t target = io_read32(gd->gicd_base + GICD_ITARGETSR(reg_idx));
455 
456 	assert(gd == &gic_data);
457 	return (target & target_mask) >> target_shift;
458 }
459 
460 void gic_dump_state(void)
461 {
462 	struct gic_data *gd = &gic_data;
463 	int i = 0;
464 
465 #if defined(CFG_ARM_GICV3)
466 	DMSG("GICC_CTLR: 0x%x", read_icc_ctlr());
467 #else
468 	DMSG("GICC_CTLR: 0x%x", io_read32(gd->gicc_base + GICC_CTLR));
469 #endif
470 	DMSG("GICD_CTLR: 0x%x", io_read32(gd->gicd_base + GICD_CTLR));
471 
472 	for (i = 0; i <= (int)gd->max_it; i++) {
473 		if (gic_it_is_enabled(gd, i)) {
474 			DMSG("irq%d: enabled, group:%d, target:%x", i,
475 			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
476 		}
477 	}
478 }
479 
480 static void __maybe_unused gic_native_itr_handler(void)
481 {
482 	struct gic_data *gd = &gic_data;
483 	uint32_t iar = 0;
484 	uint32_t id = 0;
485 
486 	iar = gic_read_iar(gd);
487 	id = iar & GICC_IAR_IT_ID_MASK;
488 
489 	if (id <= gd->max_it)
490 		itr_handle(id);
491 	else
492 		DMSG("ignoring interrupt %" PRIu32, id);
493 
494 	gic_write_eoir(gd, iar);
495 }
496 
#ifndef CFG_CORE_WORKAROUND_ARM_NMFI
/* Override interrupt_main_handler() with driver implementation */
/* Forwards to the acknowledge/dispatch/EOI sequence above */
void interrupt_main_handler(void)
{
	gic_native_itr_handler();
}
#endif /*CFG_CORE_WORKAROUND_ARM_NMFI*/
504 
/*
 * itr_chip .add handler: configure @it as a secure interrupt.
 *
 * The caller-supplied @type and @prio are ignored: the interrupt is
 * routed to all CPUs (mask 0xff) and given a fixed priority of 0x1.
 * Panics on an interrupt ID beyond what the hardware implements.
 */
static void gic_op_add(struct itr_chip *chip, size_t it,
		       uint32_t type __unused,
		       uint32_t prio __unused)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_add(gd, it);
	/* Set the CPU mask to deliver interrupts to any online core */
	gic_it_set_cpu_mask(gd, it, 0xff);
	gic_it_set_prio(gd, it, 0x1);
}
521 
522 static void gic_op_enable(struct itr_chip *chip, size_t it)
523 {
524 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
525 
526 	assert(gd == &gic_data);
527 
528 	if (it > gd->max_it)
529 		panic();
530 
531 	gic_it_enable(gd, it);
532 }
533 
534 static void gic_op_disable(struct itr_chip *chip, size_t it)
535 {
536 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
537 
538 	assert(gd == &gic_data);
539 
540 	if (it > gd->max_it)
541 		panic();
542 
543 	gic_it_disable(gd, it);
544 }
545 
546 static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
547 {
548 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
549 
550 	assert(gd == &gic_data);
551 
552 	if (it > gd->max_it)
553 		panic();
554 
555 	gic_it_set_pending(gd, it);
556 }
557 
558 static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
559 			uint8_t cpu_mask)
560 {
561 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
562 
563 	assert(gd == &gic_data);
564 
565 	if (it > gd->max_it)
566 		panic();
567 
568 	if (it < NUM_NS_SGI)
569 		gic_it_raise_sgi(gd, it, cpu_mask, 1);
570 	else
571 		gic_it_raise_sgi(gd, it, cpu_mask, 0);
572 }
573 
574 static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
575 			uint8_t cpu_mask)
576 {
577 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
578 
579 	assert(gd == &gic_data);
580 
581 	if (it > gd->max_it)
582 		panic();
583 
584 	gic_it_set_cpu_mask(gd, it, cpu_mask);
585 }
586