xref: /optee_os/core/drivers/gic.c (revision 77bdbf67c42209142ef43129e01113d29d9c62f6)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <config.h>
#include <drivers/gic.h>
#include <keep.h>
#include <kernel/dt.h>
#include <kernel/interrupt.h>
#include <kernel/panic.h>
#include <libfdt.h>
#include <util.h>
#include <io.h>
#include <trace.h>

/* Offsets from gic.gicc_base */
#define GICC_CTLR		(0x000)
#define GICC_PMR		(0x004)
#define GICC_IAR		(0x00C)
#define GICC_EOIR		(0x010)

#define GICC_CTLR_ENABLEGRP0	(1 << 0)
#define GICC_CTLR_ENABLEGRP1	(1 << 1)
#define GICC_CTLR_FIQEN		(1 << 3)

/* Offsets from gic.gicd_base */
#define GICD_CTLR		(0x000)
#define GICD_TYPER		(0x004)
#define GICD_IGROUPR(n)		(0x080 + (n) * 4)
#define GICD_ISENABLER(n)	(0x100 + (n) * 4)
#define GICD_ICENABLER(n)	(0x180 + (n) * 4)
#define GICD_ISPENDR(n)		(0x200 + (n) * 4)
#define GICD_ICPENDR(n)		(0x280 + (n) * 4)
#define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
#define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
#define GICD_IGROUPMODR(n)	(0xd00 + (n) * 4)
#define GICD_SGIR		(0xF00)

#define GICD_CTLR_ENABLEGRP0	(1 << 0)
#define GICD_CTLR_ENABLEGRP1	(1 << 1)
#define GICD_CTLR_ENABLEGRP1S	(1 << 2)

/* Number of Private Peripheral Interrupts */
#define NUM_PPI			32

/* Number of Software Generated Interrupts */
#define NUM_SGI			16

/* Number of Non-secure Software Generated Interrupts */
#define NUM_NS_SGI		8

/* Number of interrupts in one register */
#define NUM_INTS_PER_REG	32

/* Number of targets in one register */
#define NUM_TARGETS_PER_REG	4

/* Accessors to access ITARGETSRn */
#define ITARGETSR_FIELD_BITS	8
#define ITARGETSR_FIELD_MASK	0xff

/* Maximum number of interrupts a GIC can support */
#define GIC_MAX_INTS		1020

#define GICC_IAR_IT_ID_MASK	0x3ff
#define GICC_IAR_CPU_ID_MASK	0x7
#define GICC_IAR_CPU_ID_SHIFT	10

static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t flags);
static void gic_op_enable(struct itr_chip *chip, size_t it);
static void gic_op_disable(struct itr_chip *chip, size_t it);
static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);
static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);

static const struct itr_ops gic_ops = {
	.add = gic_op_add,
	.enable = gic_op_enable,
	.disable = gic_op_disable,
	.raise_pi = gic_op_raise_pi,
	.raise_sgi = gic_op_raise_sgi,
	.set_affinity = gic_op_set_affinity,
};
DECLARE_KEEP_PAGER(gic_ops);

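/*
 * Probe the highest implemented interrupt ID: for each GICD_ISENABLERn,
 * starting from the highest possible register, write all ones and read back
 * which enable bits stick (unimplemented lines read back as zero), then
 * restore the previous enable state through GICD_ICENABLERn. GICC_CTLR
 * (ICC_CTLR on GICv3) is cleared for the duration of the probe and restored
 * afterwards.
 */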
static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
{
	int i;
	uint32_t old_ctlr;
	size_t ret = 0;
	const size_t max_regs = ((GIC_MAX_INTS + NUM_INTS_PER_REG - 1) /
					NUM_INTS_PER_REG) - 1;

	/*
	 * Probe which interrupt number is the largest.
	 */
#if defined(CFG_ARM_GICV3)
	old_ctlr = read_icc_ctlr();
	write_icc_ctlr(0);
#else
	old_ctlr = io_read32(gicc_base + GICC_CTLR);
	io_write32(gicc_base + GICC_CTLR, 0);
#endif
	for (i = max_regs; i >= 0; i--) {
		uint32_t old_reg;
		uint32_t reg;
		int b;

		old_reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ISENABLER(i), 0xffffffff);
		reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ICENABLER(i), ~old_reg);
		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
			if (BIT32(b) & reg) {
				ret = i * NUM_INTS_PER_REG + b;
				goto out;
			}
		}
	}
out:
#if defined(CFG_ARM_GICV3)
	write_icc_ctlr(old_ctlr);
#else
	io_write32(gicc_base + GICC_CTLR, old_ctlr);
#endif
	return ret;
}

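/*
 * Per-CPU initialization: configures the banked GICD_IGROUPR(0) (SGIs and
 * PPIs) and the CPU interface of the calling core. Typically called on each
 * secondary core after gic_init() has done the global setup on the primary.
 */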
void gic_cpu_init(struct gic_data *gd)
{
#if defined(CFG_ARM_GICV3)
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif

	/*
	 * Per-CPU interrupt configuration:
	 * ID0-ID7(SGI)   for Non-secure interrupts
	 * ID8-ID15(SGI)  for Secure interrupts
	 * All PPIs are configured as Non-secure interrupts.
	 */
	io_write32(gd->gicd_base + GICD_IGROUPR(0), 0xffff00ff);

	/*
	 * Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself.
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR,
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		   GICC_CTLR_FIQEN);
#endif
}

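/*
 * Global initialization, run once on the primary core: probes the number of
 * interrupt lines and puts every interrupt into a known disabled, non-pending,
 * Non-secure state. A typical platform boot flow looks roughly like the
 * sketch below; the exact platform hooks and call sites vary:
 *
 *	gic_init(&gic_data, gicc_base, gicd_base);
 *	itr_init(&gic_data.chip);	// register as the main interrupt chip
 *	...
 *	gic_cpu_init(&gic_data);	// on each secondary core
 */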
void gic_init(struct gic_data *gd, vaddr_t gicc_base __maybe_unused,
	      vaddr_t gicd_base)
{
	size_t n;

	gic_init_base_addr(gd, gicc_base, gicd_base);

	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
		/* Disable interrupts */
		io_write32(gd->gicd_base + GICD_ICENABLER(n), 0xffffffff);

		/* Make interrupts non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(n), 0xffffffff);

		/* Mark interrupts non-secure */
		if (n == 0) {
			/*
			 * Per-CPU interrupt configuration:
			 * ID0-ID7(SGI)   for Non-secure interrupts
			 * ID8-ID15(SGI)  for Secure interrupts
			 * All PPIs are configured as Non-secure interrupts.
			 */
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffff00ff);
		} else {
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffffffff);
		}
	}

	/*
	 * Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself.
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
	io_setbits32(gd->gicd_base + GICD_CTLR, GICD_CTLR_ENABLEGRP1S);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR, GICC_CTLR_FIQEN |
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1);
	io_setbits32(gd->gicd_base + GICD_CTLR,
		     GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1);
#endif
}

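/*
 * Translate a device tree interrupt specifier (standard GIC binding) into a
 * GIC interrupt ID: cell 0 selects the type (0 = SPI, 1 = PPI) and cell 1
 * holds the number relative to that type, so PPIs are offset by 16 and SPIs
 * by 32 to obtain the absolute ID.
 */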
static int gic_dt_get_irq(const uint32_t *properties, int len)
{
	int it_num = DT_INFO_INVALID_INTERRUPT;

	if (!properties || len < 2)
		return DT_INFO_INVALID_INTERRUPT;

	it_num = fdt32_to_cpu(properties[1]);

	switch (fdt32_to_cpu(properties[0])) {
	case 1:
		it_num += 16;
		break;
	case 0:
		it_num += 32;
		break;
	default:
		it_num = DT_INFO_INVALID_INTERRUPT;
	}

	return it_num;
}

void gic_init_base_addr(struct gic_data *gd, vaddr_t gicc_base __maybe_unused,
			vaddr_t gicd_base)
{
	gd->gicc_base = gicc_base;
	gd->gicd_base = gicd_base;
	gd->max_it = probe_max_it(gicc_base, gicd_base);
	gd->chip.ops = &gic_ops;

	if (IS_ENABLED(CFG_DT))
		gd->chip.dt_get_irq = gic_dt_get_irq;
}

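/*
 * Register an interrupt for secure use: it is left disabled and non-pending,
 * and moved to group 0 (and, on GICv3, to Secure Group 1 via GICD_IGROUPMODR)
 * so that it is delivered to the secure world.
 */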
static void gic_it_add(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	/* Disable the interrupt */
	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
	/* Make it non-pending */
	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
	/* Assign it to group0 */
	io_clrbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
#if defined(CFG_ARM_GICV3)
	/* Assign it to group1S */
	io_setbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
#endif
}

static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
				uint8_t cpu_mask)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
	uint32_t target, target_shift;
	vaddr_t itargetsr = gd->gicd_base +
			    GICD_ITARGETSR(it / NUM_TARGETS_PER_REG);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Route it to selected CPUs */
	target = io_read32(itargetsr);
	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
	target |= cpu_mask << target_shift;
	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA, target, itargetsr);
	io_write32(itargetsr, target);
	DMSG("cpu_mask: 0x%x", io_read32(itargetsr));
}

static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Set the priority of the interrupt */
	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
		prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
	io_write8(gd->gicd_base + GICD_IPRIORITYR(0) + it, prio);
}

static void gic_it_enable(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
	vaddr_t base = gd->gicd_base;

	/* Assigned to group0 */
	assert(!(io_read32(base + GICD_IGROUPR(idx)) & mask));
	if (it >= NUM_SGI) {
		/*
		 * Should not be enabled yet. SGIs are excluded from this
		 * check since their enable state is implementation defined.
		 */
		assert(!(io_read32(base + GICD_ISENABLER(idx)) & mask));
	}

	/* Enable the interrupt */
	io_write32(base + GICD_ISENABLER(idx), mask);
}

static void gic_it_disable(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Disable the interrupt */
	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
}

static void gic_it_set_pending(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);

	/* Should be a Peripheral Interrupt */
	assert(it >= NUM_SGI);

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_ISPENDR(idx), mask);
}

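/*
 * Raise a Software Generated Interrupt through GICD_SGIR: bits [3:0] carry
 * the SGI ID, bit 15 (NSATT) selects the group the SGI is raised for, and
 * bits [23:16] form the CPU target list.
 */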
static void gic_it_raise_sgi(struct gic_data *gd, size_t it,
		uint8_t cpu_mask, uint8_t group)
{
	uint32_t mask_id = it & 0xf;
	uint32_t mask_group = group & 0x1;
	uint32_t mask_cpu = cpu_mask & 0xff;
	uint32_t mask = (mask_id | SHIFT_U32(mask_group, 15) |
		SHIFT_U32(mask_cpu, 16));

	/* Should be Software Generated Interrupt */
	assert(it < NUM_SGI);

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_SGIR, mask);
}

static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
{
#if defined(CFG_ARM_GICV3)
	return read_icc_iar1();
#else
	return io_read32(gd->gicc_base + GICC_IAR);
#endif
}

static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
{
#if defined(CFG_ARM_GICV3)
	write_icc_eoir1(eoir);
#else
	io_write32(gd->gicc_base + GICC_EOIR, eoir);
#endif
}

static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	return !!(io_read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
}

static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	return !!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
}

static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
{
	size_t reg_idx = it / NUM_TARGETS_PER_REG;
	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
				ITARGETSR_FIELD_BITS;
	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
	uint32_t target = io_read32(gd->gicd_base + GICD_ITARGETSR(reg_idx));

	return (target & target_mask) >> target_shift;
}

void gic_dump_state(struct gic_data *gd)
{
	int i;

#if defined(CFG_ARM_GICV3)
	DMSG("GICC_CTLR: 0x%x", read_icc_ctlr());
#else
	DMSG("GICC_CTLR: 0x%x", io_read32(gd->gicc_base + GICC_CTLR));
#endif
	DMSG("GICD_CTLR: 0x%x", io_read32(gd->gicd_base + GICD_CTLR));

	for (i = 0; i <= (int)gd->max_it; i++) {
		if (gic_it_is_enabled(gd, i)) {
			DMSG("irq%d: enabled, group:%d, target:%x", i,
			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
		}
	}
}

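/*
 * Main interrupt handler: acknowledge via IAR, dispatch, then signal end of
 * interrupt via EOIR. Spurious IDs (1022/1023) are above max_it and are
 * therefore ignored by the range check below.
 */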
void gic_it_handle(struct gic_data *gd)
{
	uint32_t iar;
	uint32_t id;

	iar = gic_read_iar(gd);
	id = iar & GICC_IAR_IT_ID_MASK;

	if (id <= gd->max_it)
		itr_handle(id);
	else
		DMSG("ignoring interrupt %" PRIu32, id);

	gic_write_eoir(gd, iar);
}

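/*
 * Interrupts added through this chip are routed to all cores and given
 * priority 0x1, i.e. a higher priority than the 0x80 value programmed into
 * the priority mask register, so they pass the mask set in gic_cpu_init()
 * and gic_init().
 */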
static void gic_op_add(struct itr_chip *chip, size_t it,
		       uint32_t flags __unused)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	if (it > gd->max_it)
		panic();

	gic_it_add(gd, it);
	/* Set the CPU mask to deliver interrupts to any online core */
	gic_it_set_cpu_mask(gd, it, 0xff);
	gic_it_set_prio(gd, it, 0x1);
}

static void gic_op_enable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	if (it > gd->max_it)
		panic();

	gic_it_enable(gd, it);
}

static void gic_op_disable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	if (it > gd->max_it)
		panic();

	gic_it_disable(gd, it);
}

static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	if (it > gd->max_it)
		panic();

	gic_it_set_pending(gd, it);
}

static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	if (it > gd->max_it)
		panic();

	if (it < NUM_NS_SGI)
		gic_it_raise_sgi(gd, it, cpu_mask, 1);
	else
		gic_it_raise_sgi(gd, it, cpu_mask, 0);
}

static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	if (it > gd->max_it)
		panic();

	gic_it_set_cpu_mask(gd, it, cpu_mask);
}
504