xref: /optee_os/core/drivers/gic.c (revision 817466cb476de705a8e3dabe1ef165fe27a18c2f)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2017, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <drivers/gic.h>
10 #include <kernel/interrupt.h>
11 #include <kernel/panic.h>
12 #include <util.h>
13 #include <io.h>
14 #include <trace.h>
15 
16 /* Offsets from gic.gicc_base */
17 #define GICC_CTLR		(0x000)
18 #define GICC_PMR		(0x004)
19 #define GICC_IAR		(0x00C)
20 #define GICC_EOIR		(0x010)
21 
22 #define GICC_CTLR_ENABLEGRP0	(1 << 0)
23 #define GICC_CTLR_ENABLEGRP1	(1 << 1)
24 #define GICC_CTLR_FIQEN		(1 << 3)
25 
26 /* Offsets from gic.gicd_base */
27 #define GICD_CTLR		(0x000)
28 #define GICD_TYPER		(0x004)
29 #define GICD_IGROUPR(n)		(0x080 + (n) * 4)
30 #define GICD_ISENABLER(n)	(0x100 + (n) * 4)
31 #define GICD_ICENABLER(n)	(0x180 + (n) * 4)
32 #define GICD_ISPENDR(n)		(0x200 + (n) * 4)
33 #define GICD_ICPENDR(n)		(0x280 + (n) * 4)
34 #define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
35 #define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
36 #define GICD_SGIR		(0xF00)
37 
38 #define GICD_CTLR_ENABLEGRP0	(1 << 0)
39 #define GICD_CTLR_ENABLEGRP1	(1 << 1)
40 
41 /* Number of Private Peripheral Interrupt */
42 #define NUM_PPI	32
43 
44 /* Number of Software Generated Interrupt */
45 #define NUM_SGI			16
46 
47 /* Number of Non-secure Software Generated Interrupt */
48 #define NUM_NS_SGI		8
49 
50 /* Number of interrupts in one register */
51 #define NUM_INTS_PER_REG	32
52 
53 /* Number of targets in one register */
54 #define NUM_TARGETS_PER_REG	4
55 
56 /* Accessors to access ITARGETSRn */
57 #define ITARGETSR_FIELD_BITS	8
58 #define ITARGETSR_FIELD_MASK	0xff
59 
/* Maximum number of interrupts a GIC can support */
61 #define GIC_MAX_INTS		1020
62 
63 #define GICC_IAR_IT_ID_MASK	0x3ff
64 #define GICC_IAR_CPU_ID_MASK	0x7
65 #define GICC_IAR_CPU_ID_SHIFT	10
66 
/*
 * Handlers implementing the generic interrupt-chip interface (struct itr_ops)
 * for the GIC. Forward-declared so the ops table below can reference them.
 */
static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t flags);
static void gic_op_enable(struct itr_chip *chip, size_t it);
static void gic_op_disable(struct itr_chip *chip, size_t it);
static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);
static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);

/* Operation table exposed through gd->chip.ops (see gic_init_base_addr()) */
static const struct itr_ops gic_ops = {
	.add = gic_op_add,
	.enable = gic_op_enable,
	.disable = gic_op_disable,
	.raise_pi = gic_op_raise_pi,
	.raise_sgi = gic_op_raise_sgi,
	.set_affinity = gic_op_set_affinity,
};
84 
/*
 * Probe the ID of the highest interrupt the GIC distributor implements.
 *
 * Works by writing all-ones to each GICD_ISENABLERn (highest register
 * first) and reading the value back: bits for unimplemented interrupt
 * lines read back as zero, so the highest bit that sticks gives the
 * largest supported interrupt ID. The previous enable state of each
 * register is restored through GICD_ICENABLERn before moving on.
 *
 * The CPU interface is disabled (GICC_CTLR/ICC_CTLR cleared) for the
 * duration of the probe so none of the temporarily-enabled interrupts
 * is actually delivered, and is restored afterwards.
 *
 * Returns the largest implemented interrupt ID (0 if none found).
 */
static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
{
	int i;
	uint32_t old_ctlr;
	size_t ret = 0;
	/* Index of the last GICD_ISENABLERn covering GIC_MAX_INTS lines */
	const size_t max_regs = ((GIC_MAX_INTS + NUM_INTS_PER_REG - 1) /
					NUM_INTS_PER_REG) - 1;

	/*
	 * Probe which interrupt number is the largest.
	 */
#if defined(CFG_ARM_GICV3)
	old_ctlr = read_icc_ctlr();
	write_icc_ctlr(0);
#else
	old_ctlr = read32(gicc_base + GICC_CTLR);
	write32(0, gicc_base + GICC_CTLR);
#endif
	for (i = max_regs; i >= 0; i--) {
		uint32_t old_reg;
		uint32_t reg;
		int b;

		old_reg = read32(gicd_base + GICD_ISENABLER(i));
		write32(0xffffffff, gicd_base + GICD_ISENABLER(i));
		reg = read32(gicd_base + GICD_ISENABLER(i));
		/* Restore the previous enable state (write-1-to-clear) */
		write32(old_reg, gicd_base + GICD_ICENABLER(i));
		/* Scan from the top: first set bit is the largest ID */
		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
			if (BIT32(b) & reg) {
				ret = i * NUM_INTS_PER_REG + b;
				goto out;
			}
		}
	}
out:
#if defined(CFG_ARM_GICV3)
	write_icc_ctlr(old_ctlr);
#else
	write32(old_ctlr, gicc_base + GICC_CTLR);
#endif
	return ret;
}
127 
/*
 * Per-CPU GIC initialization: configure the banked SGI/PPI group
 * assignments and enable this CPU's interface.
 *
 * Must run on each core; gic_init() performs the same setup for the
 * boot core as part of global initialization.
 */
void gic_cpu_init(struct gic_data *gd)
{
#if defined(CFG_ARM_GICV3)
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif

	/* per-CPU interrupts config:
	 * ID0-ID7(SGI)   for Non-secure interrupts
	 * ID8-ID15(SGI)  for Secure interrupts.
	 * All PPI config as Non-secure interrupts.
	 */
	write32(0xffff00ff, gd->gicd_base + GICD_IGROUPR(0));

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_ctlr(GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		       GICC_CTLR_FIQEN);
#else
	write32(0x80, gd->gicc_base + GICC_PMR);

	/* Enable GIC */
	write32(GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 | GICC_CTLR_FIQEN,
		gd->gicc_base + GICC_CTLR);
#endif
}
158 
/*
 * Full GIC initialization for the boot core: record base addresses,
 * probe the number of interrupts, reset every interrupt to a known
 * disabled/non-pending state, assign groups, then enable the CPU
 * interface and the distributor.
 *
 * @gicc_base: CPU interface base (unused when CFG_ARM_GICV3, which
 *             uses system registers instead)
 * @gicd_base: distributor base
 */
void gic_init(struct gic_data *gd, vaddr_t gicc_base __maybe_unused,
	      vaddr_t gicd_base)
{
	size_t n;

	gic_init_base_addr(gd, gicc_base, gicd_base);

	/* One GICD_*(n) register covers NUM_INTS_PER_REG interrupts */
	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
		/* Disable interrupts */
		write32(0xffffffff, gd->gicd_base + GICD_ICENABLER(n));

		/* Make interrupts non-pending */
		write32(0xffffffff, gd->gicd_base + GICD_ICPENDR(n));

		/* Mark interrupts non-secure */
		if (n == 0) {
			/* per-CPU interrupts config:
			 * ID0-ID7(SGI)   for Non-secure interrupts
			 * ID8-ID15(SGI)  for Secure interrupts.
			 * All PPI config as Non-secure interrupts.
			 */
			write32(0xffff00ff, gd->gicd_base + GICD_IGROUPR(n));
		} else {
			write32(0xffffffff, gd->gicd_base + GICD_IGROUPR(n));
		}
	}

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_ctlr(GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		       GICC_CTLR_FIQEN);
#else
	write32(0x80, gd->gicc_base + GICC_PMR);

	/* Enable GIC */
	write32(GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 | GICC_CTLR_FIQEN,
		gd->gicc_base + GICC_CTLR);
#endif
	/* Enable group 0 and group 1 forwarding from the distributor */
	write32(read32(gd->gicd_base + GICD_CTLR) | GICD_CTLR_ENABLEGRP0 |
		GICD_CTLR_ENABLEGRP1, gd->gicd_base + GICD_CTLR);
}
203 
/*
 * Record the GIC base addresses, probe the largest supported interrupt
 * ID into gd->max_it, and hook this driver's ops into the interrupt
 * chip. Does not touch any interrupt configuration or enables.
 */
void gic_init_base_addr(struct gic_data *gd, vaddr_t gicc_base __maybe_unused,
			vaddr_t gicd_base)
{
	gd->gicc_base = gicc_base;
	gd->gicd_base = gicd_base;
	gd->max_it = probe_max_it(gicc_base, gicd_base);
	gd->chip.ops = &gic_ops;
}
212 
213 static void gic_it_add(struct gic_data *gd, size_t it)
214 {
215 	size_t idx = it / NUM_INTS_PER_REG;
216 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
217 
218 	/* Disable the interrupt */
219 	write32(mask, gd->gicd_base + GICD_ICENABLER(idx));
220 	/* Make it non-pending */
221 	write32(mask, gd->gicd_base + GICD_ICPENDR(idx));
222 	/* Assign it to group0 */
223 	write32(read32(gd->gicd_base + GICD_IGROUPR(idx)) & ~mask,
224 			gd->gicd_base + GICD_IGROUPR(idx));
225 }
226 
227 static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
228 				uint8_t cpu_mask)
229 {
230 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
231 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
232 	uint32_t target, target_shift;
233 
234 	/* Assigned to group0 */
235 	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
236 
237 	/* Route it to selected CPUs */
238 	target = read32(gd->gicd_base +
239 			GICD_ITARGETSR(it / NUM_TARGETS_PER_REG));
240 	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
241 	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
242 	target |= cpu_mask << target_shift;
243 	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA,
244 	     target, gd->gicd_base + GICD_ITARGETSR(it / NUM_TARGETS_PER_REG));
245 	write32(target,
246 		gd->gicd_base + GICD_ITARGETSR(it / NUM_TARGETS_PER_REG));
247 	DMSG("cpu_mask: 0x%x\n",
248 	     read32(gd->gicd_base + GICD_ITARGETSR(it / NUM_TARGETS_PER_REG)));
249 }
250 
251 static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
252 {
253 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
254 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
255 
256 	/* Assigned to group0 */
257 	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
258 
259 	/* Set prio it to selected CPUs */
260 	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
261 		prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
262 	write8(prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
263 }
264 
265 static void gic_it_enable(struct gic_data *gd, size_t it)
266 {
267 	size_t idx = it / NUM_INTS_PER_REG;
268 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
269 
270 	/* Assigned to group0 */
271 	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
272 	if (it >= NUM_SGI) {
273 		/*
274 		 * Not enabled yet, except Software Generated Interrupt
275 		 * which is implementation defined
276 		 */
277 		assert(!(read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask));
278 	}
279 
280 	/* Enable the interrupt */
281 	write32(mask, gd->gicd_base + GICD_ISENABLER(idx));
282 }
283 
284 static void gic_it_disable(struct gic_data *gd, size_t it)
285 {
286 	size_t idx = it / NUM_INTS_PER_REG;
287 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
288 
289 	/* Assigned to group0 */
290 	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
291 
292 	/* Disable the interrupt */
293 	write32(mask, gd->gicd_base + GICD_ICENABLER(idx));
294 }
295 
/*
 * Force interrupt @it pending in the distributor (GICD_ISPENDRn is
 * write-1-to-set). Only valid for peripheral interrupts — SGIs are
 * raised through GICD_SGIR instead, see gic_it_raise_sgi().
 */
static void gic_it_set_pending(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);

	/* Should be Peripheral Interrupt */
	assert(it >= NUM_SGI);
	/* Assigned to group0 */
	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Raise the interrupt */
	write32(mask, gd->gicd_base + GICD_ISPENDR(idx));
}
309 
310 static void gic_it_raise_sgi(struct gic_data *gd, size_t it,
311 		uint8_t cpu_mask, uint8_t group)
312 {
313 	uint32_t mask_id = it & 0xf;
314 	uint32_t mask_group = group & 0x1;
315 	uint32_t mask_cpu = cpu_mask & 0xff;
316 	uint32_t mask = (mask_id | SHIFT_U32(mask_group, 15) |
317 		SHIFT_U32(mask_cpu, 16));
318 
319 	/* Should be Software Generated Interrupt */
320 	assert(it < NUM_SGI);
321 
322 	/* Raise the interrupt */
323 	write32(mask, gd->gicd_base + GICD_SGIR);
324 }
325 
/*
 * Acknowledge the highest-priority pending group 0 interrupt and return
 * the IAR value (interrupt ID in bits [9:0], plus source CPU for SGIs
 * on GICv2). Must be paired with gic_write_eoir() using the same value.
 */
static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
{
#if defined(CFG_ARM_GICV3)
	return read_icc_iar0();
#else
	return read32(gd->gicc_base + GICC_IAR);
#endif
}
334 
/*
 * Signal end-of-interrupt for a group 0 interrupt. @eoir must be the
 * unmodified value previously returned by gic_read_iar().
 */
static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
{
#if defined(CFG_ARM_GICV3)
	write_icc_eoir0(eoir);
#else
	write32(eoir, gd->gicc_base + GICC_EOIR);
#endif
}
343 
344 static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
345 {
346 	size_t idx = it / NUM_INTS_PER_REG;
347 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
348 	return !!(read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
349 }
350 
351 static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
352 {
353 	size_t idx = it / NUM_INTS_PER_REG;
354 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
355 	return !!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
356 }
357 
358 static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
359 {
360 	size_t reg_idx = it / NUM_TARGETS_PER_REG;
361 	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
362 				ITARGETSR_FIELD_BITS;
363 	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
364 	uint32_t target =
365 		read32(gd->gicd_base + GICD_ITARGETSR(reg_idx)) & target_mask;
366 
367 	target = target >> target_shift;
368 	return target;
369 }
370 
371 void gic_dump_state(struct gic_data *gd)
372 {
373 	int i;
374 
375 #if defined(CFG_ARM_GICV3)
376 	DMSG("GICC_CTLR: 0x%x", read_icc_ctlr());
377 #else
378 	DMSG("GICC_CTLR: 0x%x", read32(gd->gicc_base + GICC_CTLR));
379 #endif
380 	DMSG("GICD_CTLR: 0x%x", read32(gd->gicd_base + GICD_CTLR));
381 
382 	for (i = 0; i < (int)gd->max_it; i++) {
383 		if (gic_it_is_enabled(gd, i)) {
384 			DMSG("irq%d: enabled, group:%d, target:%x", i,
385 			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
386 		}
387 	}
388 }
389 
/*
 * Top-level GIC interrupt handler: acknowledge the pending interrupt,
 * dispatch it to the registered handler via itr_handle(), then signal
 * end-of-interrupt.
 */
void gic_it_handle(struct gic_data *gd)
{
	uint32_t iar;
	uint32_t id;

	iar = gic_read_iar(gd);
	/* Strip the source-CPU field (GICv2 SGIs) to get the interrupt ID */
	id = iar & GICC_IAR_IT_ID_MASK;

	/*
	 * NOTE(review): max_it is the largest valid ID (from
	 * probe_max_it()), so id == max_it is arguably valid but is
	 * ignored here — confirm whether "<=" was intended.
	 */
	if (id < gd->max_it)
		itr_handle(id);
	else
		DMSG("ignoring interrupt %" PRIu32, id);

	/* EOI must use the full IAR value, including the CPU ID field */
	gic_write_eoir(gd, iar);
}
405 
/*
 * itr_ops.add handler: register interrupt @it as a secure (group 0)
 * interrupt, routed to all CPUs with a fixed high secure priority.
 * Panics on an out-of-range interrupt number.
 *
 * NOTE(review): "it >= gd->max_it" also rejects it == max_it, which is
 * the largest valid ID probed by probe_max_it() — confirm whether ">"
 * was intended. Same pattern in the other gic_op_* handlers.
 */
static void gic_op_add(struct itr_chip *chip, size_t it,
		       uint32_t flags __unused)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	if (it >= gd->max_it)
		panic();

	gic_it_add(gd, it);
	/* Set the CPU mask to deliver interrupts to any online core */
	gic_it_set_cpu_mask(gd, it, 0xff);
	/* 0x1 = just below the highest secure priority (0x0) */
	gic_it_set_prio(gd, it, 0x1);
}
419 
420 static void gic_op_enable(struct itr_chip *chip, size_t it)
421 {
422 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
423 
424 	if (it >= gd->max_it)
425 		panic();
426 
427 	gic_it_enable(gd, it);
428 }
429 
430 static void gic_op_disable(struct itr_chip *chip, size_t it)
431 {
432 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
433 
434 	if (it >= gd->max_it)
435 		panic();
436 
437 	gic_it_disable(gd, it);
438 }
439 
440 static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
441 {
442 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
443 
444 	if (it >= gd->max_it)
445 		panic();
446 
447 	gic_it_set_pending(gd, it);
448 }
449 
450 static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
451 			uint8_t cpu_mask)
452 {
453 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
454 
455 	if (it >= gd->max_it)
456 		panic();
457 
458 	if (it < NUM_NS_SGI)
459 		gic_it_raise_sgi(gd, it, cpu_mask, 1);
460 	else
461 		gic_it_raise_sgi(gd, it, cpu_mask, 0);
462 }
463 static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
464 			uint8_t cpu_mask)
465 {
466 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
467 
468 	if (it >= gd->max_it)
469 		panic();
470 
471 	gic_it_set_cpu_mask(gd, it, cpu_mask);
472 }
473