xref: /optee_os/core/drivers/gic.c (revision 3f66fc74aa2726fabcf5f294831c0c5b88c717cb)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2017, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <drivers/gic.h>
10 #include <keep.h>
11 #include <kernel/interrupt.h>
12 #include <kernel/panic.h>
13 #include <util.h>
14 #include <io.h>
15 #include <trace.h>
16 
17 /* Offsets from gic.gicc_base */
18 #define GICC_CTLR		(0x000)
19 #define GICC_PMR		(0x004)
20 #define GICC_IAR		(0x00C)
21 #define GICC_EOIR		(0x010)
22 
23 #define GICC_CTLR_ENABLEGRP0	(1 << 0)
24 #define GICC_CTLR_ENABLEGRP1	(1 << 1)
25 #define GICC_CTLR_FIQEN		(1 << 3)
26 
27 /* Offsets from gic.gicd_base */
28 #define GICD_CTLR		(0x000)
29 #define GICD_TYPER		(0x004)
30 #define GICD_IGROUPR(n)		(0x080 + (n) * 4)
31 #define GICD_ISENABLER(n)	(0x100 + (n) * 4)
32 #define GICD_ICENABLER(n)	(0x180 + (n) * 4)
33 #define GICD_ISPENDR(n)		(0x200 + (n) * 4)
34 #define GICD_ICPENDR(n)		(0x280 + (n) * 4)
35 #define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
36 #define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
37 #define GICD_SGIR		(0xF00)
38 
39 #define GICD_CTLR_ENABLEGRP0	(1 << 0)
40 #define GICD_CTLR_ENABLEGRP1	(1 << 1)
41 
42 /* Number of Private Peripheral Interrupt */
43 #define NUM_PPI	32
44 
45 /* Number of Software Generated Interrupt */
46 #define NUM_SGI			16
47 
48 /* Number of Non-secure Software Generated Interrupt */
49 #define NUM_NS_SGI		8
50 
51 /* Number of interrupts in one register */
52 #define NUM_INTS_PER_REG	32
53 
54 /* Number of targets in one register */
55 #define NUM_TARGETS_PER_REG	4
56 
57 /* Accessors to access ITARGETSRn */
58 #define ITARGETSR_FIELD_BITS	8
59 #define ITARGETSR_FIELD_MASK	0xff
60 
61 /* Maximum number of interrups a GIC can support */
62 #define GIC_MAX_INTS		1020
63 
64 #define GICC_IAR_IT_ID_MASK	0x3ff
65 #define GICC_IAR_CPU_ID_MASK	0x7
66 #define GICC_IAR_CPU_ID_SHIFT	10
67 
/* Forward declarations of the handlers backing the itr_chip interface */
static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t flags);
static void gic_op_enable(struct itr_chip *chip, size_t it);
static void gic_op_disable(struct itr_chip *chip, size_t it);
static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);
static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);

/* Interrupt chip operations exposed to the generic interrupt framework */
static const struct itr_ops gic_ops = {
	.add = gic_op_add,
	.enable = gic_op_enable,
	.disable = gic_op_disable,
	.raise_pi = gic_op_raise_pi,
	.raise_sgi = gic_op_raise_sgi,
	.set_affinity = gic_op_set_affinity,
};
KEEP_PAGER(gic_ops);
86 
/*
 * Probe the ID of the highest implemented interrupt.
 *
 * Starting from the top GICD_ISENABLERn register, write all-ones and read
 * back which set-enable bits stick: bits of unimplemented interrupts read
 * back as zero, so the highest bit that sticks identifies the highest
 * implemented interrupt ID. The previous enable state is restored via
 * GICD_ICENABLERn, and the CPU interface is disabled while the enable bits
 * are being poked.
 *
 * Returns the highest interrupt ID, inclusive (0 if none found).
 */
static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
{
	int i;
	uint32_t old_ctlr;
	size_t ret = 0;
	/* Index of the last GICD_ISENABLERn register covering GIC_MAX_INTS */
	const size_t max_regs = ((GIC_MAX_INTS + NUM_INTS_PER_REG - 1) /
					NUM_INTS_PER_REG) - 1;

	/*
	 * Probe which interrupt number is the largest.
	 */
#if defined(CFG_ARM_GICV3)
	old_ctlr = read_icc_ctlr();
	write_icc_ctlr(0);
#else
	old_ctlr = io_read32(gicc_base + GICC_CTLR);
	io_write32(gicc_base + GICC_CTLR, 0);
#endif
	for (i = max_regs; i >= 0; i--) {
		uint32_t old_reg;
		uint32_t reg;
		int b;

		old_reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ISENABLER(i), 0xffffffff);
		reg = io_read32(gicd_base + GICD_ISENABLER(i));
		/* Restore: clear every enable bit that was not set before */
		io_write32(gicd_base + GICD_ICENABLER(i), ~old_reg);
		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
			if (BIT32(b) & reg) {
				ret = i * NUM_INTS_PER_REG + b;
				goto out;
			}
		}
	}
out:
	/* Re-enable the CPU interface with its previous configuration */
#if defined(CFG_ARM_GICV3)
	write_icc_ctlr(old_ctlr);
#else
	io_write32(gicc_base + GICC_CTLR, old_ctlr);
#endif
	return ret;
}
129 
/*
 * Per-CPU GIC setup: assign the banked SGI/PPI interrupts to their groups,
 * set the priority mask and enable this CPU's interface to the GIC.
 * Must run on each CPU after gic_init() has configured the distributor.
 */
void gic_cpu_init(struct gic_data *gd)
{
#if defined(CFG_ARM_GICV3)
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif

	/* per-CPU interrupts config:
	 * ID0-ID7(SGI)   for Non-secure interrupts
	 * ID8-ID15(SGI)  for Secure interrupts.
	 * All PPI config as Non-secure interrupts.
	 */
	io_write32(gd->gicd_base + GICD_IGROUPR(0), 0xffff00ff);

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	/* Enable both groups; FIQEN signals group0 interrupts as FIQ */
	write_icc_ctlr(GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		       GICC_CTLR_FIQEN);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR,
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		   GICC_CTLR_FIQEN);
#endif
}
161 
/*
 * Full GIC initialization for the boot CPU: records base addresses, puts
 * every interrupt into a known state (disabled, non-pending, grouped),
 * then enables the CPU interface and the distributor.
 */
void gic_init(struct gic_data *gd, vaddr_t gicc_base __maybe_unused,
	      vaddr_t gicd_base)
{
	size_t n;

	gic_init_base_addr(gd, gicc_base, gicd_base);

	/* Walk every GICD register bank covering interrupts 0..max_it */
	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
		/* Disable interrupts */
		io_write32(gd->gicd_base + GICD_ICENABLER(n), 0xffffffff);

		/* Make interrupts non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(n), 0xffffffff);

		/* Mark interrupts non-secure */
		if (n == 0) {
			/* per-CPU interrupts config:
			 * ID0-ID7(SGI)   for Non-secure interrupts
			 * ID8-ID15(SGI)  for Secure interrupts.
			 * All PPI config as Non-secure interrupts.
			 */
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffff00ff);
		} else {
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffffffff);
		}
	}

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_ctlr(GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		       GICC_CTLR_FIQEN);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR, GICC_CTLR_FIQEN |
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1);
#endif
	/* Enable both groups at the distributor */
	io_setbits32(gd->gicd_base + GICD_CTLR,
		     GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1);
}
206 
207 void gic_init_base_addr(struct gic_data *gd, vaddr_t gicc_base __maybe_unused,
208 			vaddr_t gicd_base)
209 {
210 	gd->gicc_base = gicc_base;
211 	gd->gicd_base = gicd_base;
212 	gd->max_it = probe_max_it(gicc_base, gicd_base);
213 	gd->chip.ops = &gic_ops;
214 }
215 
216 static void gic_it_add(struct gic_data *gd, size_t it)
217 {
218 	size_t idx = it / NUM_INTS_PER_REG;
219 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
220 
221 	/* Disable the interrupt */
222 	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
223 	/* Make it non-pending */
224 	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
225 	/* Assign it to group0 */
226 	io_clrbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
227 }
228 
229 static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
230 				uint8_t cpu_mask)
231 {
232 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
233 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
234 	uint32_t target, target_shift;
235 	vaddr_t itargetsr = gd->gicd_base +
236 			    GICD_ITARGETSR(it / NUM_TARGETS_PER_REG);
237 
238 	/* Assigned to group0 */
239 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
240 
241 	/* Route it to selected CPUs */
242 	target = io_read32(itargetsr);
243 	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
244 	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
245 	target |= cpu_mask << target_shift;
246 	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA, target, itargetsr);
247 	io_write32(itargetsr, target);
248 	DMSG("cpu_mask: 0x%x", io_read32(itargetsr));
249 }
250 
251 static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
252 {
253 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
254 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
255 
256 	/* Assigned to group0 */
257 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
258 
259 	/* Set prio it to selected CPUs */
260 	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
261 		prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
262 	io_write8(gd->gicd_base + GICD_IPRIORITYR(0) + it, prio);
263 }
264 
265 static void gic_it_enable(struct gic_data *gd, size_t it)
266 {
267 	size_t idx = it / NUM_INTS_PER_REG;
268 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
269 	vaddr_t base = gd->gicd_base;
270 
271 	/* Assigned to group0 */
272 	assert(!(io_read32(base + GICD_IGROUPR(idx)) & mask));
273 	if (it >= NUM_SGI) {
274 		/*
275 		 * Not enabled yet, except Software Generated Interrupt
276 		 * which is implementation defined
277 		 */
278 		assert(!(io_read32(base + GICD_ISENABLER(idx)) & mask));
279 	}
280 
281 	/* Enable the interrupt */
282 	io_write32(base + GICD_ISENABLER(idx), mask);
283 }
284 
285 static void gic_it_disable(struct gic_data *gd, size_t it)
286 {
287 	size_t idx = it / NUM_INTS_PER_REG;
288 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
289 
290 	/* Assigned to group0 */
291 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
292 
293 	/* Disable the interrupt */
294 	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
295 }
296 
297 static void gic_it_set_pending(struct gic_data *gd, size_t it)
298 {
299 	size_t idx = it / NUM_INTS_PER_REG;
300 	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);
301 
302 	/* Should be Peripheral Interrupt */
303 	assert(it >= NUM_SGI);
304 	/* Assigned to group0 */
305 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
306 
307 	/* Raise the interrupt */
308 	io_write32(gd->gicd_base + GICD_ISPENDR(idx), mask);
309 }
310 
311 static void gic_it_raise_sgi(struct gic_data *gd, size_t it,
312 		uint8_t cpu_mask, uint8_t group)
313 {
314 	uint32_t mask_id = it & 0xf;
315 	uint32_t mask_group = group & 0x1;
316 	uint32_t mask_cpu = cpu_mask & 0xff;
317 	uint32_t mask = (mask_id | SHIFT_U32(mask_group, 15) |
318 		SHIFT_U32(mask_cpu, 16));
319 
320 	/* Should be Software Generated Interrupt */
321 	assert(it < NUM_SGI);
322 
323 	/* Raise the interrupt */
324 	io_write32(gd->gicd_base + GICD_SGIR, mask);
325 }
326 
/* Read the Interrupt Acknowledge Register, acknowledging the interrupt */
static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
{
#if defined(CFG_ARM_GICV3)
	return read_icc_iar1();
#else
	return io_read32(gd->gicc_base + GICC_IAR);
#endif
}
335 
/* Signal End Of Interrupt with the value previously read from the IAR */
static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
{
#if defined(CFG_ARM_GICV3)
	write_icc_eoir1(eoir);
#else
	io_write32(gd->gicc_base + GICC_EOIR, eoir);
#endif
}
344 
345 static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
346 {
347 	size_t idx = it / NUM_INTS_PER_REG;
348 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
349 	return !!(io_read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
350 }
351 
352 static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
353 {
354 	size_t idx = it / NUM_INTS_PER_REG;
355 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
356 	return !!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
357 }
358 
359 static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
360 {
361 	size_t reg_idx = it / NUM_TARGETS_PER_REG;
362 	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
363 				ITARGETSR_FIELD_BITS;
364 	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
365 	uint32_t target = io_read32(gd->gicd_base + GICD_ITARGETSR(reg_idx));
366 
367 	return (target & target_mask) >> target_shift;
368 }
369 
370 void gic_dump_state(struct gic_data *gd)
371 {
372 	int i;
373 
374 #if defined(CFG_ARM_GICV3)
375 	DMSG("GICC_CTLR: 0x%x", read_icc_ctlr());
376 #else
377 	DMSG("GICC_CTLR: 0x%x", io_read32(gd->gicc_base + GICC_CTLR));
378 #endif
379 	DMSG("GICD_CTLR: 0x%x", io_read32(gd->gicd_base + GICD_CTLR));
380 
381 	for (i = 0; i < (int)gd->max_it; i++) {
382 		if (gic_it_is_enabled(gd, i)) {
383 			DMSG("irq%d: enabled, group:%d, target:%x", i,
384 			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
385 		}
386 	}
387 }
388 
389 void gic_it_handle(struct gic_data *gd)
390 {
391 	uint32_t iar;
392 	uint32_t id;
393 
394 	iar = gic_read_iar(gd);
395 	id = iar & GICC_IAR_IT_ID_MASK;
396 
397 	if (id < gd->max_it)
398 		itr_handle(id);
399 	else
400 		DMSG("ignoring interrupt %" PRIu32, id);
401 
402 	gic_write_eoir(gd, iar);
403 }
404 
405 static void gic_op_add(struct itr_chip *chip, size_t it,
406 		       uint32_t flags __unused)
407 {
408 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
409 
410 	if (it >= gd->max_it)
411 		panic();
412 
413 	gic_it_add(gd, it);
414 	/* Set the CPU mask to deliver interrupts to any online core */
415 	gic_it_set_cpu_mask(gd, it, 0xff);
416 	gic_it_set_prio(gd, it, 0x1);
417 }
418 
419 static void gic_op_enable(struct itr_chip *chip, size_t it)
420 {
421 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
422 
423 	if (it >= gd->max_it)
424 		panic();
425 
426 	gic_it_enable(gd, it);
427 }
428 
429 static void gic_op_disable(struct itr_chip *chip, size_t it)
430 {
431 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
432 
433 	if (it >= gd->max_it)
434 		panic();
435 
436 	gic_it_disable(gd, it);
437 }
438 
439 static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
440 {
441 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
442 
443 	if (it >= gd->max_it)
444 		panic();
445 
446 	gic_it_set_pending(gd, it);
447 }
448 
449 static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
450 			uint8_t cpu_mask)
451 {
452 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
453 
454 	if (it >= gd->max_it)
455 		panic();
456 
457 	if (it < NUM_NS_SGI)
458 		gic_it_raise_sgi(gd, it, cpu_mask, 1);
459 	else
460 		gic_it_raise_sgi(gd, it, cpu_mask, 0);
461 }
462 static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
463 			uint8_t cpu_mask)
464 {
465 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
466 
467 	if (it >= gd->max_it)
468 		panic();
469 
470 	gic_it_set_cpu_mask(gd, it, cpu_mask);
471 }
472