xref: /optee_os/core/drivers/gic.c (revision 5a913ee74d3c71af2a2860ce8a4e7aeab2916f9b)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2017, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <drivers/gic.h>
10 #include <keep.h>
11 #include <kernel/interrupt.h>
12 #include <kernel/panic.h>
13 #include <util.h>
14 #include <io.h>
15 #include <trace.h>
16 
17 /* Offsets from gic.gicc_base */
18 #define GICC_CTLR		(0x000)
19 #define GICC_PMR		(0x004)
20 #define GICC_IAR		(0x00C)
21 #define GICC_EOIR		(0x010)
22 
23 #define GICC_CTLR_ENABLEGRP0	(1 << 0)
24 #define GICC_CTLR_ENABLEGRP1	(1 << 1)
25 #define GICD_CTLR_ENABLEGRP1S	(1 << 2)
26 #define GICC_CTLR_FIQEN		(1 << 3)
27 
28 /* Offsets from gic.gicd_base */
29 #define GICD_CTLR		(0x000)
30 #define GICD_TYPER		(0x004)
31 #define GICD_IGROUPR(n)		(0x080 + (n) * 4)
32 #define GICD_ISENABLER(n)	(0x100 + (n) * 4)
33 #define GICD_ICENABLER(n)	(0x180 + (n) * 4)
34 #define GICD_ISPENDR(n)		(0x200 + (n) * 4)
35 #define GICD_ICPENDR(n)		(0x280 + (n) * 4)
36 #define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
37 #define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
38 #define GICD_IGROUPMODR(n)	(0xd00 + (n) * 4)
39 #define GICD_SGIR		(0xF00)
40 
41 #define GICD_CTLR_ENABLEGRP0	(1 << 0)
42 #define GICD_CTLR_ENABLEGRP1	(1 << 1)
43 
44 /* Number of Private Peripheral Interrupt */
45 #define NUM_PPI	32
46 
47 /* Number of Software Generated Interrupt */
48 #define NUM_SGI			16
49 
50 /* Number of Non-secure Software Generated Interrupt */
51 #define NUM_NS_SGI		8
52 
53 /* Number of interrupts in one register */
54 #define NUM_INTS_PER_REG	32
55 
56 /* Number of targets in one register */
57 #define NUM_TARGETS_PER_REG	4
58 
59 /* Accessors to access ITARGETSRn */
60 #define ITARGETSR_FIELD_BITS	8
61 #define ITARGETSR_FIELD_MASK	0xff
62 
/* Maximum number of interrupts a GIC can support */
64 #define GIC_MAX_INTS		1020
65 
66 #define GICC_IAR_IT_ID_MASK	0x3ff
67 #define GICC_IAR_CPU_ID_MASK	0x7
68 #define GICC_IAR_CPU_ID_SHIFT	10
69 
70 static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t flags);
71 static void gic_op_enable(struct itr_chip *chip, size_t it);
72 static void gic_op_disable(struct itr_chip *chip, size_t it);
73 static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
74 static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
75 			uint8_t cpu_mask);
76 static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
77 			uint8_t cpu_mask);
78 
79 static const struct itr_ops gic_ops = {
80 	.add = gic_op_add,
81 	.enable = gic_op_enable,
82 	.disable = gic_op_disable,
83 	.raise_pi = gic_op_raise_pi,
84 	.raise_sgi = gic_op_raise_sgi,
85 	.set_affinity = gic_op_set_affinity,
86 };
87 KEEP_PAGER(gic_ops);
88 
/*
 * Probe the largest interrupt ID this GIC implements.
 *
 * Walks the GICD_ISENABLERn registers from the top down, writing all-ones
 * and reading back which enable bits stick: bits for unimplemented
 * interrupts read back as zero.  The CPU interface is disabled around the
 * probe so no interrupt can be delivered while enable bits are toggled;
 * both the per-register enable state and the CPU interface control are
 * restored before returning.
 *
 * Returns the highest implemented interrupt ID (0 if none found).
 */
static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
{
	int i;
	uint32_t old_ctlr;
	size_t ret = 0;
	/* Index of the last ISENABLER register (GIC_MAX_INTS IDs, 32/reg) */
	const size_t max_regs = ((GIC_MAX_INTS + NUM_INTS_PER_REG - 1) /
					NUM_INTS_PER_REG) - 1;

	/*
	 * Probe which interrupt number is the largest.
	 */
#if defined(CFG_ARM_GICV3)
	old_ctlr = read_icc_ctlr();
	write_icc_ctlr(0);
#else
	old_ctlr = io_read32(gicc_base + GICC_CTLR);
	io_write32(gicc_base + GICC_CTLR, 0);
#endif
	for (i = max_regs; i >= 0; i--) {
		uint32_t old_reg;
		uint32_t reg;
		int b;

		/* Try to set every enable bit, see which ones stick */
		old_reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ISENABLER(i), 0xffffffff);
		reg = io_read32(gicd_base + GICD_ISENABLER(i));
		/* Restore: clear only the bits that were not set before */
		io_write32(gicd_base + GICD_ICENABLER(i), ~old_reg);
		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
			if (BIT32(b) & reg) {
				/* Highest implemented ID in this register */
				ret = i * NUM_INTS_PER_REG + b;
				goto out;
			}
		}
	}
out:
#if defined(CFG_ARM_GICV3)
	write_icc_ctlr(old_ctlr);
#else
	io_write32(gicc_base + GICC_CTLR, old_ctlr);
#endif
	return ret;
}
131 
/*
 * Per-CPU GIC setup: program the interrupt grouping of this core's banked
 * SGI/PPI registers and enable its CPU interface.  Must be called on each
 * secondary core; gic_init() performs the shared distributor setup.
 */
void gic_cpu_init(struct gic_data *gd)
{
#if defined(CFG_ARM_GICV3)
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif

	/* per-CPU interrupts config:
	 * ID0-ID7(SGI)   for Non-secure interrupts
	 * ID8-ID15(SGI)  for Secure interrupts.
	 * All PPI config as Non-secure interrupts.
	 */
	io_write32(gd->gicd_base + GICD_IGROUPR(0), 0xffff00ff);

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR,
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		   GICC_CTLR_FIQEN);
#endif
}
162 
/*
 * Full GIC initialization for the primary core: record base addresses and
 * probe max_it, reset every interrupt to disabled/non-pending, set the
 * default secure/non-secure grouping, then enable the distributor and this
 * core's CPU interface.
 */
void gic_init(struct gic_data *gd, vaddr_t gicc_base __maybe_unused,
	      vaddr_t gicd_base)
{
	size_t n;

	gic_init_base_addr(gd, gicc_base, gicd_base);

	/* gd->max_it is the largest interrupt ID; one register per 32 IDs */
	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
		/* Disable interrupts */
		io_write32(gd->gicd_base + GICD_ICENABLER(n), 0xffffffff);

		/* Make interrupts non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(n), 0xffffffff);

		/* Mark interrupts non-secure */
		if (n == 0) {
			/* per-CPU interrupts config:
			 * ID0-ID7(SGI)   for Non-secure interrupts
			 * ID8-ID15(SGI)  for Secure interrupts.
			 * All PPI config as Non-secure interrupts.
			 */
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffff00ff);
		} else {
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffffffff);
		}
	}

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
	io_setbits32(gd->gicd_base + GICD_CTLR, GICD_CTLR_ENABLEGRP1S);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR, GICC_CTLR_FIQEN |
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1);
	io_setbits32(gd->gicd_base + GICD_CTLR,
		     GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1);
#endif
}
207 
208 void gic_init_base_addr(struct gic_data *gd, vaddr_t gicc_base __maybe_unused,
209 			vaddr_t gicd_base)
210 {
211 	gd->gicc_base = gicc_base;
212 	gd->gicd_base = gicd_base;
213 	gd->max_it = probe_max_it(gicc_base, gicd_base);
214 	gd->chip.ops = &gic_ops;
215 }
216 
217 static void gic_it_add(struct gic_data *gd, size_t it)
218 {
219 	size_t idx = it / NUM_INTS_PER_REG;
220 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
221 
222 	/* Disable the interrupt */
223 	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
224 	/* Make it non-pending */
225 	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
226 	/* Assign it to group0 */
227 	io_clrbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
228 #if defined(CFG_ARM_GICV3)
229 	/* Assign it to group1S */
230 	io_setbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
231 #endif
232 }
233 
234 static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
235 				uint8_t cpu_mask)
236 {
237 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
238 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
239 	uint32_t target, target_shift;
240 	vaddr_t itargetsr = gd->gicd_base +
241 			    GICD_ITARGETSR(it / NUM_TARGETS_PER_REG);
242 
243 	/* Assigned to group0 */
244 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
245 
246 	/* Route it to selected CPUs */
247 	target = io_read32(itargetsr);
248 	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
249 	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
250 	target |= cpu_mask << target_shift;
251 	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA, target, itargetsr);
252 	io_write32(itargetsr, target);
253 	DMSG("cpu_mask: 0x%x", io_read32(itargetsr));
254 }
255 
256 static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
257 {
258 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
259 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
260 
261 	/* Assigned to group0 */
262 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
263 
264 	/* Set prio it to selected CPUs */
265 	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
266 		prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
267 	io_write8(gd->gicd_base + GICD_IPRIORITYR(0) + it, prio);
268 }
269 
270 static void gic_it_enable(struct gic_data *gd, size_t it)
271 {
272 	size_t idx = it / NUM_INTS_PER_REG;
273 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
274 	vaddr_t base = gd->gicd_base;
275 
276 	/* Assigned to group0 */
277 	assert(!(io_read32(base + GICD_IGROUPR(idx)) & mask));
278 	if (it >= NUM_SGI) {
279 		/*
280 		 * Not enabled yet, except Software Generated Interrupt
281 		 * which is implementation defined
282 		 */
283 		assert(!(io_read32(base + GICD_ISENABLER(idx)) & mask));
284 	}
285 
286 	/* Enable the interrupt */
287 	io_write32(base + GICD_ISENABLER(idx), mask);
288 }
289 
290 static void gic_it_disable(struct gic_data *gd, size_t it)
291 {
292 	size_t idx = it / NUM_INTS_PER_REG;
293 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
294 
295 	/* Assigned to group0 */
296 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
297 
298 	/* Disable the interrupt */
299 	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
300 }
301 
302 static void gic_it_set_pending(struct gic_data *gd, size_t it)
303 {
304 	size_t idx = it / NUM_INTS_PER_REG;
305 	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);
306 
307 	/* Should be Peripheral Interrupt */
308 	assert(it >= NUM_SGI);
309 	/* Assigned to group0 */
310 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
311 
312 	/* Raise the interrupt */
313 	io_write32(gd->gicd_base + GICD_ISPENDR(idx), mask);
314 }
315 
316 static void gic_it_raise_sgi(struct gic_data *gd, size_t it,
317 		uint8_t cpu_mask, uint8_t group)
318 {
319 	uint32_t mask_id = it & 0xf;
320 	uint32_t mask_group = group & 0x1;
321 	uint32_t mask_cpu = cpu_mask & 0xff;
322 	uint32_t mask = (mask_id | SHIFT_U32(mask_group, 15) |
323 		SHIFT_U32(mask_cpu, 16));
324 
325 	/* Should be Software Generated Interrupt */
326 	assert(it < NUM_SGI);
327 
328 	/* Raise the interrupt */
329 	io_write32(gd->gicd_base + GICD_SGIR, mask);
330 }
331 
/*
 * Acknowledge the highest-priority pending interrupt and return the raw
 * Interrupt Acknowledge Register value (interrupt ID plus, on GICv2,
 * the source CPU ID for SGIs).
 */
static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
{
#if defined(CFG_ARM_GICV3)
	return read_icc_iar1();
#else
	return io_read32(gd->gicc_base + GICC_IAR);
#endif
}
340 
/*
 * Signal End Of Interrupt; @eoir must be the unmodified value previously
 * returned by gic_read_iar() for this interrupt.
 */
static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
{
#if defined(CFG_ARM_GICV3)
	write_icc_eoir1(eoir);
#else
	io_write32(gd->gicc_base + GICC_EOIR, eoir);
#endif
}
349 
350 static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
351 {
352 	size_t idx = it / NUM_INTS_PER_REG;
353 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
354 	return !!(io_read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
355 }
356 
357 static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
358 {
359 	size_t idx = it / NUM_INTS_PER_REG;
360 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
361 	return !!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
362 }
363 
364 static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
365 {
366 	size_t reg_idx = it / NUM_TARGETS_PER_REG;
367 	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
368 				ITARGETSR_FIELD_BITS;
369 	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
370 	uint32_t target = io_read32(gd->gicd_base + GICD_ITARGETSR(reg_idx));
371 
372 	return (target & target_mask) >> target_shift;
373 }
374 
375 void gic_dump_state(struct gic_data *gd)
376 {
377 	int i;
378 
379 #if defined(CFG_ARM_GICV3)
380 	DMSG("GICC_CTLR: 0x%x", read_icc_ctlr());
381 #else
382 	DMSG("GICC_CTLR: 0x%x", io_read32(gd->gicc_base + GICC_CTLR));
383 #endif
384 	DMSG("GICD_CTLR: 0x%x", io_read32(gd->gicd_base + GICD_CTLR));
385 
386 	for (i = 0; i < (int)gd->max_it; i++) {
387 		if (gic_it_is_enabled(gd, i)) {
388 			DMSG("irq%d: enabled, group:%d, target:%x", i,
389 			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
390 		}
391 	}
392 }
393 
394 void gic_it_handle(struct gic_data *gd)
395 {
396 	uint32_t iar;
397 	uint32_t id;
398 
399 	iar = gic_read_iar(gd);
400 	id = iar & GICC_IAR_IT_ID_MASK;
401 
402 	if (id < gd->max_it)
403 		itr_handle(id);
404 	else
405 		DMSG("ignoring interrupt %" PRIu32, id);
406 
407 	gic_write_eoir(gd, iar);
408 }
409 
410 static void gic_op_add(struct itr_chip *chip, size_t it,
411 		       uint32_t flags __unused)
412 {
413 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
414 
415 	if (it >= gd->max_it)
416 		panic();
417 
418 	gic_it_add(gd, it);
419 	/* Set the CPU mask to deliver interrupts to any online core */
420 	gic_it_set_cpu_mask(gd, it, 0xff);
421 	gic_it_set_prio(gd, it, 0x1);
422 }
423 
424 static void gic_op_enable(struct itr_chip *chip, size_t it)
425 {
426 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
427 
428 	if (it >= gd->max_it)
429 		panic();
430 
431 	gic_it_enable(gd, it);
432 }
433 
434 static void gic_op_disable(struct itr_chip *chip, size_t it)
435 {
436 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
437 
438 	if (it >= gd->max_it)
439 		panic();
440 
441 	gic_it_disable(gd, it);
442 }
443 
444 static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
445 {
446 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
447 
448 	if (it >= gd->max_it)
449 		panic();
450 
451 	gic_it_set_pending(gd, it);
452 }
453 
454 static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
455 			uint8_t cpu_mask)
456 {
457 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
458 
459 	if (it >= gd->max_it)
460 		panic();
461 
462 	if (it < NUM_NS_SGI)
463 		gic_it_raise_sgi(gd, it, cpu_mask, 1);
464 	else
465 		gic_it_raise_sgi(gd, it, cpu_mask, 0);
466 }
467 static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
468 			uint8_t cpu_mask)
469 {
470 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
471 
472 	if (it >= gd->max_it)
473 		panic();
474 
475 	gic_it_set_cpu_mask(gd, it, cpu_mask);
476 }
477