xref: /optee_os/core/drivers/gic.c (revision 8e81e2f5366a971afdd2ac47fb8529d1def5feb0)
1 /*
2  * Copyright (c) 2016-2017, Linaro Limited
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  * this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  * this list of conditions and the following disclaimer in the documentation
14  * and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <arm.h>
30 #include <assert.h>
31 #include <drivers/gic.h>
32 #include <kernel/interrupt.h>
33 #include <kernel/panic.h>
34 #include <util.h>
35 #include <io.h>
36 #include <trace.h>
37 
38 /* Offsets from gic.gicc_base */
39 #define GICC_CTLR		(0x000)
40 #define GICC_PMR		(0x004)
41 #define GICC_IAR		(0x00C)
42 #define GICC_EOIR		(0x010)
43 
44 #define GICC_CTLR_ENABLEGRP0	(1 << 0)
45 #define GICC_CTLR_ENABLEGRP1	(1 << 1)
46 #define GICC_CTLR_FIQEN		(1 << 3)
47 
48 /* Offsets from gic.gicd_base */
49 #define GICD_CTLR		(0x000)
50 #define GICD_TYPER		(0x004)
51 #define GICD_IGROUPR(n)		(0x080 + (n) * 4)
52 #define GICD_ISENABLER(n)	(0x100 + (n) * 4)
53 #define GICD_ICENABLER(n)	(0x180 + (n) * 4)
54 #define GICD_ISPENDR(n)		(0x200 + (n) * 4)
55 #define GICD_ICPENDR(n)		(0x280 + (n) * 4)
56 #define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
57 #define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
58 #define GICD_SGIR		(0xF00)
59 
60 #define GICD_CTLR_ENABLEGRP0	(1 << 0)
61 #define GICD_CTLR_ENABLEGRP1	(1 << 1)
62 
63 /* Number of Private Peripheral Interrupt */
64 #define NUM_PPI	32
65 
66 /* Number of Software Generated Interrupt */
67 #define NUM_SGI			16
68 
69 /* Number of Non-secure Software Generated Interrupt */
70 #define NUM_NS_SGI		8
71 
72 /* Number of interrupts in one register */
73 #define NUM_INTS_PER_REG	32
74 
75 /* Number of targets in one register */
76 #define NUM_TARGETS_PER_REG	4
77 
78 /* Accessors to access ITARGETSRn */
79 #define ITARGETSR_FIELD_BITS	8
80 #define ITARGETSR_FIELD_MASK	0xff
81 
82 /* Maximum number of interrups a GIC can support */
83 #define GIC_MAX_INTS		1020
84 
85 #define GICC_IAR_IT_ID_MASK	0x3ff
86 #define GICC_IAR_CPU_ID_MASK	0x7
87 #define GICC_IAR_CPU_ID_SHIFT	10
88 
89 static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t flags);
90 static void gic_op_enable(struct itr_chip *chip, size_t it);
91 static void gic_op_disable(struct itr_chip *chip, size_t it);
92 static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
93 static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
94 			uint8_t cpu_mask);
95 static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
96 			uint8_t cpu_mask);
97 
/*
 * Interrupt chip operations exposed to the generic interrupt framework
 * (kernel/interrupt.h). All callbacks recover the driver instance with
 * container_of() on the embedded itr_chip.
 */
static const struct itr_ops gic_ops = {
	.add = gic_op_add,
	.enable = gic_op_enable,
	.disable = gic_op_disable,
	.raise_pi = gic_op_raise_pi,
	.raise_sgi = gic_op_raise_sgi,
	.set_affinity = gic_op_set_affinity,
};
106 
/*
 * Probe the ID of the highest implemented interrupt.
 *
 * Works by writing all-ones to each GICD_ISENABLERn from the top down and
 * reading back which enable bits stick (unimplemented interrupts read as 0).
 * The CPU interface is disabled around the probe so the temporarily enabled
 * interrupts cannot be delivered; the original enable state of each register
 * is restored via GICD_ICENABLERn before moving on.
 *
 * Returns the largest valid interrupt ID (inclusive), or 0 if none stick.
 * NOTE: the exact read/write ordering below is essential — do not reorder.
 */
static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
{
	int i;
	uint32_t old_ctlr;
	size_t ret = 0;
	/* Index of the last ISENABLER register covering GIC_MAX_INTS IDs */
	const size_t max_regs = ((GIC_MAX_INTS + NUM_INTS_PER_REG - 1) /
					NUM_INTS_PER_REG) - 1;

	/*
	 * Probe which interrupt number is the largest.
	 */
#if defined(CFG_ARM_GICV3)
	old_ctlr = read_icc_ctlr();
	write_icc_ctlr(0);
#else
	old_ctlr = read32(gicc_base + GICC_CTLR);
	write32(0, gicc_base + GICC_CTLR);
#endif
	for (i = max_regs; i >= 0; i--) {
		uint32_t old_reg;
		uint32_t reg;
		int b;

		/* Save state, set every enable bit, see which ones stick */
		old_reg = read32(gicd_base + GICD_ISENABLER(i));
		write32(0xffffffff, gicd_base + GICD_ISENABLER(i));
		reg = read32(gicd_base + GICD_ISENABLER(i));
		/* Restore: clear all, then the saved set-enables remain set */
		write32(old_reg, gicd_base + GICD_ICENABLER(i));
		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
			if (BIT32(b) & reg) {
				ret = i * NUM_INTS_PER_REG + b;
				goto out;
			}
		}
	}
out:
	/* Re-enable the CPU interface with its original configuration */
#if defined(CFG_ARM_GICV3)
	write_icc_ctlr(old_ctlr);
#else
	write32(old_ctlr, gicc_base + GICC_CTLR);
#endif
	return ret;
}
149 
/*
 * Per-CPU GIC setup: configure the banked SGI/PPI group assignment and
 * enable the CPU interface so this core can take secure (group 0, FIQ)
 * and non-secure (group 1) interrupts. Must run on each secondary CPU.
 */
void gic_cpu_init(struct gic_data *gd)
{
#if defined(CFG_ARM_GICV3)
	/* GICv3 CPU interface is accessed via system registers (ICC_*),
	 * so only the distributor needs a mapped base address.
	 */
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif

	/* per-CPU interrupts config:
	 * ID0-ID7(SGI)   for Non-secure interrupts
	 * ID8-ID15(SGI)  for Secure interrupts.
	 * All PPI config as Non-secure interrupts.
	 */
	write32(0xffff00ff, gd->gicd_base + GICD_IGROUPR(0));

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_ctlr(GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		       GICC_CTLR_FIQEN);
#else
	write32(0x80, gd->gicc_base + GICC_PMR);

	/* Enable GIC */
	write32(GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 | GICC_CTLR_FIQEN,
		gd->gicc_base + GICC_CTLR);
#endif
}
180 
/*
 * Full GIC initialization for the boot CPU: record base addresses, probe
 * the number of interrupts, put every interrupt into a known state
 * (disabled, non-pending, non-secure except secure SGIs), then enable
 * the CPU interface and both distributor groups.
 */
void gic_init(struct gic_data *gd, vaddr_t gicc_base __maybe_unused,
	      vaddr_t gicd_base)
{
	size_t n;

	gic_init_base_addr(gd, gicc_base, gicd_base);

	/* max_it is an inclusive ID, hence the <= bound */
	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
		/* Disable interrupts */
		write32(0xffffffff, gd->gicd_base + GICD_ICENABLER(n));

		/* Make interrupts non-pending */
		write32(0xffffffff, gd->gicd_base + GICD_ICPENDR(n));

		/* Mark interrupts non-secure */
		if (n == 0) {
			/* per-CPU interrupts config:
			 * ID0-ID7(SGI)   for Non-secure interrupts
			 * ID8-ID15(SGI)  for Secure interrupts.
			 * All PPI config as Non-secure interrupts.
			 */
			write32(0xffff00ff, gd->gicd_base + GICD_IGROUPR(n));
		} else {
			write32(0xffffffff, gd->gicd_base + GICD_IGROUPR(n));
		}
	}

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_ctlr(GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		       GICC_CTLR_FIQEN);
#else
	write32(0x80, gd->gicc_base + GICC_PMR);

	/* Enable GIC */
	write32(GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 | GICC_CTLR_FIQEN,
		gd->gicc_base + GICC_CTLR);
#endif
	/* Enable forwarding of both secure and non-secure groups */
	write32(read32(gd->gicd_base + GICD_CTLR) | GICD_CTLR_ENABLEGRP0 |
		GICD_CTLR_ENABLEGRP1, gd->gicd_base + GICD_CTLR);
}
225 
226 void gic_init_base_addr(struct gic_data *gd, vaddr_t gicc_base __maybe_unused,
227 			vaddr_t gicd_base)
228 {
229 	gd->gicc_base = gicc_base;
230 	gd->gicd_base = gicd_base;
231 	gd->max_it = probe_max_it(gicc_base, gicd_base);
232 	gd->chip.ops = &gic_ops;
233 }
234 
235 static void gic_it_add(struct gic_data *gd, size_t it)
236 {
237 	size_t idx = it / NUM_INTS_PER_REG;
238 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
239 
240 	/* Disable the interrupt */
241 	write32(mask, gd->gicd_base + GICD_ICENABLER(idx));
242 	/* Make it non-pending */
243 	write32(mask, gd->gicd_base + GICD_ICPENDR(idx));
244 	/* Assign it to group0 */
245 	write32(read32(gd->gicd_base + GICD_IGROUPR(idx)) & ~mask,
246 			gd->gicd_base + GICD_IGROUPR(idx));
247 }
248 
249 static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
250 				uint8_t cpu_mask)
251 {
252 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
253 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
254 	uint32_t target, target_shift;
255 
256 	/* Assigned to group0 */
257 	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
258 
259 	/* Route it to selected CPUs */
260 	target = read32(gd->gicd_base +
261 			GICD_ITARGETSR(it / NUM_TARGETS_PER_REG));
262 	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
263 	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
264 	target |= cpu_mask << target_shift;
265 	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA,
266 	     target, gd->gicd_base + GICD_ITARGETSR(it / NUM_TARGETS_PER_REG));
267 	write32(target,
268 		gd->gicd_base + GICD_ITARGETSR(it / NUM_TARGETS_PER_REG));
269 	DMSG("cpu_mask: 0x%x\n",
270 	     read32(gd->gicd_base + GICD_ITARGETSR(it / NUM_TARGETS_PER_REG)));
271 }
272 
273 static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
274 {
275 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
276 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
277 
278 	/* Assigned to group0 */
279 	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
280 
281 	/* Set prio it to selected CPUs */
282 	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
283 		prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
284 	write8(prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
285 }
286 
287 static void gic_it_enable(struct gic_data *gd, size_t it)
288 {
289 	size_t idx = it / NUM_INTS_PER_REG;
290 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
291 
292 	/* Assigned to group0 */
293 	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
294 	if (it >= NUM_SGI) {
295 		/*
296 		 * Not enabled yet, except Software Generated Interrupt
297 		 * which is implementation defined
298 		 */
299 		assert(!(read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask));
300 	}
301 
302 	/* Enable the interrupt */
303 	write32(mask, gd->gicd_base + GICD_ISENABLER(idx));
304 }
305 
306 static void gic_it_disable(struct gic_data *gd, size_t it)
307 {
308 	size_t idx = it / NUM_INTS_PER_REG;
309 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
310 
311 	/* Assigned to group0 */
312 	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
313 
314 	/* Disable the interrupt */
315 	write32(mask, gd->gicd_base + GICD_ICENABLER(idx));
316 }
317 
318 static void gic_it_set_pending(struct gic_data *gd, size_t it)
319 {
320 	size_t idx = it / NUM_INTS_PER_REG;
321 	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);
322 
323 	/* Should be Peripheral Interrupt */
324 	assert(it >= NUM_SGI);
325 	/* Assigned to group0 */
326 	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
327 
328 	/* Raise the interrupt */
329 	write32(mask, gd->gicd_base + GICD_ISPENDR(idx));
330 }
331 
332 static void gic_it_raise_sgi(struct gic_data *gd, size_t it,
333 		uint8_t cpu_mask, uint8_t group)
334 {
335 	uint32_t mask_id = it & 0xf;
336 	uint32_t mask_group = group & 0x1;
337 	uint32_t mask_cpu = cpu_mask & 0xff;
338 	uint32_t mask = (mask_id | SHIFT_U32(mask_group, 15) |
339 		SHIFT_U32(mask_cpu, 16));
340 
341 	/* Should be Software Generated Interrupt */
342 	assert(it < NUM_SGI);
343 
344 	/* Raise the interrupt */
345 	write32(mask, gd->gicd_base + GICD_SGIR);
346 }
347 
/*
 * Acknowledge the highest-priority pending group 0 interrupt and return
 * the raw IAR value (interrupt ID plus, on GICv2, the source CPU field).
 */
static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
{
#if defined(CFG_ARM_GICV3)
	return read_icc_iar0();
#else
	return read32(gd->gicc_base + GICC_IAR);
#endif
}
356 
/*
 * Signal end-of-interrupt; @eoir must be the unmodified value previously
 * returned by gic_read_iar() for the same interrupt.
 */
static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
{
#if defined(CFG_ARM_GICV3)
	write_icc_eoir0(eoir);
#else
	write32(eoir, gd->gicc_base + GICC_EOIR);
#endif
}
365 
366 static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
367 {
368 	size_t idx = it / NUM_INTS_PER_REG;
369 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
370 	return !!(read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
371 }
372 
373 static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
374 {
375 	size_t idx = it / NUM_INTS_PER_REG;
376 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
377 	return !!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
378 }
379 
380 static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
381 {
382 	size_t reg_idx = it / NUM_TARGETS_PER_REG;
383 	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
384 				ITARGETSR_FIELD_BITS;
385 	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
386 	uint32_t target =
387 		read32(gd->gicd_base + GICD_ITARGETSR(reg_idx)) & target_mask;
388 
389 	target = target >> target_shift;
390 	return target;
391 }
392 
/*
 * Debug helper: dump the CPU interface and distributor control registers
 * plus the group and CPU target of every enabled interrupt.
 */
void gic_dump_state(struct gic_data *gd)
{
	int i;

#if defined(CFG_ARM_GICV3)
	DMSG("GICC_CTLR: 0x%x", read_icc_ctlr());
#else
	DMSG("GICC_CTLR: 0x%x", read32(gd->gicc_base + GICC_CTLR));
#endif
	DMSG("GICD_CTLR: 0x%x", read32(gd->gicd_base + GICD_CTLR));

	/*
	 * NOTE(review): max_it looks like an inclusive ID elsewhere in
	 * this file (gic_init uses <=), so "<" here may skip the last
	 * interrupt — confirm against probe_max_it().
	 */
	for (i = 0; i < (int)gd->max_it; i++) {
		if (gic_it_is_enabled(gd, i)) {
			DMSG("irq%d: enabled, group:%d, target:%x", i,
			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
		}
	}
}
411 
412 void gic_it_handle(struct gic_data *gd)
413 {
414 	uint32_t iar;
415 	uint32_t id;
416 
417 	iar = gic_read_iar(gd);
418 	id = iar & GICC_IAR_IT_ID_MASK;
419 
420 	if (id < gd->max_it)
421 		itr_handle(id);
422 	else
423 		DMSG("ignoring interrupt %" PRIu32, id);
424 
425 	gic_write_eoir(gd, iar);
426 }
427 
428 static void gic_op_add(struct itr_chip *chip, size_t it,
429 		       uint32_t flags __unused)
430 {
431 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
432 
433 	if (it >= gd->max_it)
434 		panic();
435 
436 	gic_it_add(gd, it);
437 	/* Set the CPU mask to deliver interrupts to any online core */
438 	gic_it_set_cpu_mask(gd, it, 0xff);
439 	gic_it_set_prio(gd, it, 0x1);
440 }
441 
442 static void gic_op_enable(struct itr_chip *chip, size_t it)
443 {
444 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
445 
446 	if (it >= gd->max_it)
447 		panic();
448 
449 	gic_it_enable(gd, it);
450 }
451 
452 static void gic_op_disable(struct itr_chip *chip, size_t it)
453 {
454 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
455 
456 	if (it >= gd->max_it)
457 		panic();
458 
459 	gic_it_disable(gd, it);
460 }
461 
462 static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
463 {
464 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
465 
466 	if (it >= gd->max_it)
467 		panic();
468 
469 	gic_it_set_pending(gd, it);
470 }
471 
472 static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
473 			uint8_t cpu_mask)
474 {
475 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
476 
477 	if (it >= gd->max_it)
478 		panic();
479 
480 	if (it < NUM_NS_SGI)
481 		gic_it_raise_sgi(gd, it, cpu_mask, 1);
482 	else
483 		gic_it_raise_sgi(gd, it, cpu_mask, 0);
484 }
485 static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
486 			uint8_t cpu_mask)
487 {
488 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
489 
490 	if (it >= gd->max_it)
491 		panic();
492 
493 	gic_it_set_cpu_mask(gd, it, cpu_mask);
494 }
495