xref: /optee_os/core/drivers/gic.c (revision 1bb929836182ecb96d2d9d268daa807c67596396)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2017, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  * this list of conditions and the following disclaimer.
12  *
13  * 2. Redistributions in binary form must reproduce the above copyright notice,
14  * this list of conditions and the following disclaimer in the documentation
15  * and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
21  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27  * POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 #include <arm.h>
31 #include <assert.h>
32 #include <drivers/gic.h>
33 #include <kernel/interrupt.h>
34 #include <kernel/panic.h>
35 #include <util.h>
36 #include <io.h>
37 #include <trace.h>
38 
/* Offsets from gic.gicc_base (GICv2 CPU interface registers) */
#define GICC_CTLR		(0x000)
#define GICC_PMR		(0x004)
#define GICC_IAR		(0x00C)
#define GICC_EOIR		(0x010)

/* GICC_CTLR bit fields */
#define GICC_CTLR_ENABLEGRP0	(1 << 0)
#define GICC_CTLR_ENABLEGRP1	(1 << 1)
#define GICC_CTLR_FIQEN		(1 << 3)

/* Offsets from gic.gicd_base (distributor registers) */
#define GICD_CTLR		(0x000)
#define GICD_TYPER		(0x004)
#define GICD_IGROUPR(n)		(0x080 + (n) * 4)
#define GICD_ISENABLER(n)	(0x100 + (n) * 4)
#define GICD_ICENABLER(n)	(0x180 + (n) * 4)
#define GICD_ISPENDR(n)		(0x200 + (n) * 4)
#define GICD_ICPENDR(n)		(0x280 + (n) * 4)
#define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
#define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
#define GICD_SGIR		(0xF00)

/* GICD_CTLR bit fields */
#define GICD_CTLR_ENABLEGRP0	(1 << 0)
#define GICD_CTLR_ENABLEGRP1	(1 << 1)

/* Number of Private Peripheral Interrupts (IDs 0-31, including SGIs) */
#define NUM_PPI	32

/* Number of Software Generated Interrupts (IDs 0-15) */
#define NUM_SGI			16

/* Number of Non-secure Software Generated Interrupts (IDs 0-7) */
#define NUM_NS_SGI		8

/* Number of interrupts covered by one IGROUPR/ISENABLER/... register */
#define NUM_INTS_PER_REG	32

/* Number of 8-bit CPU target fields in one GICD_ITARGETSR register */
#define NUM_TARGETS_PER_REG	4

/* Accessors to access ITARGETSRn */
#define ITARGETSR_FIELD_BITS	8
#define ITARGETSR_FIELD_MASK	0xff

/* Maximum number of interrupts a GIC can support */
#define GIC_MAX_INTS		1020

/* Fields of the GICC_IAR acknowledge register */
#define GICC_IAR_IT_ID_MASK	0x3ff
#define GICC_IAR_CPU_ID_MASK	0x7
#define GICC_IAR_CPU_ID_SHIFT	10
89 
/* Forward declarations of the handlers exposed through struct itr_ops */
static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t flags);
static void gic_op_enable(struct itr_chip *chip, size_t it);
static void gic_op_disable(struct itr_chip *chip, size_t it);
static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);
static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);

/* Interrupt controller operations registered via gic_init_base_addr() */
static const struct itr_ops gic_ops = {
	.add = gic_op_add,
	.enable = gic_op_enable,
	.disable = gic_op_disable,
	.raise_pi = gic_op_raise_pi,
	.raise_sgi = gic_op_raise_sgi,
	.set_affinity = gic_op_set_affinity,
};
107 
108 static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
109 {
110 	int i;
111 	uint32_t old_ctlr;
112 	size_t ret = 0;
113 	const size_t max_regs = ((GIC_MAX_INTS + NUM_INTS_PER_REG - 1) /
114 					NUM_INTS_PER_REG) - 1;
115 
116 	/*
117 	 * Probe which interrupt number is the largest.
118 	 */
119 #if defined(CFG_ARM_GICV3)
120 	old_ctlr = read_icc_ctlr();
121 	write_icc_ctlr(0);
122 #else
123 	old_ctlr = read32(gicc_base + GICC_CTLR);
124 	write32(0, gicc_base + GICC_CTLR);
125 #endif
126 	for (i = max_regs; i >= 0; i--) {
127 		uint32_t old_reg;
128 		uint32_t reg;
129 		int b;
130 
131 		old_reg = read32(gicd_base + GICD_ISENABLER(i));
132 		write32(0xffffffff, gicd_base + GICD_ISENABLER(i));
133 		reg = read32(gicd_base + GICD_ISENABLER(i));
134 		write32(old_reg, gicd_base + GICD_ICENABLER(i));
135 		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
136 			if (BIT32(b) & reg) {
137 				ret = i * NUM_INTS_PER_REG + b;
138 				goto out;
139 			}
140 		}
141 	}
142 out:
143 #if defined(CFG_ARM_GICV3)
144 	write_icc_ctlr(old_ctlr);
145 #else
146 	write32(old_ctlr, gicc_base + GICC_CTLR);
147 #endif
148 	return ret;
149 }
150 
/*
 * Per-CPU GIC initialization: configures the banked SGI/PPI group
 * assignments and enables this core's CPU interface. Must run on each
 * core; gic_init() performs the same CPU-interface setup for the boot
 * core as part of global initialization.
 */
void gic_cpu_init(struct gic_data *gd)
{
#if defined(CFG_ARM_GICV3)
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif

	/* per-CPU interrupts config:
	 * ID0-ID7(SGI)   for Non-secure interrupts
	 * ID8-ID15(SGI)  for Secure interrupts.
	 * All PPI config as Non-secure interrupts.
	 */
	write32(0xffff00ff, gd->gicd_base + GICD_IGROUPR(0));

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_ctlr(GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		       GICC_CTLR_FIQEN);
#else
	write32(0x80, gd->gicc_base + GICC_PMR);

	/* Enable GIC */
	write32(GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 | GICC_CTLR_FIQEN,
		gd->gicc_base + GICC_CTLR);
#endif
}
181 
/*
 * Full GIC initialization for the boot core: records the base addresses,
 * probes the largest interrupt ID, puts every interrupt into a known
 * state (disabled, non-pending, non-secure group 1 except secure SGIs),
 * then enables the CPU interface and the distributor for both groups.
 */
void gic_init(struct gic_data *gd, vaddr_t gicc_base __maybe_unused,
	      vaddr_t gicd_base)
{
	size_t n;

	gic_init_base_addr(gd, gicc_base, gicd_base);

	/* Walk every 32-interrupt register bank up to and including max_it */
	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
		/* Disable interrupts */
		write32(0xffffffff, gd->gicd_base + GICD_ICENABLER(n));

		/* Make interrupts non-pending */
		write32(0xffffffff, gd->gicd_base + GICD_ICPENDR(n));

		/* Mark interrupts non-secure */
		if (n == 0) {
			/* per-CPU interrupts config:
			 * ID0-ID7(SGI)   for Non-secure interrupts
			 * ID8-ID15(SGI)  for Secure interrupts.
			 * All PPI config as Non-secure interrupts.
			 */
			write32(0xffff00ff, gd->gicd_base + GICD_IGROUPR(n));
		} else {
			write32(0xffffffff, gd->gicd_base + GICD_IGROUPR(n));
		}
	}

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_ctlr(GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		       GICC_CTLR_FIQEN);
#else
	write32(0x80, gd->gicc_base + GICC_PMR);

	/* Enable GIC */
	write32(GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 | GICC_CTLR_FIQEN,
		gd->gicc_base + GICC_CTLR);
#endif
	/* Enable forwarding of both group 0 and group 1 interrupts */
	write32(read32(gd->gicd_base + GICD_CTLR) | GICD_CTLR_ENABLEGRP0 |
		GICD_CTLR_ENABLEGRP1, gd->gicd_base + GICD_CTLR);
}
226 
227 void gic_init_base_addr(struct gic_data *gd, vaddr_t gicc_base __maybe_unused,
228 			vaddr_t gicd_base)
229 {
230 	gd->gicc_base = gicc_base;
231 	gd->gicd_base = gicd_base;
232 	gd->max_it = probe_max_it(gicc_base, gicd_base);
233 	gd->chip.ops = &gic_ops;
234 }
235 
236 static void gic_it_add(struct gic_data *gd, size_t it)
237 {
238 	size_t idx = it / NUM_INTS_PER_REG;
239 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
240 
241 	/* Disable the interrupt */
242 	write32(mask, gd->gicd_base + GICD_ICENABLER(idx));
243 	/* Make it non-pending */
244 	write32(mask, gd->gicd_base + GICD_ICPENDR(idx));
245 	/* Assign it to group0 */
246 	write32(read32(gd->gicd_base + GICD_IGROUPR(idx)) & ~mask,
247 			gd->gicd_base + GICD_IGROUPR(idx));
248 }
249 
250 static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
251 				uint8_t cpu_mask)
252 {
253 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
254 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
255 	uint32_t target, target_shift;
256 
257 	/* Assigned to group0 */
258 	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
259 
260 	/* Route it to selected CPUs */
261 	target = read32(gd->gicd_base +
262 			GICD_ITARGETSR(it / NUM_TARGETS_PER_REG));
263 	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
264 	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
265 	target |= cpu_mask << target_shift;
266 	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA,
267 	     target, gd->gicd_base + GICD_ITARGETSR(it / NUM_TARGETS_PER_REG));
268 	write32(target,
269 		gd->gicd_base + GICD_ITARGETSR(it / NUM_TARGETS_PER_REG));
270 	DMSG("cpu_mask: 0x%x\n",
271 	     read32(gd->gicd_base + GICD_ITARGETSR(it / NUM_TARGETS_PER_REG)));
272 }
273 
274 static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
275 {
276 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
277 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
278 
279 	/* Assigned to group0 */
280 	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
281 
282 	/* Set prio it to selected CPUs */
283 	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
284 		prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
285 	write8(prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
286 }
287 
288 static void gic_it_enable(struct gic_data *gd, size_t it)
289 {
290 	size_t idx = it / NUM_INTS_PER_REG;
291 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
292 
293 	/* Assigned to group0 */
294 	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
295 	if (it >= NUM_SGI) {
296 		/*
297 		 * Not enabled yet, except Software Generated Interrupt
298 		 * which is implementation defined
299 		 */
300 		assert(!(read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask));
301 	}
302 
303 	/* Enable the interrupt */
304 	write32(mask, gd->gicd_base + GICD_ISENABLER(idx));
305 }
306 
307 static void gic_it_disable(struct gic_data *gd, size_t it)
308 {
309 	size_t idx = it / NUM_INTS_PER_REG;
310 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
311 
312 	/* Assigned to group0 */
313 	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
314 
315 	/* Disable the interrupt */
316 	write32(mask, gd->gicd_base + GICD_ICENABLER(idx));
317 }
318 
319 static void gic_it_set_pending(struct gic_data *gd, size_t it)
320 {
321 	size_t idx = it / NUM_INTS_PER_REG;
322 	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);
323 
324 	/* Should be Peripheral Interrupt */
325 	assert(it >= NUM_SGI);
326 	/* Assigned to group0 */
327 	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
328 
329 	/* Raise the interrupt */
330 	write32(mask, gd->gicd_base + GICD_ISPENDR(idx));
331 }
332 
333 static void gic_it_raise_sgi(struct gic_data *gd, size_t it,
334 		uint8_t cpu_mask, uint8_t group)
335 {
336 	uint32_t mask_id = it & 0xf;
337 	uint32_t mask_group = group & 0x1;
338 	uint32_t mask_cpu = cpu_mask & 0xff;
339 	uint32_t mask = (mask_id | SHIFT_U32(mask_group, 15) |
340 		SHIFT_U32(mask_cpu, 16));
341 
342 	/* Should be Software Generated Interrupt */
343 	assert(it < NUM_SGI);
344 
345 	/* Raise the interrupt */
346 	write32(mask, gd->gicd_base + GICD_SGIR);
347 }
348 
/*
 * Acknowledge the highest-priority pending interrupt and return the raw
 * IAR value (interrupt ID in the low 10 bits). Uses the ICC_IAR0 system
 * register on GICv3, the memory-mapped GICC_IAR otherwise.
 */
static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
{
#if defined(CFG_ARM_GICV3)
	return read_icc_iar0();
#else
	return read32(gd->gicc_base + GICC_IAR);
#endif
}
357 
/*
 * Signal end-of-interrupt for a previously acknowledged interrupt.
 * @eoir must be the unmodified value returned by gic_read_iar().
 */
static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
{
#if defined(CFG_ARM_GICV3)
	write_icc_eoir0(eoir);
#else
	write32(eoir, gd->gicc_base + GICC_EOIR);
#endif
}
366 
367 static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
368 {
369 	size_t idx = it / NUM_INTS_PER_REG;
370 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
371 	return !!(read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
372 }
373 
374 static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
375 {
376 	size_t idx = it / NUM_INTS_PER_REG;
377 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
378 	return !!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
379 }
380 
381 static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
382 {
383 	size_t reg_idx = it / NUM_TARGETS_PER_REG;
384 	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
385 				ITARGETSR_FIELD_BITS;
386 	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
387 	uint32_t target =
388 		read32(gd->gicd_base + GICD_ITARGETSR(reg_idx)) & target_mask;
389 
390 	target = target >> target_shift;
391 	return target;
392 }
393 
394 void gic_dump_state(struct gic_data *gd)
395 {
396 	int i;
397 
398 #if defined(CFG_ARM_GICV3)
399 	DMSG("GICC_CTLR: 0x%x", read_icc_ctlr());
400 #else
401 	DMSG("GICC_CTLR: 0x%x", read32(gd->gicc_base + GICC_CTLR));
402 #endif
403 	DMSG("GICD_CTLR: 0x%x", read32(gd->gicd_base + GICD_CTLR));
404 
405 	for (i = 0; i < (int)gd->max_it; i++) {
406 		if (gic_it_is_enabled(gd, i)) {
407 			DMSG("irq%d: enabled, group:%d, target:%x", i,
408 			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
409 		}
410 	}
411 }
412 
413 void gic_it_handle(struct gic_data *gd)
414 {
415 	uint32_t iar;
416 	uint32_t id;
417 
418 	iar = gic_read_iar(gd);
419 	id = iar & GICC_IAR_IT_ID_MASK;
420 
421 	if (id < gd->max_it)
422 		itr_handle(id);
423 	else
424 		DMSG("ignoring interrupt %" PRIu32, id);
425 
426 	gic_write_eoir(gd, iar);
427 }
428 
429 static void gic_op_add(struct itr_chip *chip, size_t it,
430 		       uint32_t flags __unused)
431 {
432 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
433 
434 	if (it >= gd->max_it)
435 		panic();
436 
437 	gic_it_add(gd, it);
438 	/* Set the CPU mask to deliver interrupts to any online core */
439 	gic_it_set_cpu_mask(gd, it, 0xff);
440 	gic_it_set_prio(gd, it, 0x1);
441 }
442 
443 static void gic_op_enable(struct itr_chip *chip, size_t it)
444 {
445 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
446 
447 	if (it >= gd->max_it)
448 		panic();
449 
450 	gic_it_enable(gd, it);
451 }
452 
453 static void gic_op_disable(struct itr_chip *chip, size_t it)
454 {
455 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
456 
457 	if (it >= gd->max_it)
458 		panic();
459 
460 	gic_it_disable(gd, it);
461 }
462 
463 static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
464 {
465 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
466 
467 	if (it >= gd->max_it)
468 		panic();
469 
470 	gic_it_set_pending(gd, it);
471 }
472 
473 static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
474 			uint8_t cpu_mask)
475 {
476 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
477 
478 	if (it >= gd->max_it)
479 		panic();
480 
481 	if (it < NUM_NS_SGI)
482 		gic_it_raise_sgi(gd, it, cpu_mask, 1);
483 	else
484 		gic_it_raise_sgi(gd, it, cpu_mask, 0);
485 }
486 static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
487 			uint8_t cpu_mask)
488 {
489 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
490 
491 	if (it >= gd->max_it)
492 		panic();
493 
494 	gic_it_set_cpu_mask(gd, it, cpu_mask);
495 }
496