xref: /optee_os/core/drivers/gic.c (revision ef4bc451c262f007562867ea4e5f4ca9f26459fd)
1 /*
2  * Copyright (c) 2016, Linaro Limited
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  * this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  * this list of conditions and the following disclaimer in the documentation
14  * and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <assert.h>
30 #include <drivers/gic.h>
31 #include <kernel/interrupt.h>
32 #include <kernel/panic.h>
33 #include <util.h>
34 #include <io.h>
35 #include <trace.h>
36 
37 /* Offsets from gic.gicc_base */
38 #define GICC_CTLR		(0x000)
39 #define GICC_PMR		(0x004)
40 #define GICC_IAR		(0x00C)
41 #define GICC_EOIR		(0x010)
42 
43 #define GICC_CTLR_ENABLEGRP0	(1 << 0)
44 #define GICC_CTLR_ENABLEGRP1	(1 << 1)
45 #define GICC_CTLR_FIQEN		(1 << 3)
46 
47 /* Offsets from gic.gicd_base */
48 #define GICD_CTLR		(0x000)
49 #define GICD_TYPER		(0x004)
50 #define GICD_IGROUPR(n)		(0x080 + (n) * 4)
51 #define GICD_ISENABLER(n)	(0x100 + (n) * 4)
52 #define GICD_ICENABLER(n)	(0x180 + (n) * 4)
53 #define GICD_ISPENDR(n)		(0x200 + (n) * 4)
54 #define GICD_ICPENDR(n)		(0x280 + (n) * 4)
55 #define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
56 #define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
57 #define GICD_SGIR		(0xF00)
58 
59 #define GICD_CTLR_ENABLEGRP0	(1 << 0)
60 #define GICD_CTLR_ENABLEGRP1	(1 << 1)
61 
62 /* Number of Private Peripheral Interrupt */
63 #define NUM_PPI	32
64 
65 /* Number of Software Generated Interrupt */
66 #define NUM_SGI			16
67 
68 /* Number of Non-secure Software Generated Interrupt */
69 #define NUM_NS_SGI		8
70 
71 /* Number of interrupts in one register */
72 #define NUM_INTS_PER_REG	32
73 
74 /* Number of targets in one register */
75 #define NUM_TARGETS_PER_REG	4
76 
77 /* Accessors to access ITARGETSRn */
78 #define ITARGETSR_FIELD_BITS	8
79 #define ITARGETSR_FIELD_MASK	0xff
80 
81 /* Maximum number of interrups a GIC can support */
82 #define GIC_MAX_INTS		1020
83 
84 #define GIC_SPURIOUS_ID		1023
85 
86 #define GICC_IAR_IT_ID_MASK	0x3ff
87 #define GICC_IAR_CPU_ID_MASK	0x7
88 #define GICC_IAR_CPU_ID_SHIFT	10
89 
/* Handlers backing the generic interrupt chip interface (struct itr_ops) */
static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t flags);
static void gic_op_enable(struct itr_chip *chip, size_t it);
static void gic_op_disable(struct itr_chip *chip, size_t it);
static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);
static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);

/* Operation table registered with the interrupt framework (gd->chip.ops) */
static const struct itr_ops gic_ops = {
	.add = gic_op_add,
	.enable = gic_op_enable,
	.disable = gic_op_disable,
	.raise_pi = gic_op_raise_pi,
	.raise_sgi = gic_op_raise_sgi,
	.set_affinity = gic_op_set_affinity,
};
107 
/*
 * Probe the ID of the highest implemented interrupt.
 *
 * Works by writing all-ones to each GICD_ISENABLERn from the top down
 * and reading back which enable bits stick (RAZ/WI for unimplemented
 * interrupts). The CPU interface is disabled for the duration so none
 * of the temporarily-enabled interrupts is actually delivered; the
 * original enable state of each register is restored via ICENABLERn.
 *
 * Returns the largest implemented interrupt ID, or 0 if none is found.
 */
static size_t probe_max_it(vaddr_t gicc_base, vaddr_t gicd_base)
{
	int i;
	uint32_t old_ctlr;
	size_t ret = 0;
	/* Index of the last ISENABLER register covering GIC_MAX_INTS IDs */
	const size_t max_regs = ((GIC_MAX_INTS + NUM_INTS_PER_REG - 1) /
					NUM_INTS_PER_REG) - 1;

	/*
	 * Probe which interrupt number is the largest.
	 */
	old_ctlr = read32(gicc_base + GICC_CTLR);
	write32(0, gicc_base + GICC_CTLR);
	for (i = max_regs; i >= 0; i--) {
		uint32_t old_reg;
		uint32_t reg;
		int b;

		old_reg = read32(gicd_base + GICD_ISENABLER(i));
		write32(0xffffffff, gicd_base + GICD_ISENABLER(i));
		reg = read32(gicd_base + GICD_ISENABLER(i));
		/* Writing ICENABLER disables what ISENABLER enabled */
		write32(old_reg, gicd_base + GICD_ICENABLER(i));
		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
			if (BIT32(b) & reg) {
				ret = i * NUM_INTS_PER_REG + b;
				goto out;
			}
		}
	}
out:
	/* Restore the CPU interface control register */
	write32(old_ctlr, gicc_base + GICC_CTLR);
	return ret;
}
141 
142 void gic_cpu_init(struct gic_data *gd)
143 {
144 	assert(gd->gicd_base && gd->gicc_base);
145 
146 	/* per-CPU interrupts config:
147 	 * ID0-ID7(SGI)   for Non-secure interrupts
148 	 * ID8-ID15(SGI)  for Secure interrupts.
149 	 * All PPI config as Non-secure interrupts.
150 	 */
151 	write32(0xffff00ff, gd->gicd_base + GICD_IGROUPR(0));
152 
153 	/* Set the priority mask to permit Non-secure interrupts, and to
154 	 * allow the Non-secure world to adjust the priority mask itself
155 	 */
156 	write32(0x80, gd->gicc_base + GICC_PMR);
157 
158 	/* Enable GIC */
159 	write32(GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 | GICC_CTLR_FIQEN,
160 		gd->gicc_base + GICC_CTLR);
161 }
162 
163 void gic_init(struct gic_data *gd, vaddr_t gicc_base, vaddr_t gicd_base)
164 {
165 	size_t n;
166 
167 	gic_init_base_addr(gd, gicc_base, gicd_base);
168 
169 	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
170 		/* Disable interrupts */
171 		write32(0xffffffff, gd->gicd_base + GICD_ICENABLER(n));
172 
173 		/* Make interrupts non-pending */
174 		write32(0xffffffff, gd->gicd_base + GICD_ICPENDR(n));
175 
176 		/* Mark interrupts non-secure */
177 		if (n == 0) {
178 			/* per-CPU inerrupts config:
179                          * ID0-ID7(SGI)   for Non-secure interrupts
180                          * ID8-ID15(SGI)  for Secure interrupts.
181                          * All PPI config as Non-secure interrupts.
182 			 */
183 			write32(0xffff00ff, gd->gicd_base + GICD_IGROUPR(n));
184 		} else {
185 			write32(0xffffffff, gd->gicd_base + GICD_IGROUPR(n));
186 		}
187 	}
188 
189 	/* Set the priority mask to permit Non-secure interrupts, and to
190 	 * allow the Non-secure world to adjust the priority mask itself
191 	 */
192 	write32(0x80, gd->gicc_base + GICC_PMR);
193 
194 	/* Enable GIC */
195 	write32(GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 | GICC_CTLR_FIQEN,
196 		gd->gicc_base + GICC_CTLR);
197 	write32(GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1,
198 		gd->gicd_base + GICD_CTLR);
199 }
200 
201 void gic_init_base_addr(struct gic_data *gd, vaddr_t gicc_base,
202 			vaddr_t gicd_base)
203 {
204 	gd->gicc_base = gicc_base;
205 	gd->gicd_base = gicd_base;
206 	gd->max_it = probe_max_it(gicc_base, gicd_base);
207 	gd->chip.ops = &gic_ops;
208 }
209 
210 static void gic_it_add(struct gic_data *gd, size_t it)
211 {
212 	size_t idx = it / NUM_INTS_PER_REG;
213 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
214 
215 	/* Disable the interrupt */
216 	write32(mask, gd->gicd_base + GICD_ICENABLER(idx));
217 	/* Make it non-pending */
218 	write32(mask, gd->gicd_base + GICD_ICPENDR(idx));
219 	/* Assign it to group0 */
220 	write32(read32(gd->gicd_base + GICD_IGROUPR(idx)) & ~mask,
221 			gd->gicd_base + GICD_IGROUPR(idx));
222 }
223 
224 static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
225 				uint8_t cpu_mask)
226 {
227 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
228 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
229 	uint32_t target, target_shift;
230 
231 	/* Assigned to group0 */
232 	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
233 
234 	/* Route it to selected CPUs */
235 	target = read32(gd->gicd_base +
236 			GICD_ITARGETSR(it / NUM_TARGETS_PER_REG));
237 	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
238 	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
239 	target |= cpu_mask << target_shift;
240 	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA,
241 	     target, gd->gicd_base + GICD_ITARGETSR(it / NUM_TARGETS_PER_REG));
242 	write32(target,
243 		gd->gicd_base + GICD_ITARGETSR(it / NUM_TARGETS_PER_REG));
244 	DMSG("cpu_mask: 0x%x\n",
245 	     read32(gd->gicd_base + GICD_ITARGETSR(it / NUM_TARGETS_PER_REG)));
246 }
247 
248 static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
249 {
250 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
251 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
252 
253 	/* Assigned to group0 */
254 	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
255 
256 	/* Set prio it to selected CPUs */
257 	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
258 		prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
259 	write8(prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
260 }
261 
262 static void gic_it_enable(struct gic_data *gd, size_t it)
263 {
264 	size_t idx = it / NUM_INTS_PER_REG;
265 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
266 
267 	/* Assigned to group0 */
268 	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
269 	if (it >= NUM_SGI) {
270 		/*
271 		 * Not enabled yet, except Software Generated Interrupt
272 		 * which is implementation defined
273 		 */
274 		assert(!(read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask));
275 	}
276 
277 	/* Enable the interrupt */
278 	write32(mask, gd->gicd_base + GICD_ISENABLER(idx));
279 }
280 
281 static void gic_it_disable(struct gic_data *gd, size_t it)
282 {
283 	size_t idx = it / NUM_INTS_PER_REG;
284 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
285 
286 	/* Assigned to group0 */
287 	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
288 
289 	/* Disable the interrupt */
290 	write32(mask, gd->gicd_base + GICD_ICENABLER(idx));
291 }
292 
293 static void gic_it_set_pending(struct gic_data *gd, size_t it)
294 {
295 	size_t idx = it / NUM_INTS_PER_REG;
296 	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);
297 
298 	/* Should be Peripheral Interrupt */
299 	assert(it >= NUM_SGI);
300 	/* Assigned to group0 */
301 	assert(!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
302 
303 	/* Raise the interrupt */
304 	write32(mask, gd->gicd_base + GICD_ISPENDR(idx));
305 }
306 
307 static void gic_it_raise_sgi(struct gic_data *gd, size_t it,
308 		uint8_t cpu_mask, uint8_t group)
309 {
310 	uint32_t mask_id = it & 0xf;
311 	uint32_t mask_group = group & 0x1;
312 	uint32_t mask_cpu = cpu_mask & 0xff;
313 	uint32_t mask = (mask_id | SHIFT_U32(mask_group, 15) |
314 		SHIFT_U32(mask_cpu, 16));
315 
316 	/* Should be Software Generated Interrupt */
317 	assert(it < NUM_SGI);
318 
319 	/* Raise the interrupt */
320 	write32(mask, gd->gicd_base + GICD_SGIR);
321 }
322 
323 static uint32_t gic_read_iar(struct gic_data *gd)
324 {
325 	return read32(gd->gicc_base + GICC_IAR);
326 }
327 
328 static void gic_write_eoir(struct gic_data *gd, uint32_t eoir)
329 {
330 	write32(eoir, gd->gicc_base + GICC_EOIR);
331 }
332 
333 static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
334 {
335 	size_t idx = it / NUM_INTS_PER_REG;
336 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
337 	return !!(read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
338 }
339 
340 static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
341 {
342 	size_t idx = it / NUM_INTS_PER_REG;
343 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
344 	return !!(read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
345 }
346 
347 static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
348 {
349 	size_t reg_idx = it / NUM_TARGETS_PER_REG;
350 	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
351 				ITARGETSR_FIELD_BITS;
352 	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
353 	uint32_t target =
354 		read32(gd->gicd_base + GICD_ITARGETSR(reg_idx)) & target_mask;
355 
356 	target = target >> target_shift;
357 	return target;
358 }
359 
360 void gic_dump_state(struct gic_data *gd)
361 {
362 	int i;
363 
364 	DMSG("GICC_CTLR: 0x%x", read32(gd->gicc_base + GICC_CTLR));
365 	DMSG("GICD_CTLR: 0x%x", read32(gd->gicd_base + GICD_CTLR));
366 
367 	for (i = 0; i < (int)gd->max_it; i++) {
368 		if (gic_it_is_enabled(gd, i)) {
369 			DMSG("irq%d: enabled, group:%d, target:%x", i,
370 			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
371 		}
372 	}
373 }
374 
375 void gic_it_handle(struct gic_data *gd)
376 {
377 	uint32_t iar;
378 	uint32_t id;
379 
380 	iar = gic_read_iar(gd);
381 	id = iar & GICC_IAR_IT_ID_MASK;
382 
383 	if (id == GIC_SPURIOUS_ID)
384 		DMSG("ignoring spurious interrupt");
385 	else
386 		itr_handle(id);
387 
388 	gic_write_eoir(gd, iar);
389 }
390 
391 static void gic_op_add(struct itr_chip *chip, size_t it,
392 		       uint32_t flags __unused)
393 {
394 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
395 
396 	if (it >= gd->max_it)
397 		panic();
398 
399 	gic_it_add(gd, it);
400 	/* Set the CPU mask to deliver interrupts to any online core */
401 	gic_it_set_cpu_mask(gd, it, 0xff);
402 	gic_it_set_prio(gd, it, 0x1);
403 }
404 
405 static void gic_op_enable(struct itr_chip *chip, size_t it)
406 {
407 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
408 
409 	if (it >= gd->max_it)
410 		panic();
411 
412 	gic_it_enable(gd, it);
413 }
414 
415 static void gic_op_disable(struct itr_chip *chip, size_t it)
416 {
417 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
418 
419 	if (it >= gd->max_it)
420 		panic();
421 
422 	gic_it_disable(gd, it);
423 }
424 
425 static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
426 {
427 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
428 
429 	if (it >= gd->max_it)
430 		panic();
431 
432 	gic_it_set_pending(gd, it);
433 }
434 
435 static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
436 			uint8_t cpu_mask)
437 {
438 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
439 
440 	if (it >= gd->max_it)
441 		panic();
442 
443 	if (it < NUM_NS_SGI)
444 		gic_it_raise_sgi(gd, it, cpu_mask, 1);
445 	else
446 		gic_it_raise_sgi(gd, it, cpu_mask, 0);
447 }
448 static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
449 			uint8_t cpu_mask)
450 {
451 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
452 
453 	if (it >= gd->max_it)
454 		panic();
455 
456 	gic_it_set_cpu_mask(gd, it, cpu_mask);
457 }
458