xref: /optee_os/core/drivers/gic.c (revision 2e02a7374b864506d2d244e64303b104ca41a05c)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2017, 2023 Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <compiler.h>
#include <config.h>
#include <drivers/gic.h>
#include <io.h>
#include <keep.h>
#include <kernel/dt.h>
#include <kernel/dt_driver.h>
#include <kernel/interrupt.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <libfdt.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <trace.h>
#include <util.h>

/* Offsets from gic.gicc_base */
#define GICC_CTLR		(0x000)
#define GICC_PMR		(0x004)
#define GICC_IAR		(0x00C)
#define GICC_EOIR		(0x010)

#define GICC_CTLR_ENABLEGRP0	(1 << 0)
#define GICC_CTLR_ENABLEGRP1	(1 << 1)
#define GICC_CTLR_FIQEN		(1 << 3)

/* Offsets from gic.gicd_base */
#define GICD_CTLR		(0x000)
#define GICD_TYPER		(0x004)
#define GICD_IGROUPR(n)		(0x080 + (n) * 4)
#define GICD_ISENABLER(n)	(0x100 + (n) * 4)
#define GICD_ICENABLER(n)	(0x180 + (n) * 4)
#define GICD_ISPENDR(n)		(0x200 + (n) * 4)
#define GICD_ICPENDR(n)		(0x280 + (n) * 4)
#define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
#define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
#define GICD_IGROUPMODR(n)	(0xd00 + (n) * 4)
#define GICD_SGIR		(0xF00)

#ifdef CFG_ARM_GICV3
#define GICD_PIDR2		(0xFFE8)
#else
/* Called ICPIDR2 in GICv2 specification */
#define GICD_PIDR2		(0xFE8)
#endif

#define GICD_CTLR_ENABLEGRP0	BIT32(0)
#define GICD_CTLR_ENABLEGRP1NS	BIT32(1)
#define GICD_CTLR_ENABLEGRP1S	BIT32(2)
#define GICD_CTLR_ARE_S		BIT32(4)
#define GICD_CTLR_ARE_NS	BIT32(5)

/* Offsets from gic.gicr_base[core_pos] */
#define GICR_V3_PCPUBASE_SIZE	(2 * 64 * 1024)
#define GICR_SGI_BASE_OFFSET	(64 * 1024)
#define GICR_CTLR		(0x00)
#define GICR_TYPER		(0x08)

#define GICR_IGROUPR0		(GICR_SGI_BASE_OFFSET + 0x080)
#define GICR_IGRPMODR0		(GICR_SGI_BASE_OFFSET + 0xD00)
#define GICR_ICENABLER0		(GICR_SGI_BASE_OFFSET + 0x180)
#define GICR_ICPENDR0		(GICR_SGI_BASE_OFFSET + 0x280)

#define GICR_TYPER_LAST		BIT64(4)
#define GICR_TYPER_AFF3_SHIFT	56
#define GICR_TYPER_AFF2_SHIFT	48
#define GICR_TYPER_AFF1_SHIFT	40
#define GICR_TYPER_AFF0_SHIFT	32

/* GICD PIDR2 name differs on GICv3 and GICv2 but uses the same bit map */
#define GICD_PIDR2_ARCHREV_SHIFT	4
#define GICD_PIDR2_ARCHREV_MASK		0xF

/* Number of Private Peripheral Interrupts */
#define NUM_PPI	32

/* Number of Software Generated Interrupts */
#define NUM_SGI			16

/* Number of Non-secure Software Generated Interrupts */
#define NUM_NS_SGI		8

/* Number of interrupts in one register */
#define NUM_INTS_PER_REG	32

/* Number of targets in one register */
#define NUM_TARGETS_PER_REG	4

/* Accessors to access ITARGETSRn */
#define ITARGETSR_FIELD_BITS	8
#define ITARGETSR_FIELD_MASK	0xff

#define GICD_TYPER_IT_LINES_NUM_MASK	0x1f
#define GICC_IAR_IT_ID_MASK	0x3ff
#define GICC_IAR_CPU_ID_MASK	0x7
#define GICC_IAR_CPU_ID_SHIFT	10

#define GICC_SGI_IRM_BIT	40
#define GICC_SGI_AFF1_SHIFT	16
#define GICC_SGI_AFF2_SHIFT	32
#define GICC_SGI_AFF3_SHIFT	48

#define GICD_SGIR_SIGINTID_MASK			0xf
#define GICD_SGIR_TO_OTHER_CPUS			0x1
#define GICD_SGIR_TO_THIS_CPU			0x2
#define GICD_SGIR_TARGET_LIST_FILTER_SHIFT	24
#define GICD_SGIR_NSATT_SHIFT			15
#define GICD_SGIR_CPU_TARGET_LIST_SHIFT		16

struct gic_data {
	vaddr_t gicc_base;
	vaddr_t gicd_base;
#if defined(CFG_ARM_GICV3)
	vaddr_t gicr_base[CFG_TEE_CORE_NB_CORE];
#endif
	size_t max_it;
	uint32_t per_cpu_group_status;
	uint32_t per_cpu_group_modifier;
	struct itr_chip chip;
};

static struct gic_data gic_data __nex_bss;

static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t type,
		       uint32_t prio);
static void gic_op_enable(struct itr_chip *chip, size_t it);
static void gic_op_disable(struct itr_chip *chip, size_t it);
static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			     uint32_t cpu_mask);
static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);

static const struct itr_ops gic_ops = {
	.add = gic_op_add,
	.mask = gic_op_disable,
	.unmask = gic_op_enable,
	.enable = gic_op_enable,
	.disable = gic_op_disable,
	.raise_pi = gic_op_raise_pi,
	.raise_sgi = gic_op_raise_sgi,
	.set_affinity = gic_op_set_affinity,
};
DECLARE_KEEP_PAGER(gic_ops);

static vaddr_t __maybe_unused get_gicr_base(struct gic_data *gd __maybe_unused)
{
#if defined(CFG_ARM_GICV3)
	return gd->gicr_base[get_core_pos()];
#else
	return 0;
#endif
}

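/*
 * Find the largest implemented interrupt ID. GICD_TYPER.ITLinesNumber
 * gives the number of implemented GICD_ISENABLERn registers; within each
 * register, writing all-ones to the set-enable bits and reading back
 * reveals which interrupt lines actually exist (unimplemented bits stay
 * zero). The original enable state is restored via GICD_ICENABLERn, and
 * the CPU interface is kept disabled during the probe so nothing fires
 * meanwhile.
 */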
static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
{
	int i;
	uint32_t old_ctlr;
	size_t ret = 0;
	size_t max_regs = io_read32(gicd_base + GICD_TYPER) &
			  GICD_TYPER_IT_LINES_NUM_MASK;

	/*
	 * Probe which interrupt number is the largest.
	 */
#if defined(CFG_ARM_GICV3)
	old_ctlr = read_icc_ctlr();
	write_icc_ctlr(0);
#else
	old_ctlr = io_read32(gicc_base + GICC_CTLR);
	io_write32(gicc_base + GICC_CTLR, 0);
#endif
	for (i = max_regs; i >= 0; i--) {
		uint32_t old_reg;
		uint32_t reg;
		int b;

		old_reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ISENABLER(i), 0xffffffff);
		reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ICENABLER(i), ~old_reg);
		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
			if (BIT32(b) & reg) {
				ret = i * NUM_INTS_PER_REG + b;
				goto out;
			}
		}
	}
out:
#if defined(CFG_ARM_GICV3)
	write_icc_ctlr(old_ctlr);
#else
	io_write32(gicc_base + GICC_CTLR, old_ctlr);
#endif
	return ret;
}

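/*
 * With GICv3 secure affinity routing enabled, the group configuration of
 * SGIs lives in this CPU's redistributor (GICR_IGROUPR0/GICR_IGRPMODR0).
 * Bring this CPU's SGI group and group modifier bits in line with the
 * configuration recorded from the primary CPU, disabling and clearing
 * any SGI that has to be reconfigured.
 */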
static void gicv3_sync_sgi_config(struct gic_data *gd)
{
	vaddr_t gicr_base = get_gicr_base(gd);
	bool need_sync = false;
	uint32_t gmod0 = 0;
	uint32_t grp0 = 0;
	size_t n = 0;

	if (!gicr_base)
		panic("GICR_BASE missing for affinity routing");

	grp0 = io_read32(gicr_base + GICR_IGROUPR0);
	gmod0 = io_read32(gicr_base + GICR_IGRPMODR0);
	for (n = GIC_SGI_SEC_BASE; n <= GIC_SGI_SEC_MAX; n++) {
		/* Ignore matching bits */
		if (!(BIT32(n) & (grp0 ^ gd->per_cpu_group_status)) &&
		    !(BIT32(n) & (gmod0 ^ gd->per_cpu_group_modifier)))
			continue;
		/*
		 * SGI-n differs from primary CPU configuration,
		 * let's sync up.
		 */
		need_sync = true;

		/* Disable interrupt */
		io_write32(gicr_base + GICR_ICENABLER0, BIT32(n));

		/* Make interrupt non-pending */
		io_write32(gicr_base + GICR_ICPENDR0, BIT32(n));

		if (BIT32(n) & gd->per_cpu_group_status)
			grp0 |= BIT32(n);
		else
			grp0 &= ~BIT32(n);
		if (BIT32(n) & gd->per_cpu_group_modifier)
			gmod0 |= BIT32(n);
		else
			gmod0 &= ~BIT32(n);
	}

	if (need_sync) {
		io_write32(gicr_base + GICR_IGROUPR0, grp0);
		io_write32(gicr_base + GICR_IGRPMODR0, gmod0);
	}
}

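/*
 * Legacy counterpart used with GICv2, or with GICv3 when secure affinity
 * routing is disabled: the SGI group configuration is banked per CPU in
 * GICD_IGROUPR(0), so only the group status needs syncing.
 */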
static void gic_legacy_sync_sgi_config(struct gic_data *gd)
{
	bool need_sync = false;
	uint32_t grp0 = 0;
	size_t n = 0;

	grp0 = io_read32(gd->gicd_base + GICD_IGROUPR(0));
	for (n = GIC_SGI_SEC_BASE; n <= GIC_SGI_SEC_MAX; n++) {
		/* Ignore matching bits */
		if (!(BIT32(n) & (grp0 ^ gd->per_cpu_group_status)))
			continue;
		/*
		 * SGI-n differs from primary CPU configuration,
		 * let's sync up.
		 */
		need_sync = true;

		/* Disable interrupt */
		io_write32(gd->gicd_base + GICD_ICENABLER(0), BIT32(n));

		/* Make interrupt non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(0), BIT32(n));

		if (BIT32(n) & gd->per_cpu_group_status)
			grp0 |= BIT32(n);
		else
			grp0 &= ~BIT32(n);
	}

	if (need_sync)
		io_write32(gd->gicd_base + GICD_IGROUPR(0), grp0);
}

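/*
 * Per-CPU GIC configuration used when OP-TEE owns the GIC (i.e. no
 * TF-A): program the banked SGI/PPI group bits and enable the CPU
 * interface with a priority mask that lets Non-secure interrupts
 * through.
 */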
static void init_gic_per_cpu(struct gic_data *gd)
{
	io_write32(gd->gicd_base + GICD_IGROUPR(0), gd->per_cpu_group_status);

	/*
	 * Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself.
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR,
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		   GICC_CTLR_FIQEN);
#endif
}

void gic_init_per_cpu(void)
{
	struct gic_data *gd = &gic_data;

#if defined(CFG_ARM_GICV3)
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif

	if (IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW)) {
		/*
		 * GIC is already initialized by TF-A, we only need to
		 * handle any SGI configuration changes.
		 */
		if (IS_ENABLED(CFG_ARM_GICV3) &&
		    io_read32(gd->gicd_base + GICD_CTLR) & GICD_CTLR_ARE_S)
			gicv3_sync_sgi_config(gd);
		else
			gic_legacy_sync_sgi_config(gd);
	} else {
		/*
		 * Non-TF-A case where all CPU-specific configuration
		 * of the GIC must be done here.
		 */
		init_gic_per_cpu(gd);
	}
}

void gic_cpu_init(void)
{
	struct gic_data *gd = &gic_data;

#if defined(CFG_ARM_GICV3)
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif
	IMSG("%s is deprecated, please use gic_init_per_cpu()", __func__);

	init_gic_per_cpu(gd);
}

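/*
 * Hand a secure SGI over to the Non-secure world: flip its group
 * configuration to Non-secure after disabling it and clearing any
 * pending state. The recorded per-CPU group masks are updated so that
 * secondary CPUs pick up the new configuration via gic_init_per_cpu().
 */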
void gic_init_donate_sgi_to_ns(size_t it)
{
	struct gic_data *gd = &gic_data;

	assert(it >= GIC_SGI_SEC_BASE && it <= GIC_SGI_SEC_MAX);

	/* Assert it's secure to start with. */
	assert(!(gd->per_cpu_group_status & BIT32(it)) &&
	       (gd->per_cpu_group_modifier & BIT32(it)));

	gd->per_cpu_group_modifier &= ~BIT32(it);
	gd->per_cpu_group_status |= BIT32(it);

	if (IS_ENABLED(CFG_ARM_GICV3) &&
	    (io_read32(gd->gicd_base + GICD_CTLR) & GICD_CTLR_ARE_S)) {
		vaddr_t gicr_base = get_gicr_base(gd);

		/* Disable interrupt */
		io_write32(gicr_base + GICR_ICENABLER0, BIT32(it));

		/* Make interrupt non-pending */
		io_write32(gicr_base + GICR_ICPENDR0, BIT32(it));

		/* Make it non-secure */
		io_write32(gicr_base + GICR_IGROUPR0, gd->per_cpu_group_status);
		io_write32(gicr_base + GICR_IGRPMODR0,
			   gd->per_cpu_group_modifier);
	} else {
		/* Disable interrupt */
		io_write32(gd->gicd_base + GICD_ICENABLER(0), BIT32(it));

		/* Make interrupt non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(0), BIT32(it));

		/* Make it non-secure */
		io_write32(gd->gicd_base + GICD_IGROUPR(0),
			   gd->per_cpu_group_status);
	}
}

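/*
 * Translate a devicetree interrupt specifier to a GIC interrupt ID: the
 * first cell selects the interrupt type and the second cell is the index
 * relative to that type, so PPIs (GIC_PPI) start at ID 16 and SPIs
 * (GIC_SPI) at ID 32.
 */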
static int gic_dt_get_irq(const uint32_t *properties, int count, uint32_t *type,
			  uint32_t *prio)
{
	int it_num = DT_INFO_INVALID_INTERRUPT;

	if (type)
		*type = IRQ_TYPE_NONE;

	if (prio)
		*prio = 0;

	if (!properties || count < 2)
		return DT_INFO_INVALID_INTERRUPT;

	it_num = fdt32_to_cpu(properties[1]);

	switch (fdt32_to_cpu(properties[0])) {
	case GIC_PPI:
		it_num += 16;
		break;
	case GIC_SPI:
		it_num += 32;
		break;
	default:
		it_num = DT_INFO_INVALID_INTERRUPT;
	}

	return it_num;
}

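/*
 * Walk the array of GICv3 redistributor frames, which are laid out
 * contiguously (two 64KB pages per CPU), until GICR_TYPER.Last flags the
 * final frame. Each frame's GICR_TYPER affinity fields identify the CPU
 * it serves, which gives the index into gicr_base_addrs[].
 */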
static void __maybe_unused probe_redist_base_addrs(vaddr_t *gicr_base_addrs,
						   paddr_t gicr_base_pa)
{
	size_t sz = GICR_V3_PCPUBASE_SIZE;
	paddr_t pa = gicr_base_pa;
	size_t core_pos = 0;
	uint64_t mt_bit = 0;
	uint64_t mpidr = 0;
	uint64_t tv = 0;
	vaddr_t va = 0;

#ifdef ARM64
	mt_bit = read_mpidr_el1() & MPIDR_MT_MASK;
#endif
	do {
		va = core_mmu_get_va(pa, MEM_AREA_IO_SEC, sz);
		if (!va)
			panic();
		tv = io_read64(va + GICR_TYPER);

		/*
		 * Extract an mpidr from the Type register to calculate the
		 * core position of this redistributor instance.
		 */
		mpidr = mt_bit;
		mpidr |= SHIFT_U64((tv >> GICR_TYPER_AFF3_SHIFT) &
				   MPIDR_AFFLVL_MASK, MPIDR_AFF3_SHIFT);
		mpidr |= (tv >> GICR_TYPER_AFF0_SHIFT) &
			 (MPIDR_AFF0_MASK | MPIDR_AFF1_MASK | MPIDR_AFF2_MASK);
		core_pos = get_core_pos_mpidr(mpidr);
		if (core_pos < CFG_TEE_CORE_NB_CORE) {
			DMSG("GICR_BASE[%zu] at %#"PRIxVA, core_pos, va);
			gicr_base_addrs[core_pos] = va;
		} else {
			EMSG("Skipping too large core_pos %zu from GICR_TYPER",
			     core_pos);
		}
		pa += sz;
	} while (!(tv & GICR_TYPER_LAST));
}

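/*
 * Map the distributor (and, for GICv2, the CPU interface) into secure
 * virtual memory and sanity-check the architecture revision reported by
 * GICD_PIDR2 against the configured driver flavor.
 */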
static void gic_init_base_addr(paddr_t gicc_base_pa, paddr_t gicd_base_pa,
			       paddr_t gicr_base_pa __maybe_unused)
{
	struct gic_data *gd = &gic_data;
	vaddr_t gicc_base = 0;
	vaddr_t gicd_base = 0;
	uint32_t vers __maybe_unused = 0;

	assert(cpu_mmu_enabled());

	gicd_base = core_mmu_get_va(gicd_base_pa, MEM_AREA_IO_SEC,
				    GIC_DIST_REG_SIZE);
	if (!gicd_base)
		panic();

	vers = io_read32(gicd_base + GICD_PIDR2);
	vers >>= GICD_PIDR2_ARCHREV_SHIFT;
	vers &= GICD_PIDR2_ARCHREV_MASK;

	if (IS_ENABLED(CFG_ARM_GICV3)) {
		assert(vers == 3);
	} else {
		assert(vers == 2);
		gicc_base = core_mmu_get_va(gicc_base_pa, MEM_AREA_IO_SEC,
					    GIC_CPU_REG_SIZE);
		if (!gicc_base)
			panic();
	}

	gd->gicc_base = gicc_base;
	gd->gicd_base = gicd_base;
	gd->max_it = probe_max_it(gicc_base, gicd_base);
#if defined(CFG_ARM_GICV3)
	probe_redist_base_addrs(gd->gicr_base, gicr_base_pa);
#endif
	gd->chip.ops = &gic_ops;

	if (IS_ENABLED(CFG_DT))
		gd->chip.dt_get_irq = gic_dt_get_irq;
}

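/*
 * Primary GIC initialization. With TF-A the GIC is already configured,
 * so only record the current SGI group setup (from the redistributor
 * when secure affinity routing is enabled, else from the banked
 * GICD_IGROUPR(0)). Without TF-A, configure the distributor and CPU
 * interface from scratch for legacy (non-affinity-routing) operation.
 */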
void gic_init_v3(paddr_t gicc_base_pa, paddr_t gicd_base_pa,
		 paddr_t gicr_base_pa)
{
	struct gic_data __maybe_unused *gd = &gic_data;
	size_t __maybe_unused n = 0;

	gic_init_base_addr(gicc_base_pa, gicd_base_pa, gicr_base_pa);

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	/* GIC configuration is already initialized by TF-A */
	if (io_read32(gd->gicd_base + GICD_CTLR) & GICD_CTLR_ARE_S) {
		vaddr_t gicr_base = get_gicr_base(gd);

		if (!gicr_base)
			panic("GICR_BASE missing for affinity routing");
		/* Secure affinity routing enabled */
		gd->per_cpu_group_status = io_read32(gicr_base + GICR_IGROUPR0);
		gd->per_cpu_group_modifier = io_read32(gicr_base +
						       GICR_IGRPMODR0);
	} else {
		/* Legacy operation with secure affinity routing disabled */
		gd->per_cpu_group_status = io_read32(gd->gicd_base +
						     GICD_IGROUPR(0));
		gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
	}
#else /*!CFG_WITH_ARM_TRUSTED_FW*/
	/*
	 * Without TF-A, the GIC is always configured for legacy operation
	 * with secure affinity routing disabled.
	 */
	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
		/* Disable interrupts */
		io_write32(gd->gicd_base + GICD_ICENABLER(n), 0xffffffff);

		/* Make interrupts non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(n), 0xffffffff);

		/* Mark interrupts non-secure */
		if (n == 0) {
			/*
			 * Per-CPU interrupt configuration:
			 * ID0-ID7 (SGI)  for Non-secure interrupts
			 * ID8-ID15 (SGI) for Secure interrupts
			 * All PPIs are configured as Non-secure interrupts.
			 */
			gd->per_cpu_group_status = 0xffff00ff;
			gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
			io_write32(gd->gicd_base + GICD_IGROUPR(n),
				   gd->per_cpu_group_status);
		} else {
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffffffff);
		}
	}

	/*
	 * Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself.
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
	io_setbits32(gd->gicd_base + GICD_CTLR, GICD_CTLR_ENABLEGRP1S);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR, GICC_CTLR_FIQEN |
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1);
	io_setbits32(gd->gicd_base + GICD_CTLR,
		     GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1NS);
#endif
#endif /*!CFG_WITH_ARM_TRUSTED_FW*/

	interrupt_main_init(&gic_data.chip);
}

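/*
 * Configure an interrupt as secure: Group 0 with GICv2, Group 1 Secure
 * with GICv3 (group bit cleared, group modifier bit set). The interrupt
 * is disabled and made non-pending first so the group change cannot fire
 * a stale pending interrupt.
 */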
static void gic_it_add(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Disable the interrupt */
	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
	/* Make it non-pending */
	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
	/* Assign it to group0 */
	io_clrbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
#if defined(CFG_ARM_GICV3)
	/* Assign it to group1S */
	io_setbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
#endif
}

static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
				uint8_t cpu_mask)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
	uint32_t target = 0;
	uint32_t target_shift = 0;
	vaddr_t itargetsr = gd->gicd_base +
			    GICD_ITARGETSR(it / NUM_TARGETS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Route it to selected CPUs */
	target = io_read32(itargetsr);
	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
	target |= cpu_mask << target_shift;
	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA, target, itargetsr);
	io_write32(itargetsr, target);
	DMSG("cpu_mask: 0x%x", io_read32(itargetsr));
}

static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Set the priority of the interrupt via its byte-wide field */
	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
		prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
	io_write8(gd->gicd_base + GICD_IPRIORITYR(0) + it, prio);
}

static void gic_it_enable(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
	vaddr_t base = gd->gicd_base;

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(base + GICD_IGROUPR(idx)) & mask));

	/* Enable the interrupt */
	io_write32(base + GICD_ISENABLER(idx), mask);
}

static void gic_it_disable(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Disable the interrupt */
	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
}

static void gic_it_set_pending(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Should be a Peripheral Interrupt */
	assert(it >= NUM_SGI);

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_ISPENDR(idx), mask);
}

static void assert_cpu_mask_is_valid(uint32_t cpu_mask)
{
	bool __maybe_unused to_others = cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS;
	bool __maybe_unused to_current = cpu_mask & ITR_CPU_MASK_TO_THIS_CPU;
	bool __maybe_unused to_list = cpu_mask & 0xff;

	/* One and only one of the bit fields shall be non-zero */
	assert(to_others + to_current + to_list == 1);
}

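/*
 * Raise a Software Generated Interrupt. With GICv3 this builds an
 * ICC_SGI1R_EL1 value: SGI ID in bits [27:24], target list in bits
 * [15:0], cluster affinities Aff1/Aff2/Aff3 in bits [23:16], [39:32]
 * and [55:48], and the IRM bit (40) for "all but self" routing. With
 * GICv2 it builds a GICD_SGIR value: SGI ID in bits [3:0], NSATT in
 * bit 15, CPU target list in bits [23:16] and the target list filter
 * in bits [25:24].
 */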
static void gic_it_raise_sgi(struct gic_data *gd __maybe_unused, size_t it,
			     uint32_t cpu_mask, bool ns)
{
#if defined(CFG_ARM_GICV3)
	uint32_t mask_id = it & 0xf;
	uint64_t mask = SHIFT_U64(mask_id, 24);

	assert_cpu_mask_is_valid(cpu_mask);

	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
		mask |= BIT64(GICC_SGI_IRM_BIT);
	} else {
		uint64_t mpidr = read_mpidr();
		uint64_t mask_aff1 = (mpidr & MPIDR_AFF1_MASK) >>
				     MPIDR_AFF1_SHIFT;
		uint64_t mask_aff2 = (mpidr & MPIDR_AFF2_MASK) >>
				     MPIDR_AFF2_SHIFT;
		uint64_t mask_aff3 = (mpidr & MPIDR_AFF3_MASK) >>
				     MPIDR_AFF3_SHIFT;

		mask |= SHIFT_U64(mask_aff1, GICC_SGI_AFF1_SHIFT);
		mask |= SHIFT_U64(mask_aff2, GICC_SGI_AFF2_SHIFT);
		mask |= SHIFT_U64(mask_aff3, GICC_SGI_AFF3_SHIFT);

		if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
			/* Use this CPU's Aff0 as the target list bit */
			mask |= BIT32(mpidr & 0xf);
		} else {
			/*
			 * Only sending SGIs to cores in the same
			 * cluster is supported for now.
			 */
			mask |= cpu_mask & 0xff;
		}
	}

	/* Raise the interrupt */
	if (ns)
		write_icc_asgi1r(mask);
	else
		write_icc_sgi1r(mask);
#else
	uint32_t mask_id = it & GICD_SGIR_SIGINTID_MASK;
	uint32_t mask_group = ns;
	uint32_t mask = mask_id;

	assert_cpu_mask_is_valid(cpu_mask);

	mask |= SHIFT_U32(mask_group, GICD_SGIR_NSATT_SHIFT);
	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
		mask |= SHIFT_U32(GICD_SGIR_TO_OTHER_CPUS,
				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
	} else if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
		mask |= SHIFT_U32(GICD_SGIR_TO_THIS_CPU,
				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
	} else {
		mask |= SHIFT_U32(cpu_mask & 0xff,
				  GICD_SGIR_CPU_TARGET_LIST_SHIFT);
	}

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_SGIR, mask);
#endif
}

static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	return read_icc_iar1();
#else
	return io_read32(gd->gicc_base + GICC_IAR);
#endif
}

static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	write_icc_eoir1(eoir);
#else
	io_write32(gd->gicc_base + GICC_EOIR, eoir);
#endif
}

static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);
	return !!(io_read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
}

static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);
	return !!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
}

static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
{
	size_t reg_idx = it / NUM_TARGETS_PER_REG;
	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
				ITARGETSR_FIELD_BITS;
	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
	uint32_t target = io_read32(gd->gicd_base + GICD_ITARGETSR(reg_idx));

	assert(gd == &gic_data);
	return (target & target_mask) >> target_shift;
}

void gic_dump_state(void)
{
	struct gic_data *gd = &gic_data;
	int i = 0;

#if defined(CFG_ARM_GICV3)
	DMSG("GICC_CTLR: 0x%x", read_icc_ctlr());
#else
	DMSG("GICC_CTLR: 0x%x", io_read32(gd->gicc_base + GICC_CTLR));
#endif
	DMSG("GICD_CTLR: 0x%x", io_read32(gd->gicd_base + GICD_CTLR));

	for (i = 0; i <= (int)gd->max_it; i++) {
		if (gic_it_is_enabled(gd, i)) {
			DMSG("irq%d: enabled, group:%d, target:%x", i,
			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
		}
	}
}

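/*
 * Native interrupt handling: acknowledge the highest priority pending
 * interrupt (IAR), dispatch the registered handlers for its ID, then
 * signal end of interrupt (EOIR) with the unmodified acknowledge value.
 */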
static void __maybe_unused gic_native_itr_handler(void)
{
	struct gic_data *gd = &gic_data;
	uint32_t iar = 0;
	uint32_t id = 0;

	iar = gic_read_iar(gd);
	id = iar & GICC_IAR_IT_ID_MASK;

	if (id <= gd->max_it)
		interrupt_call_handlers(&gd->chip, id);
	else
		DMSG("ignoring interrupt %" PRIu32, id);

	gic_write_eoir(gd, iar);
}

#ifndef CFG_CORE_WORKAROUND_ARM_NMFI
/* Override interrupt_main_handler() with driver implementation */
void interrupt_main_handler(void)
{
	gic_native_itr_handler();
}
#endif /*!CFG_CORE_WORKAROUND_ARM_NMFI*/

static void gic_op_add(struct itr_chip *chip, size_t it,
		       uint32_t type __unused,
		       uint32_t prio __unused)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_add(gd, it);
	/* Set the CPU mask to deliver interrupts to any online core */
	gic_it_set_cpu_mask(gd, it, 0xff);
	gic_it_set_prio(gd, it, 0x1);
}

static void gic_op_enable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_enable(gd, it);
}

static void gic_op_disable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_disable(gd, it);
}

static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_set_pending(gd, it);
}

static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			     uint32_t cpu_mask)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);
	bool ns = false;

	assert(gd == &gic_data);

	/* Should be a Software Generated Interrupt */
	assert(it < NUM_SGI);

	ns = BIT32(it) & gd->per_cpu_group_status;
	gic_it_raise_sgi(gd, it, cpu_mask, ns);
}

static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_set_cpu_mask(gd, it, cpu_mask);
}

#ifdef CFG_DT
/* Callback for "interrupts" and "interrupts-extended" DT node properties */
static TEE_Result dt_get_gic_chip_cb(struct dt_pargs *arg, void *priv_data,
				     struct itr_desc *itr_desc)
{
	int itr_num = DT_INFO_INVALID_INTERRUPT;
	struct itr_chip *chip = priv_data;
	uint32_t phandle_args[2] = { };
	uint32_t type = 0;
	uint32_t prio = 0;

	assert(arg && itr_desc);

	/*
	 * gic_dt_get_irq() expects phandle arguments still in DT format
	 * (big-endian) whereas struct dt_pargs carries CPU-endian values.
	 * Therefore convert the phandle arguments back to big-endian.
	 * gic_dt_get_irq() consumes only the first 2 arguments.
	 */
	if (arg->args_count < 2)
		return TEE_ERROR_GENERIC;
	phandle_args[0] = cpu_to_fdt32(arg->args[0]);
	phandle_args[1] = cpu_to_fdt32(arg->args[1]);

	itr_num = gic_dt_get_irq((const void *)phandle_args, 2, &type, &prio);
	if (itr_num == DT_INFO_INVALID_INTERRUPT)
		return TEE_ERROR_GENERIC;

	gic_op_add(chip, itr_num, type, prio);

	itr_desc->chip = chip;
	itr_desc->itr_num = itr_num;

	return TEE_SUCCESS;
}

static TEE_Result gic_probe(const void *fdt, int offs, const void *cd __unused)
{
	if (interrupt_register_provider(fdt, offs, dt_get_gic_chip_cb,
					&gic_data.chip))
		panic();

	return TEE_SUCCESS;
}

static const struct dt_device_match gic_match_table[] = {
	{ .compatible = "arm,cortex-a15-gic" },
	{ .compatible = "arm,cortex-a7-gic" },
	{ .compatible = "arm,cortex-a5-gic" },
	{ .compatible = "arm,cortex-a9-gic" },
	{ .compatible = "arm,gic-400" },
	{ }
};

DEFINE_DT_DRIVER(gic_dt_driver) = {
	.name = "gic",
	.match_table = gic_match_table,
	.probe = gic_probe,
};
#endif /*CFG_DT*/

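/*
 * Typical boot flow (illustrative sketch, not part of the upstream file):
 * the primary CPU maps and initializes the GIC once, then every CPU,
 * primary and secondaries alike, applies the per-CPU configuration.
 * A platform might do roughly the following (the *_BASE_PA addresses are
 * placeholder examples):
 *
 *	// Primary CPU, during platform init
 *	gic_init_v3(GICC_BASE_PA, GICD_BASE_PA, GICR_BASE_PA);
 *	// Optionally hand one secure SGI over to the Non-secure world
 *	gic_init_donate_sgi_to_ns(GIC_SGI_SEC_BASE);
 *
 *	// Each CPU, including secondaries as they come online
 *	gic_init_per_cpu();
 */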