xref: /optee_os/core/drivers/gic.c (revision 79f8990d9d28539864d8f97f9f1cb32e289e595f)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2017, 2023 Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <dt-bindings/interrupt-controller/arm-gic.h>
10 #include <compiler.h>
11 #include <config.h>
12 #include <drivers/gic.h>
13 #include <io.h>
14 #include <keep.h>
15 #include <kernel/dt.h>
16 #include <kernel/dt_driver.h>
17 #include <kernel/interrupt.h>
18 #include <kernel/misc.h>
19 #include <kernel/panic.h>
20 #include <libfdt.h>
21 #include <mm/core_memprot.h>
22 #include <mm/core_mmu.h>
23 #include <trace.h>
24 #include <util.h>
25 
26 /* Offsets from gic.gicc_base */
27 #define GICC_CTLR		(0x000)
28 #define GICC_PMR		(0x004)
29 #define GICC_IAR		(0x00C)
30 #define GICC_EOIR		(0x010)
31 
32 #define GICC_CTLR_ENABLEGRP0	(1 << 0)
33 #define GICC_CTLR_ENABLEGRP1	(1 << 1)
34 #define GICC_CTLR_FIQEN		(1 << 3)
35 
36 /* Offsets from gic.gicd_base */
37 #define GICD_CTLR		(0x000)
38 #define GICD_TYPER		(0x004)
39 #define GICD_IGROUPR(n)		(0x080 + (n) * 4)
40 #define GICD_ISENABLER(n)	(0x100 + (n) * 4)
41 #define GICD_ICENABLER(n)	(0x180 + (n) * 4)
42 #define GICD_ISPENDR(n)		(0x200 + (n) * 4)
43 #define GICD_ICPENDR(n)		(0x280 + (n) * 4)
44 #define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
45 #define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
46 #define GICD_IGROUPMODR(n)	(0xd00 + (n) * 4)
47 #define GICD_SGIR		(0xF00)
48 
49 #ifdef CFG_ARM_GICV3
50 #define GICD_PIDR2		(0xFFE8)
51 #else
52 /* Called ICPIDR2 in the GICv2 specification */
53 #define GICD_PIDR2		(0xFE8)
54 #endif
55 
56 #define GICD_CTLR_ENABLEGRP0	BIT32(0)
57 #define GICD_CTLR_ENABLEGRP1NS	BIT32(1)
58 #define GICD_CTLR_ENABLEGRP1S	BIT32(2)
59 #define GICD_CTLR_ARE_S		BIT32(4)
60 #define GICD_CTLR_ARE_NS	BIT32(5)
61 
62 /* Offsets from gic.gicr_base[core_pos] */
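/*
 * Each GICv3 redistributor (without GICv4 VLPI frames) spans two contiguous
 * 64KB frames: RD_base holding control registers such as GICR_CTLR and
 * GICR_TYPER, and SGI_base holding the SGI/PPI registers, hence the 128KB
 * per-CPU stride and the 64KB offset used below.
 */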
63 #define GICR_V3_PCPUBASE_SIZE	(2 * 64 * 1024)
64 #define GICR_SGI_BASE_OFFSET	(64 * 1024)
65 #define GICR_CTLR		(0x00)
66 #define GICR_TYPER		(0x08)
67 
68 #define GICR_IGROUPR0		(GICR_SGI_BASE_OFFSET + 0x080)
69 #define GICR_IGRPMODR0		(GICR_SGI_BASE_OFFSET + 0xD00)
70 #define GICR_ICENABLER0		(GICR_SGI_BASE_OFFSET + 0x180)
71 #define GICR_ICPENDR0		(GICR_SGI_BASE_OFFSET + 0x280)
72 
73 #define GICR_TYPER_LAST		BIT64(4)
74 #define GICR_TYPER_AFF3_SHIFT	56
75 #define GICR_TYPER_AFF2_SHIFT	48
76 #define GICR_TYPER_AFF1_SHIFT	40
77 #define GICR_TYPER_AFF0_SHIFT	32
78 
79 /* The GICD_PIDR2 name differs on GICv3 and GICv2 but uses the same bit map */
80 #define GICD_PIDR2_ARCHREV_SHIFT	4
81 #define GICD_PIDR2_ARCHREV_MASK		0xF
82 
83 /* Number of Private Peripheral Interrupts */
84 #define NUM_PPI	32
85 
86 /* Number of Software Generated Interrupts */
87 #define NUM_SGI			16
88 
89 /* Number of Non-secure Software Generated Interrupts */
90 #define NUM_NS_SGI		8
91 
92 /* Number of interrupts in one register */
93 #define NUM_INTS_PER_REG	32
94 
95 /* Number of targets in one register */
96 #define NUM_TARGETS_PER_REG	4
97 
98 /* Constants to access the ITARGETSRn fields */
99 #define ITARGETSR_FIELD_BITS	8
100 #define ITARGETSR_FIELD_MASK	0xff
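/*
 * Each GICD_ITARGETSRn register holds four 8-bit CPU target fields: for
 * interrupt ID m the field is byte (m % 4) of GICD_ITARGETSR(m / 4), so
 * for example ID 42 lives in byte 2 of GICD_ITARGETSR(10).
 */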
101 
102 #define GICD_TYPER_IT_LINES_NUM_MASK	0x1f
103 #define GICC_IAR_IT_ID_MASK	0x3ff
104 #define GICC_IAR_CPU_ID_MASK	0x7
105 #define GICC_IAR_CPU_ID_SHIFT	10
106 
107 #define GICC_SGI_IRM_BIT	40
108 #define GICC_SGI_AFF1_SHIFT	16
109 #define GICC_SGI_AFF2_SHIFT	32
110 #define GICC_SGI_AFF3_SHIFT	48
111 
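/*
 * GICv2 GICD_SGIR layout used by the definitions below: TargetListFilter
 * in bits [25:24], CPUTargetList in bits [23:16], NSATT in bit [15] and
 * the SGI ID in bits [3:0].
 */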
112 #define GICD_SGIR_SIGINTID_MASK			0xf
113 #define GICD_SGIR_TO_OTHER_CPUS			0x1
114 #define GICD_SGIR_TO_THIS_CPU			0x2
115 #define GICD_SGIR_TARGET_LIST_FILTER_SHIFT	24
116 #define GICD_SGIR_NSATT_SHIFT			15
117 #define GICD_SGIR_CPU_TARGET_LIST_SHIFT		16
118 
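/*
 * per_cpu_group_status and per_cpu_group_modifier cache the boot CPU's
 * SGI/PPI group configuration (GICR_IGROUPR0/GICR_IGRPMODR0 when affinity
 * routing is enabled, otherwise the banked GICD_IGROUPR(0)) so that
 * secondary CPUs can be brought to the same secure/non-secure split.
 */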
119 struct gic_data {
120 	vaddr_t gicc_base;
121 	vaddr_t gicd_base;
122 #if defined(CFG_ARM_GICV3)
123 	vaddr_t gicr_base[CFG_TEE_CORE_NB_CORE];
124 #endif
125 	size_t max_it;
126 	uint32_t per_cpu_group_status;
127 	uint32_t per_cpu_group_modifier;
128 	struct itr_chip chip;
129 };
130 
131 static struct gic_data gic_data __nex_bss;
132 
133 static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t type,
134 		       uint32_t prio);
135 static void gic_op_enable(struct itr_chip *chip, size_t it);
136 static void gic_op_disable(struct itr_chip *chip, size_t it);
137 static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
138 static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
139 			     uint32_t cpu_mask);
140 static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
141 			uint8_t cpu_mask);
142 
143 static const struct itr_ops gic_ops = {
144 	.add = gic_op_add,
145 	.mask = gic_op_disable,
146 	.unmask = gic_op_enable,
147 	.enable = gic_op_enable,
148 	.disable = gic_op_disable,
149 	.raise_pi = gic_op_raise_pi,
150 	.raise_sgi = gic_op_raise_sgi,
151 	.set_affinity = gic_op_set_affinity,
152 };
153 DECLARE_KEEP_PAGER(gic_ops);
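/*
 * The GIC provides no per-interrupt mask distinct from the enable bit, so
 * the mask/unmask callbacks are wired to the same enable/disable handlers.
 */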
154 
155 static vaddr_t __maybe_unused get_gicr_base(struct gic_data *gd __maybe_unused)
156 {
157 #if defined(CFG_ARM_GICV3)
158 	return gd->gicr_base[get_core_pos()];
159 #else
160 	return 0;
161 #endif
162 }
163 
164 static bool affinity_routing_is_enabled(struct gic_data *gd)
165 {
166 	return IS_ENABLED(CFG_ARM_GICV3) &&
167 	       io_read32(gd->gicd_base + GICD_CTLR) & GICD_CTLR_ARE_S;
168 }
169 
170 static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
171 {
172 	int i;
173 	uint32_t old_ctlr;
174 	size_t ret = 0;
175 	size_t max_regs = io_read32(gicd_base + GICD_TYPER) &
176 			  GICD_TYPER_IT_LINES_NUM_MASK;
177 
178 	/*
179 	 * Probe which interrupt number is the largest.
180 	 */
181 #if defined(CFG_ARM_GICV3)
182 	old_ctlr = read_icc_ctlr();
183 	write_icc_ctlr(0);
184 #else
185 	old_ctlr = io_read32(gicc_base + GICC_CTLR);
186 	io_write32(gicc_base + GICC_CTLR, 0);
187 #endif
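	/*
	 * Unimplemented interrupt IDs have RAZ/WI enable bits: write all ones
	 * to each GICD_ISENABLERn, read back which bits stick and restore the
	 * previous enable state through GICD_ICENABLERn.
	 */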
188 	for (i = max_regs; i >= 0; i--) {
189 		uint32_t old_reg;
190 		uint32_t reg;
191 		int b;
192 
193 		old_reg = io_read32(gicd_base + GICD_ISENABLER(i));
194 		io_write32(gicd_base + GICD_ISENABLER(i), 0xffffffff);
195 		reg = io_read32(gicd_base + GICD_ISENABLER(i));
196 		io_write32(gicd_base + GICD_ICENABLER(i), ~old_reg);
197 		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
198 			if (BIT32(b) & reg) {
199 				ret = i * NUM_INTS_PER_REG + b;
200 				goto out;
201 			}
202 		}
203 	}
204 out:
205 #if defined(CFG_ARM_GICV3)
206 	write_icc_ctlr(old_ctlr);
207 #else
208 	io_write32(gicc_base + GICC_CTLR, old_ctlr);
209 #endif
210 	return ret;
211 }
212 
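/*
 * Align this CPU's redistributor view of which SGIs are secure and
 * non-secure with the group configuration recorded from the boot CPU.
 */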
213 static void gicv3_sync_sgi_config(struct gic_data *gd)
214 {
215 	vaddr_t gicr_base = get_gicr_base(gd);
216 	bool need_sync = false;
217 	uint32_t gmod0 = 0;
218 	uint32_t grp0 = 0;
219 	size_t n = 0;
220 
221 	/*
222 	 * If gicr_base isn't available there's no need to synchronize SGI
223 	 * configuration since gic_init_donate_sgi_to_ns() would panic.
224 	 */
225 	if (!gicr_base)
226 		return;
227 
228 	grp0 = io_read32(gicr_base + GICR_IGROUPR0);
229 	gmod0 = io_read32(gicr_base + GICR_IGRPMODR0);
230 	for (n = GIC_SGI_SEC_BASE; n <= GIC_SGI_SEC_MAX; n++) {
231 		/* Ignore matching bits */
232 		if (!(BIT32(n) & (grp0 ^ gd->per_cpu_group_status)) &&
233 		    !(BIT32(n) & (gmod0 ^ gd->per_cpu_group_modifier)))
234 			continue;
235 		/*
236 		 * SGI-n differs from primary CPU configuration,
237 		 * let's sync up.
238 		 */
239 		need_sync = true;
240 
241 		/* Disable interrupt */
242 		io_write32(gicr_base + GICR_ICENABLER0, BIT32(n));
243 
244 		/* Make interrupt non-pending */
245 		io_write32(gicr_base + GICR_ICPENDR0, BIT32(n));
246 
247 		if (BIT32(n) & gd->per_cpu_group_status)
248 			grp0 |= BIT32(n);
249 		else
250 			grp0 &= ~BIT32(n);
251 		if (BIT32(n) & gd->per_cpu_group_modifier)
252 			gmod0 |= BIT32(n);
253 		else
254 			gmod0 &= ~BIT32(n);
255 	}
256 
257 	if (need_sync) {
258 		io_write32(gicr_base + GICR_IGROUPR0, grp0);
259 		io_write32(gicr_base + GICR_IGRPMODR0, gmod0);
260 	}
261 }
262 
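/*
 * Same synchronization as above but for a GIC with secure affinity routing
 * disabled, where the SGI group configuration lives in the banked
 * GICD_IGROUPR(0) register instead of the redistributor.
 */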
263 static void gic_legacy_sync_sgi_config(struct gic_data *gd)
264 {
265 	bool need_sync = false;
266 	uint32_t grp0 = 0;
267 	size_t n = 0;
268 
269 	grp0 = io_read32(gd->gicd_base + GICD_IGROUPR(0));
270 	for (n = GIC_SGI_SEC_BASE; n <= GIC_SGI_SEC_MAX; n++) {
271 		/* Ignore matching bits */
272 		if (!(BIT32(n) & (grp0 ^ gd->per_cpu_group_status)))
273 			continue;
274 		/*
275 		 * SGI-n differs from primary CPU configuration,
276 		 * let's sync up.
277 		 */
278 		need_sync = true;
279 
280 		/* Disable interrupt */
281 		io_write32(gd->gicd_base + GICD_ICENABLER(0), BIT(n));
282 
283 		/* Make interrupt non-pending */
284 		io_write32(gd->gicd_base + GICD_ICPENDR(0), BIT(n));
285 
286 		if (BIT32(n) & gd->per_cpu_group_status)
287 			grp0 |= BIT32(n);
288 		else
289 			grp0 &= ~BIT32(n);
290 	}
291 
292 	if (need_sync)
293 		io_write32(gd->gicd_base + GICD_IGROUPR(0), grp0);
294 }
295 
296 static void init_gic_per_cpu(struct gic_data *gd)
297 {
298 	io_write32(gd->gicd_base + GICD_IGROUPR(0), gd->per_cpu_group_status);
299 
300 	/*
301 	 * Set the priority mask to permit Non-secure interrupts, and to
302 	 * allow the Non-secure world to adjust the priority mask itself
303 	 */
304 #if defined(CFG_ARM_GICV3)
305 	write_icc_pmr(0x80);
306 	write_icc_igrpen1(1);
307 #else
308 	io_write32(gd->gicc_base + GICC_PMR, 0x80);
309 
310 	/* Enable GIC */
311 	io_write32(gd->gicc_base + GICC_CTLR,
312 		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
313 		   GICC_CTLR_FIQEN);
314 #endif
315 }
316 
317 void gic_init_per_cpu(void)
318 {
319 	struct gic_data *gd = &gic_data;
320 
321 #if defined(CFG_ARM_GICV3)
322 	assert(gd->gicd_base);
323 #else
324 	assert(gd->gicd_base && gd->gicc_base);
325 #endif
326 
327 	if (IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW)) {
328 		/*
329 		 * The GIC is already initialized by TF-A, we only need to
330 		 * propagate any SGI configuration changes to this CPU.
331 		 */
332 		if (affinity_routing_is_enabled(gd))
333 			gicv3_sync_sgi_config(gd);
334 		else
335 			gic_legacy_sync_sgi_config(gd);
336 	} else {
337 		/*
338 		 * Non-TF-A case where all CPU-specific configuration
339 		 * of the GIC must be done here.
340 		 */
341 		init_gic_per_cpu(gd);
342 	}
343 }
344 
345 void gic_cpu_init(void)
346 {
347 	struct gic_data *gd = &gic_data;
348 
349 #if defined(CFG_ARM_GICV3)
350 	assert(gd->gicd_base);
351 #else
352 	assert(gd->gicd_base && gd->gicc_base);
353 #endif
354 	IMSG("%s is deprecated, please use gic_init_per_cpu()", __func__);
355 
356 	init_gic_per_cpu(gd);
357 }
358 
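/*
 * Hand one of the secure SGIs over to the non-secure world by moving it to
 * Group 1 (non-secure), for instance so the normal world can receive it as
 * a notification interrupt. The cached per-CPU group values are updated so
 * that secondary CPUs inherit the same assignment.
 */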
359 void gic_init_donate_sgi_to_ns(size_t it)
360 {
361 	struct gic_data *gd = &gic_data;
362 
363 	assert(it >= GIC_SGI_SEC_BASE && it <= GIC_SGI_SEC_MAX);
364 
365 	/* Assert it's secure to start with. */
366 	assert(!(gd->per_cpu_group_status & BIT32(it)) &&
367 	       (gd->per_cpu_group_modifier & BIT32(it)));
368 
369 	gd->per_cpu_group_modifier &= ~BIT32(it);
370 	gd->per_cpu_group_status |= BIT32(it);
371 
372 	if (affinity_routing_is_enabled(gd)) {
373 		vaddr_t gicr_base = get_gicr_base(gd);
374 
375 		if (!gicr_base)
376 			panic("GICR_BASE missing");
377 
378 		/* Disable interrupt */
379 		io_write32(gicr_base + GICR_ICENABLER0, BIT32(it));
380 
381 		/* Make interrupt non-pending */
382 		io_write32(gicr_base + GICR_ICPENDR0, BIT32(it));
383 
384 		/* Make it non-secure */
385 		io_write32(gicr_base + GICR_IGROUPR0, gd->per_cpu_group_status);
386 		io_write32(gicr_base + GICR_IGRPMODR0,
387 			   gd->per_cpu_group_modifier);
388 	} else {
389 		/* Disable interrupt */
390 		io_write32(gd->gicd_base + GICD_ICENABLER(0), BIT(it));
391 
392 		/* Make interrupt non-pending */
393 		io_write32(gd->gicd_base + GICD_ICPENDR(0), BIT(it));
394 
395 		/* Make it non-secure */
396 		io_write32(gd->gicd_base + GICD_IGROUPR(0),
397 			   gd->per_cpu_group_status);
398 	}
399 }
400 
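/*
 * Parse a GIC interrupt specifier: cell 0 selects the interrupt type
 * (GIC_SPI or GIC_PPI) and cell 1 is the number relative to that type, so
 * PPIs are offset by 16 and SPIs by 32 to get the absolute interrupt ID.
 */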
401 static int gic_dt_get_irq(const uint32_t *properties, int count, uint32_t *type,
402 			  uint32_t *prio)
403 {
404 	int it_num = DT_INFO_INVALID_INTERRUPT;
405 
406 	if (type)
407 		*type = IRQ_TYPE_NONE;
408 
409 	if (prio)
410 		*prio = 0;
411 
412 	if (!properties || count < 2)
413 		return DT_INFO_INVALID_INTERRUPT;
414 
415 	it_num = fdt32_to_cpu(properties[1]);
416 
417 	switch (fdt32_to_cpu(properties[0])) {
418 	case GIC_PPI:
419 		it_num += 16;
420 		break;
421 	case GIC_SPI:
422 		it_num += 32;
423 		break;
424 	default:
425 		it_num = DT_INFO_INVALID_INTERRUPT;
426 	}
427 
428 	return it_num;
429 }
430 
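/*
 * Walk the contiguous GICv3 redistributor frames: each frame's GICR_TYPER
 * tells which CPU (affinity) the redistributor serves and whether it is
 * the last frame, so the virtual base address can be recorded per core
 * position.
 */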
431 static void __maybe_unused probe_redist_base_addrs(vaddr_t *gicr_base_addrs,
432 						   paddr_t gicr_base_pa)
433 {
434 	size_t sz = GICR_V3_PCPUBASE_SIZE;
435 	paddr_t pa = gicr_base_pa;
436 	size_t core_pos = 0;
437 	uint64_t mt_bit = 0;
438 	uint64_t mpidr = 0;
439 	uint64_t tv = 0;
440 	vaddr_t va = 0;
441 
442 #ifdef ARM64
443 	mt_bit = read_mpidr_el1() & MPIDR_MT_MASK;
444 #endif
445 	do {
446 		va = core_mmu_get_va(pa, MEM_AREA_IO_SEC, sz);
447 		if (!va)
448 			panic();
449 		tv = io_read64(va + GICR_TYPER);
450 
451 		/*
452 		 * Extract an MPIDR from the GICR_TYPER register to calculate
453 		 * the core position of this redistributor instance.
454 		 */
455 		mpidr = mt_bit;
456 		mpidr |= SHIFT_U64((tv >> GICR_TYPER_AFF3_SHIFT) &
457 				   MPIDR_AFFLVL_MASK, MPIDR_AFF3_SHIFT);
458 		mpidr |= (tv >> GICR_TYPER_AFF0_SHIFT) &
459 			 (MPIDR_AFF0_MASK | MPIDR_AFF1_MASK | MPIDR_AFF2_MASK);
460 		core_pos = get_core_pos_mpidr(mpidr);
461 		if (core_pos < CFG_TEE_CORE_NB_CORE) {
462 			DMSG("GICR_BASE[%zu] at %#"PRIxVA, core_pos, va);
463 			gicr_base_addrs[core_pos] = va;
464 		} else {
465 			EMSG("Skipping too large core_pos %zu from GICR_TYPER",
466 			     core_pos);
467 		}
468 		pa += sz;
469 	} while (!(tv & GICR_TYPER_LAST));
470 }
471 
472 static void gic_init_base_addr(paddr_t gicc_base_pa, paddr_t gicd_base_pa,
473 			       paddr_t gicr_base_pa __maybe_unused)
474 {
475 	struct gic_data *gd = &gic_data;
476 	vaddr_t gicc_base = 0;
477 	vaddr_t gicd_base = 0;
478 	uint32_t vers __maybe_unused = 0;
479 
480 	assert(cpu_mmu_enabled());
481 
482 	gicd_base = core_mmu_get_va(gicd_base_pa, MEM_AREA_IO_SEC,
483 				    GIC_DIST_REG_SIZE);
484 	if (!gicd_base)
485 		panic();
486 
487 	vers = io_read32(gicd_base + GICD_PIDR2);
488 	vers >>= GICD_PIDR2_ARCHREV_SHIFT;
489 	vers &= GICD_PIDR2_ARCHREV_MASK;
490 
491 	if (IS_ENABLED(CFG_ARM_GICV3)) {
492 		assert(vers == 3);
493 	} else {
494 		assert(vers == 2 || vers == 1);
495 		gicc_base = core_mmu_get_va(gicc_base_pa, MEM_AREA_IO_SEC,
496 					    GIC_CPU_REG_SIZE);
497 		if (!gicc_base)
498 			panic();
499 	}
500 
501 	gd->gicc_base = gicc_base;
502 	gd->gicd_base = gicd_base;
503 	gd->max_it = probe_max_it(gicc_base, gicd_base);
504 #if defined(CFG_ARM_GICV3)
505 	if (affinity_routing_is_enabled(gd) && gicr_base_pa)
506 		probe_redist_base_addrs(gd->gicr_base, gicr_base_pa);
507 #endif
508 	gd->chip.ops = &gic_ops;
509 
510 	if (IS_ENABLED(CFG_DT))
511 		gd->chip.dt_get_irq = gic_dt_get_irq;
512 }
513 
514 void gic_init_v3(paddr_t gicc_base_pa, paddr_t gicd_base_pa,
515 		 paddr_t gicr_base_pa)
516 {
517 	struct gic_data __maybe_unused *gd = &gic_data;
518 	size_t __maybe_unused n = 0;
519 
520 	gic_init_base_addr(gicc_base_pa, gicd_base_pa, gicr_base_pa);
521 
522 #if defined(CFG_WITH_ARM_TRUSTED_FW)
523 	/* GIC configuration is already initialized by TF-A in this case */
524 	if (affinity_routing_is_enabled(gd)) {
525 		/* Secure affinity routing enabled */
526 		vaddr_t gicr_base = get_gicr_base(gd);
527 
528 		if (gicr_base) {
529 			gd->per_cpu_group_status = io_read32(gicr_base +
530 							     GICR_IGROUPR0);
531 			gd->per_cpu_group_modifier = io_read32(gicr_base +
532 							       GICR_IGRPMODR0);
533 		} else {
534 			IMSG("GIC redistributor base address not provided");
535 			IMSG("Assuming default GIC group status and modifier");
536 			gd->per_cpu_group_status = 0xffff00ff;
537 			gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
538 		}
539 	} else {
540 		/* Legacy operation with secure affinity routing disabled */
541 		gd->per_cpu_group_status = io_read32(gd->gicd_base +
542 						     GICD_IGROUPR(0));
543 		gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
544 	}
545 #else /*!CFG_WITH_ARM_TRUSTED_FW*/
546 	/*
547 	 * Without TF-A, the GIC is always configured for legacy operation
548 	 * with secure affinity routing disabled.
549 	 */
550 	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
551 		/* Disable interrupts */
552 		io_write32(gd->gicd_base + GICD_ICENABLER(n), 0xffffffff);
553 
554 		/* Make interrupts non-pending */
555 		io_write32(gd->gicd_base + GICD_ICPENDR(n), 0xffffffff);
556 
557 		/* Mark interrupts non-secure */
558 		if (n == 0) {
559 			/* Per-CPU interrupt configuration:
560 			 * ID0-ID7 (SGI)  are Non-secure interrupts
561 			 * ID8-ID15 (SGI) are Secure interrupts
562 			 * All PPIs are configured as Non-secure interrupts
563 			 */
564 			gd->per_cpu_group_status = 0xffff00ff;
565 			gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
566 			io_write32(gd->gicd_base + GICD_IGROUPR(n),
567 				   gd->per_cpu_group_status);
568 		} else {
569 			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffffffff);
570 		}
571 	}
572 
573 	/* Set the priority mask to permit Non-secure interrupts, and to
574 	 * allow the Non-secure world to adjust the priority mask itself
575 	 */
576 #if defined(CFG_ARM_GICV3)
577 	write_icc_pmr(0x80);
578 	write_icc_igrpen1(1);
579 	io_setbits32(gd->gicd_base + GICD_CTLR, GICD_CTLR_ENABLEGRP1S);
580 #else
581 	io_write32(gd->gicc_base + GICC_PMR, 0x80);
582 
583 	/* Enable GIC */
584 	io_write32(gd->gicc_base + GICC_CTLR, GICC_CTLR_FIQEN |
585 		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1);
586 	io_setbits32(gd->gicd_base + GICD_CTLR,
587 		     GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1NS);
588 #endif
589 #endif /*!CFG_WITH_ARM_TRUSTED_FW*/
590 
591 	interrupt_main_init(&gic_data.chip);
592 }
593 
594 static void gic_it_add(struct gic_data *gd, size_t it)
595 {
596 	size_t idx = it / NUM_INTS_PER_REG;
597 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
598 
599 	assert(gd == &gic_data);
600 
601 	/* Disable the interrupt */
602 	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
603 	/* Make it non-pending */
604 	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
605 	/* Assign it to group0 */
606 	io_clrbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
607 #if defined(CFG_ARM_GICV3)
608 	/* Assign it to group1S */
609 	io_setbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
610 #endif
611 }
612 
613 static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
614 				uint8_t cpu_mask)
615 {
616 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
617 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
618 	uint32_t target, target_shift;
619 	vaddr_t itargetsr = gd->gicd_base +
620 			    GICD_ITARGETSR(it / NUM_TARGETS_PER_REG);
621 
622 	assert(gd == &gic_data);
623 
624 	/* Assigned to group0 */
625 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
626 
627 	/* Route it to selected CPUs */
628 	target = io_read32(itargetsr);
629 	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
630 	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
631 	target |= cpu_mask << target_shift;
632 	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA, target, itargetsr);
633 	io_write32(itargetsr, target);
634 	DMSG("cpu_mask: 0x%x", io_read32(itargetsr));
635 }
636 
637 static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
638 {
639 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
640 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
641 
642 	assert(gd == &gic_data);
643 
644 	/* Assigned to group0 */
645 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
646 
647 	/* Set the priority of the interrupt */
648 	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
649 		prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
650 	io_write8(gd->gicd_base + GICD_IPRIORITYR(0) + it, prio);
651 }
652 
653 static void gic_it_enable(struct gic_data *gd, size_t it)
654 {
655 	size_t idx = it / NUM_INTS_PER_REG;
656 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
657 	vaddr_t base = gd->gicd_base;
658 
659 	assert(gd == &gic_data);
660 
661 	/* Assigned to group0 */
662 	assert(!(io_read32(base + GICD_IGROUPR(idx)) & mask));
663 
664 	/* Enable the interrupt */
665 	io_write32(base + GICD_ISENABLER(idx), mask);
666 }
667 
668 static void gic_it_disable(struct gic_data *gd, size_t it)
669 {
670 	size_t idx = it / NUM_INTS_PER_REG;
671 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
672 
673 	assert(gd == &gic_data);
674 
675 	/* Assigned to group0 */
676 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
677 
678 	/* Disable the interrupt */
679 	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
680 }
681 
682 static void gic_it_set_pending(struct gic_data *gd, size_t it)
683 {
684 	size_t idx = it / NUM_INTS_PER_REG;
685 	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);
686 
687 	assert(gd == &gic_data);
688 
689 	/* Should be a peripheral interrupt (not an SGI) */
690 	assert(it >= NUM_SGI);
691 
692 	/* Raise the interrupt */
693 	io_write32(gd->gicd_base + GICD_ISPENDR(idx), mask);
694 }
695 
696 static void assert_cpu_mask_is_valid(uint32_t cpu_mask)
697 {
698 	bool __maybe_unused to_others = cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS;
699 	bool __maybe_unused to_current = cpu_mask & ITR_CPU_MASK_TO_THIS_CPU;
700 	bool __maybe_unused to_list = cpu_mask & 0xff;
701 
702 	/* One and only one of the bit fields shall be non-zero */
703 	assert(to_others + to_current + to_list == 1);
704 }
705 
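/*
 * Raise an SGI. On GICv3 this builds an ICC_SGI1R_EL1/ICC_ASGI1R_EL1 value
 * with the SGI ID in bits [27:24], the target list in bits [15:0], the
 * affinity of the current cluster and the IRM bit for "all but self". On
 * GICv2 the equivalent GICD_SGIR write is used instead.
 */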
706 static void gic_it_raise_sgi(struct gic_data *gd __maybe_unused, size_t it,
707 			     uint32_t cpu_mask, bool ns)
708 {
709 #if defined(CFG_ARM_GICV3)
710 	uint32_t mask_id = it & 0xf;
711 	uint64_t mask = SHIFT_U64(mask_id, 24);
712 
713 	assert_cpu_mask_is_valid(cpu_mask);
714 
715 	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
716 		mask |= BIT64(GICC_SGI_IRM_BIT);
717 	} else {
718 		uint64_t mpidr = read_mpidr();
719 		uint64_t mask_aff1 = (mpidr & MPIDR_AFF1_MASK) >>
720 				     MPIDR_AFF1_SHIFT;
721 		uint64_t mask_aff2 = (mpidr & MPIDR_AFF2_MASK) >>
722 				     MPIDR_AFF2_SHIFT;
723 		uint64_t mask_aff3 = (mpidr & MPIDR_AFF3_MASK) >>
724 				     MPIDR_AFF3_SHIFT;
725 
726 		mask |= SHIFT_U64(mask_aff1, GICC_SGI_AFF1_SHIFT);
727 		mask |= SHIFT_U64(mask_aff2, GICC_SGI_AFF2_SHIFT);
728 		mask |= SHIFT_U64(mask_aff3, GICC_SGI_AFF3_SHIFT);
729 
730 		if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
731 			mask |= BIT32(mpidr & 0xf);
732 		} else {
733 			/*
734 			 * Only sending SGIs to cores in the same
735 			 * cluster is supported for now.
736 			 */
737 			mask |= cpu_mask & 0xff;
738 		}
739 	}
740 
741 	/* Raise the interrupt */
742 	if (ns)
743 		write_icc_asgi1r(mask);
744 	else
745 		write_icc_sgi1r(mask);
746 #else
747 	uint32_t mask_id = it & GICD_SGIR_SIGINTID_MASK;
748 	uint32_t mask_group = ns;
749 	uint32_t mask = mask_id;
750 
751 	assert_cpu_mask_is_valid(cpu_mask);
752 
753 	mask |= SHIFT_U32(mask_group, GICD_SGIR_NSATT_SHIFT);
754 	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
755 		mask |= SHIFT_U32(GICD_SGIR_TO_OTHER_CPUS,
756 				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
757 	} else if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
758 		mask |= SHIFT_U32(GICD_SGIR_TO_THIS_CPU,
759 				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
760 	} else {
761 		mask |= SHIFT_U32(cpu_mask & 0xff,
762 				  GICD_SGIR_CPU_TARGET_LIST_SHIFT);
763 	}
764 
765 	/* Raise the interrupt */
766 	io_write32(gd->gicd_base + GICD_SGIR, mask);
767 #endif
768 }
769 
770 static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
771 {
772 	assert(gd == &gic_data);
773 
774 #if defined(CFG_ARM_GICV3)
775 	return read_icc_iar1();
776 #else
777 	return io_read32(gd->gicc_base + GICC_IAR);
778 #endif
779 }
780 
781 static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
782 {
783 	assert(gd == &gic_data);
784 
785 #if defined(CFG_ARM_GICV3)
786 	write_icc_eoir1(eoir);
787 #else
788 	io_write32(gd->gicc_base + GICC_EOIR, eoir);
789 #endif
790 }
791 
792 static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
793 {
794 	size_t idx = it / NUM_INTS_PER_REG;
795 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
796 
797 	assert(gd == &gic_data);
798 	return !!(io_read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
799 }
800 
801 static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
802 {
803 	size_t idx = it / NUM_INTS_PER_REG;
804 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
805 
806 	assert(gd == &gic_data);
807 	return !!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
808 }
809 
810 static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
811 {
812 	size_t reg_idx = it / NUM_TARGETS_PER_REG;
813 	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
814 				ITARGETSR_FIELD_BITS;
815 	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
816 	uint32_t target = io_read32(gd->gicd_base + GICD_ITARGETSR(reg_idx));
817 
818 	assert(gd == &gic_data);
819 	return (target & target_mask) >> target_shift;
820 }
821 
822 void gic_dump_state(void)
823 {
824 	struct gic_data *gd = &gic_data;
825 	int i = 0;
826 
827 #if defined(CFG_ARM_GICV3)
828 	DMSG("GICC_CTLR: 0x%x", read_icc_ctlr());
829 #else
830 	DMSG("GICC_CTLR: 0x%x", io_read32(gd->gicc_base + GICC_CTLR));
831 #endif
832 	DMSG("GICD_CTLR: 0x%x", io_read32(gd->gicd_base + GICD_CTLR));
833 
834 	for (i = 0; i <= (int)gd->max_it; i++) {
835 		if (gic_it_is_enabled(gd, i)) {
836 			DMSG("irq%d: enabled, group:%d, target:%x", i,
837 			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
838 		}
839 	}
840 }
841 
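/*
 * Native interrupt handler: reading the IAR acknowledges the highest
 * priority pending interrupt and the same value must be written to the EOI
 * register once the handlers have run. IDs in the special range
 * (1020-1023, e.g. 1023 for a spurious interrupt) normally exceed max_it
 * and are ignored.
 */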
842 static void __maybe_unused gic_native_itr_handler(void)
843 {
844 	struct gic_data *gd = &gic_data;
845 	uint32_t iar = 0;
846 	uint32_t id = 0;
847 
848 	iar = gic_read_iar(gd);
849 	id = iar & GICC_IAR_IT_ID_MASK;
850 
851 	if (id <= gd->max_it)
852 		interrupt_call_handlers(&gd->chip, id);
853 	else
854 		DMSG("ignoring interrupt %" PRIu32, id);
855 
856 	gic_write_eoir(gd, iar);
857 }
858 
859 #ifndef CFG_CORE_WORKAROUND_ARM_NMFI
860 /* Override interrupt_main_handler() with driver implementation */
861 void interrupt_main_handler(void)
862 {
863 	gic_native_itr_handler();
864 }
865 #endif /*CFG_CORE_WORKAROUND_ARM_NMFI*/
866 
867 static void gic_op_add(struct itr_chip *chip, size_t it,
868 		       uint32_t type __unused,
869 		       uint32_t prio __unused)
870 {
871 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
872 
873 	assert(gd == &gic_data);
874 
875 	if (it > gd->max_it)
876 		panic();
877 
878 	gic_it_add(gd, it);
879 	/* Set the CPU mask to deliver interrupts to any online core */
880 	gic_it_set_cpu_mask(gd, it, 0xff);
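	/*
	 * A lower numeric value means a higher priority: 0x1 keeps secure
	 * interrupts above the 0x80 priority mask set when the CPU interface
	 * is initialized.
	 */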
881 	gic_it_set_prio(gd, it, 0x1);
882 }
883 
884 static void gic_op_enable(struct itr_chip *chip, size_t it)
885 {
886 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
887 
888 	assert(gd == &gic_data);
889 
890 	if (it > gd->max_it)
891 		panic();
892 
893 	gic_it_enable(gd, it);
894 }
895 
896 static void gic_op_disable(struct itr_chip *chip, size_t it)
897 {
898 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
899 
900 	assert(gd == &gic_data);
901 
902 	if (it > gd->max_it)
903 		panic();
904 
905 	gic_it_disable(gd, it);
906 }
907 
908 static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
909 {
910 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
911 
912 	assert(gd == &gic_data);
913 
914 	if (it > gd->max_it)
915 		panic();
916 
917 	gic_it_set_pending(gd, it);
918 }
919 
920 static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
921 			     uint32_t cpu_mask)
922 {
923 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
924 	bool ns = false;
925 
926 	assert(gd == &gic_data);
927 
928 	/* Should be a Software Generated Interrupt */
929 	assert(it < NUM_SGI);
930 
931 	ns = BIT32(it) & gd->per_cpu_group_status;
932 	gic_it_raise_sgi(gd, it, cpu_mask, ns);
933 }
934 
935 static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
936 			uint8_t cpu_mask)
937 {
938 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
939 
940 	assert(gd == &gic_data);
941 
942 	if (it > gd->max_it)
943 		panic();
944 
945 	gic_it_set_cpu_mask(gd, it, cpu_mask);
946 }
947 
948 #ifdef CFG_DT
949 /* Callback for "interrupts" and "interrupts-extended" DT node properties */
950 static TEE_Result dt_get_gic_chip_cb(struct dt_pargs *arg, void *priv_data,
951 				     struct itr_desc *itr_desc)
952 {
953 	int itr_num = DT_INFO_INVALID_INTERRUPT;
954 	struct itr_chip *chip = priv_data;
955 	uint32_t phandle_args[2] = { };
956 	uint32_t type = 0;
957 	uint32_t prio = 0;
958 
959 	assert(arg && itr_desc);
960 
961 	/*
962 	 * gic_dt_get_irq() expects the phandle arguments to still be in DT
963 	 * format (big-endian) whereas struct dt_pargs carries CPU-endian
964 	 * values, so convert the phandle arguments back. gic_dt_get_irq()
965 	 * consumes only the first 2 arguments.
966 	 */
967 	if (arg->args_count < 2)
968 		return TEE_ERROR_GENERIC;
969 	phandle_args[0] = cpu_to_fdt32(arg->args[0]);
970 	phandle_args[1] = cpu_to_fdt32(arg->args[1]);
971 
972 	itr_num = gic_dt_get_irq((const void *)phandle_args, 2, &type, &prio);
973 	if (itr_num == DT_INFO_INVALID_INTERRUPT)
974 		return TEE_ERROR_GENERIC;
975 
976 	gic_op_add(chip, itr_num, type, prio);
977 
978 	itr_desc->chip = chip;
979 	itr_desc->itr_num = itr_num;
980 
981 	return TEE_SUCCESS;
982 }
983 
984 static TEE_Result gic_probe(const void *fdt, int offs, const void *cd __unused)
985 {
986 	if (interrupt_register_provider(fdt, offs, dt_get_gic_chip_cb,
987 					&gic_data.chip))
988 		panic();
989 
990 	return TEE_SUCCESS;
991 }
992 
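/*
 * Only GICv1/v2 compatible strings are matched here; GICv3 platforms are
 * expected to initialize the GIC directly from platform code through
 * gic_init_v3().
 */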
993 static const struct dt_device_match gic_match_table[] = {
994 	{ .compatible = "arm,cortex-a15-gic" },
995 	{ .compatible = "arm,cortex-a7-gic" },
996 	{ .compatible = "arm,cortex-a5-gic" },
997 	{ .compatible = "arm,cortex-a9-gic" },
998 	{ .compatible = "arm,gic-400" },
999 	{ }
1000 };
1001 
1002 DEFINE_DT_DRIVER(gic_dt_driver) = {
1003 	.name = "gic",
1004 	.match_table = gic_match_table,
1005 	.probe = gic_probe,
1006 };
1007 #endif /*CFG_DT*/
1008