xref: /optee_os/core/drivers/gic.c (revision 5f7f88c6b9d618d1e068166bbf2b07757350791d)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2017, 2023 Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <compiler.h>
#include <config.h>
#include <drivers/gic.h>
#include <io.h>
#include <keep.h>
#include <kernel/dt.h>
#include <kernel/dt_driver.h>
#include <kernel/interrupt.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <libfdt.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <trace.h>
#include <util.h>

/* Offsets from gic.gicc_base */
#define GICC_CTLR		(0x000)
#define GICC_PMR		(0x004)
#define GICC_IAR		(0x00C)
#define GICC_EOIR		(0x010)

#define GICC_CTLR_ENABLEGRP0	(1 << 0)
#define GICC_CTLR_ENABLEGRP1	(1 << 1)
#define GICC_CTLR_FIQEN		(1 << 3)

/* Offsets from gic.gicd_base */
#define GICD_CTLR		(0x000)
#define GICD_TYPER		(0x004)
#define GICD_IGROUPR(n)		(0x080 + (n) * 4)
#define GICD_ISENABLER(n)	(0x100 + (n) * 4)
#define GICD_ICENABLER(n)	(0x180 + (n) * 4)
#define GICD_ISPENDR(n)		(0x200 + (n) * 4)
#define GICD_ICPENDR(n)		(0x280 + (n) * 4)
#define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
#define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
#define GICD_IGROUPMODR(n)	(0xd00 + (n) * 4)
#define GICD_SGIR		(0xF00)

#ifdef CFG_ARM_GICV3
#define GICD_PIDR2		(0xFFE8)
#else
/* Called ICPIDR2 in GICv2 specification */
#define GICD_PIDR2		(0xFE8)
#endif

#define GICD_CTLR_ENABLEGRP0	BIT32(0)
#define GICD_CTLR_ENABLEGRP1NS	BIT32(1)
#define GICD_CTLR_ENABLEGRP1S	BIT32(2)
#define GICD_CTLR_ARE_S		BIT32(4)
#define GICD_CTLR_ARE_NS	BIT32(5)

/* Offsets from gic.gicr_base[core_pos] */
#define GICR_V3_PCPUBASE_SIZE	(2 * 64 * 1024)
#define GICR_SGI_BASE_OFFSET	(64 * 1024)
#define GICR_CTLR		(0x00)
#define GICR_TYPER		(0x08)

#define GICR_IGROUPR0		(GICR_SGI_BASE_OFFSET + 0x080)
#define GICR_IGRPMODR0		(GICR_SGI_BASE_OFFSET + 0xD00)
#define GICR_ICENABLER0		(GICR_SGI_BASE_OFFSET + 0x180)
#define GICR_ICPENDR0		(GICR_SGI_BASE_OFFSET + 0x280)

#define GICR_TYPER_LAST		BIT64(4)
#define GICR_TYPER_AFF3_SHIFT	56
#define GICR_TYPER_AFF2_SHIFT	48
#define GICR_TYPER_AFF1_SHIFT	40
#define GICR_TYPER_AFF0_SHIFT	32

/*
 * The GICD PIDR2 register is named differently in GICv3 and GICv2
 * (ICPIDR2) but uses the same bit layout.
 */
#define GICD_PIDR2_ARCHREV_SHIFT	4
#define GICD_PIDR2_ARCHREV_MASK		0xF

/* Number of Private Peripheral Interrupts */
#define NUM_PPI	32

/* Number of Software Generated Interrupts */
#define NUM_SGI			16

/* Number of Non-secure Software Generated Interrupts */
#define NUM_NS_SGI		8

/* Number of interrupts in one register */
#define NUM_INTS_PER_REG	32

/* Number of targets in one register */
#define NUM_TARGETS_PER_REG	4

/* Accessors to access ITARGETSRn */
#define ITARGETSR_FIELD_BITS	8
#define ITARGETSR_FIELD_MASK	0xff

#define GICD_TYPER_IT_LINES_NUM_MASK	0x1f
#define GICC_IAR_IT_ID_MASK	0x3ff
#define GICC_IAR_CPU_ID_MASK	0x7
#define GICC_IAR_CPU_ID_SHIFT	10

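/*
 * Bit positions of the fields used when composing the 64-bit value
 * written to ICC_SGI1R/ICC_ASGI1R to generate SGIs on GICv3.
 */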
#define GICC_SGI_IRM_BIT	40
#define GICC_SGI_AFF1_SHIFT	16
#define GICC_SGI_AFF2_SHIFT	32
#define GICC_SGI_AFF3_SHIFT	48

#define GICD_SGIR_SIGINTID_MASK			0xf
#define GICD_SGIR_TO_OTHER_CPUS		0x1
#define GICD_SGIR_TO_THIS_CPU			0x2
#define GICD_SGIR_TARGET_LIST_FILTER_SHIFT	24
#define GICD_SGIR_NSATT_SHIFT			15
#define GICD_SGIR_CPU_TARGET_LIST_SHIFT		16

struct gic_data {
	vaddr_t gicc_base;
	vaddr_t gicd_base;
#if defined(CFG_ARM_GICV3)
	vaddr_t gicr_base[CFG_TEE_CORE_NB_CORE];
#endif
	size_t max_it;
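	/*
	 * Per-CPU interrupt group configuration for SGIs/PPIs as set up
	 * on the primary CPU. For each interrupt, the {group modifier,
	 * group status} bit pair selects its group: {0, 0} is Group 0,
	 * {1, 0} is Secure Group 1 and {0, 1} is Non-secure Group 1.
	 * The group modifier is only relevant on GICv3.
	 */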
	uint32_t per_cpu_group_status;
	uint32_t per_cpu_group_modifier;
	struct itr_chip chip;
};

static struct gic_data gic_data __nex_bss;

static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t type,
		       uint32_t prio);
static void gic_op_enable(struct itr_chip *chip, size_t it);
static void gic_op_disable(struct itr_chip *chip, size_t it);
static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			     uint32_t cpu_mask);
static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);

static const struct itr_ops gic_ops = {
	.add = gic_op_add,
	.mask = gic_op_disable,
	.unmask = gic_op_enable,
	.enable = gic_op_enable,
	.disable = gic_op_disable,
	.raise_pi = gic_op_raise_pi,
	.raise_sgi = gic_op_raise_sgi,
	.set_affinity = gic_op_set_affinity,
};
DECLARE_KEEP_PAGER(gic_ops);

static vaddr_t __maybe_unused get_gicr_base(struct gic_data *gd __maybe_unused)
{
#if defined(CFG_ARM_GICV3)
	return gd->gicr_base[get_core_pos()];
#else
	return 0;
#endif
}

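/*
 * Secure affinity routing (GICD_CTLR.ARE_S) is a GICv3 feature. When it
 * is enabled, per-CPU SGI/PPI configuration is done through the
 * redistributor (GICR) registers instead of the banked GICD registers.
 */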
static bool affinity_routing_is_enabled(struct gic_data *gd)
{
	return IS_ENABLED(CFG_ARM_GICV3) &&
	       io_read32(gd->gicd_base + GICD_CTLR) & GICD_CTLR_ARE_S;
}

static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
{
	int i;
	uint32_t old_ctlr;
	size_t ret = 0;
	size_t max_regs = io_read32(gicd_base + GICD_TYPER) &
			  GICD_TYPER_IT_LINES_NUM_MASK;

	/*
	 * Probe which interrupt number is the largest.
	 */
#if defined(CFG_ARM_GICV3)
	old_ctlr = read_icc_ctlr();
	write_icc_ctlr(0);
#else
	old_ctlr = io_read32(gicc_base + GICC_CTLR);
	io_write32(gicc_base + GICC_CTLR, 0);
#endif
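	/*
	 * GICD_ISENABLER bits for unimplemented interrupt IDs are
	 * RAZ/WI, so writing all ones and reading back reveals the
	 * largest implemented interrupt ID in each register. The
	 * original enable state is restored through GICD_ICENABLER.
	 */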
	for (i = max_regs; i >= 0; i--) {
		uint32_t old_reg;
		uint32_t reg;
		int b;

		old_reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ISENABLER(i), 0xffffffff);
		reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ICENABLER(i), ~old_reg);
		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
			if (BIT32(b) & reg) {
				ret = i * NUM_INTS_PER_REG + b;
				goto out;
			}
		}
	}
out:
#if defined(CFG_ARM_GICV3)
	write_icc_ctlr(old_ctlr);
#else
	io_write32(gicc_base + GICC_CTLR, old_ctlr);
#endif
	return ret;
}

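/*
 * With affinity routing enabled, the SGI group configuration lives in
 * per-CPU redistributor registers. Secondary CPUs call this to align
 * their banked GICR_IGROUPR0/GICR_IGRPMODR0 bits for the secure SGI
 * range with the configuration recorded by the primary CPU.
 */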
static void gicv3_sync_sgi_config(struct gic_data *gd)
{
	vaddr_t gicr_base = get_gicr_base(gd);
	bool need_sync = false;
	uint32_t gmod0 = 0;
	uint32_t grp0 = 0;
	size_t n = 0;

	/*
	 * If gicr_base isn't available there's no need to synchronize SGI
	 * configuration since gic_init_donate_sgi_to_ns() would panic.
	 */
	if (!gicr_base)
		return;

	grp0 = io_read32(gicr_base + GICR_IGROUPR0);
	gmod0 = io_read32(gicr_base + GICR_IGRPMODR0);
	for (n = GIC_SGI_SEC_BASE; n <= GIC_SGI_SEC_MAX; n++) {
		/* Ignore matching bits */
		if (!(BIT32(n) & (grp0 ^ gd->per_cpu_group_status)) &&
		    !(BIT32(n) & (gmod0 ^ gd->per_cpu_group_modifier)))
			continue;
		/*
		 * SGI-n differs from primary CPU configuration,
		 * let's sync up.
		 */
		need_sync = true;

		/* Disable interrupt */
		io_write32(gicr_base + GICR_ICENABLER0, BIT32(n));

		/* Make interrupt non-pending */
		io_write32(gicr_base + GICR_ICPENDR0, BIT32(n));

		if (BIT32(n) & gd->per_cpu_group_status)
			grp0 |= BIT32(n);
		else
			grp0 &= ~BIT32(n);
		if (BIT32(n) & gd->per_cpu_group_modifier)
			gmod0 |= BIT32(n);
		else
			gmod0 &= ~BIT32(n);
	}

	if (need_sync) {
		io_write32(gicr_base + GICR_IGROUPR0, grp0);
		io_write32(gicr_base + GICR_IGRPMODR0, gmod0);
	}
}

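/*
 * Legacy counterpart of gicv3_sync_sgi_config(): with affinity routing
 * disabled, GICD_IGROUPR(0) is banked per CPU, so each secondary CPU
 * aligns its own copy with the group status recorded by the primary
 * CPU.
 */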
static void gic_legacy_sync_sgi_config(struct gic_data *gd)
{
	bool need_sync = false;
	uint32_t grp0 = 0;
	size_t n = 0;

	grp0 = io_read32(gd->gicd_base + GICD_IGROUPR(0));
	for (n = GIC_SGI_SEC_BASE; n <= GIC_SGI_SEC_MAX; n++) {
		/* Ignore matching bits */
		if (!(BIT32(n) & (grp0 ^ gd->per_cpu_group_status)))
			continue;
		/*
		 * SGI-n differs from primary CPU configuration,
		 * let's sync up.
		 */
		need_sync = true;

		/* Disable interrupt */
		io_write32(gd->gicd_base + GICD_ICENABLER(0), BIT(n));

		/* Make interrupt non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(0), BIT(n));

		if (BIT32(n) & gd->per_cpu_group_status)
			grp0 |= BIT32(n);
		else
			grp0 &= ~BIT32(n);
	}

	if (need_sync)
		io_write32(gd->gicd_base + GICD_IGROUPR(0), grp0);
}

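/*
 * The registers written here are either banked per CPU
 * (GICD_IGROUPR(0)) or belong to the per-CPU interface, so each core
 * must program them itself.
 */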
static void init_gic_per_cpu(struct gic_data *gd)
{
	io_write32(gd->gicd_base + GICD_IGROUPR(0), gd->per_cpu_group_status);

	/*
	 * Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR,
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		   GICC_CTLR_FIQEN);
#endif
}

void gic_init_per_cpu(void)
{
	struct gic_data *gd = &gic_data;

#if defined(CFG_ARM_GICV3)
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif

	if (IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW)) {
		/*
		 * The GIC is already initialized by TF-A, we only need
		 * to synchronize any SGI configuration changes.
		 */
		if (affinity_routing_is_enabled(gd))
			gicv3_sync_sgi_config(gd);
		else
			gic_legacy_sync_sgi_config(gd);
	} else {
		/*
		 * Non-TF-A case where all CPU-specific GIC
		 * configuration must be done here.
		 */
		init_gic_per_cpu(gd);
	}
}

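/*
 * Moves one secure SGI in the [GIC_SGI_SEC_BASE, GIC_SGI_SEC_MAX] range
 * to the Non-secure group on the calling CPU and records the change so
 * that secondary CPUs can apply it in gic_init_per_cpu().
 */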
void gic_init_donate_sgi_to_ns(size_t it)
{
	struct gic_data *gd = &gic_data;

	assert(it >= GIC_SGI_SEC_BASE && it <= GIC_SGI_SEC_MAX);

	/* Assert it's secure to start with. */
	assert(!(gd->per_cpu_group_status & BIT32(it)) &&
	       (gd->per_cpu_group_modifier & BIT32(it)));

	gd->per_cpu_group_modifier &= ~BIT32(it);
	gd->per_cpu_group_status |= BIT32(it);

	if (affinity_routing_is_enabled(gd)) {
		vaddr_t gicr_base = get_gicr_base(gd);

		if (!gicr_base)
			panic("GICR_BASE missing");

		/* Disable interrupt */
		io_write32(gicr_base + GICR_ICENABLER0, BIT32(it));

		/* Make interrupt non-pending */
		io_write32(gicr_base + GICR_ICPENDR0, BIT32(it));

		/* Make it Non-secure */
		io_write32(gicr_base + GICR_IGROUPR0, gd->per_cpu_group_status);
		io_write32(gicr_base + GICR_IGRPMODR0,
			   gd->per_cpu_group_modifier);
	} else {
		/* Disable interrupt */
		io_write32(gd->gicd_base + GICD_ICENABLER(0), BIT(it));

		/* Make interrupt non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(0), BIT(it));

		/* Make it Non-secure */
		io_write32(gd->gicd_base + GICD_IGROUPR(0),
			   gd->per_cpu_group_status);
	}
}

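/*
 * Translates a device tree interrupt specifier into a GIC interrupt ID:
 * per the GIC DT binding the first cell holds the interrupt type
 * (GIC_SPI/GIC_PPI) and the second the type-relative number, so PPIs
 * are offset by 16 and SPIs by 32.
 */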
static int gic_dt_get_irq(const uint32_t *properties, int count, uint32_t *type,
			  uint32_t *prio)
{
	int it_num = DT_INFO_INVALID_INTERRUPT;

	if (type)
		*type = IRQ_TYPE_NONE;

	if (prio)
		*prio = 0;

	if (!properties || count < 2)
		return DT_INFO_INVALID_INTERRUPT;

	it_num = fdt32_to_cpu(properties[1]);

	switch (fdt32_to_cpu(properties[0])) {
	case GIC_PPI:
		it_num += 16;
		break;
	case GIC_SPI:
		it_num += 32;
		break;
	default:
		it_num = DT_INFO_INVALID_INTERRUPT;
	}

	return it_num;
}

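/*
 * Walks the contiguous GICv3 redistributor frames (two 64KB regions per
 * CPU) and records each frame's virtual base address at the core
 * position derived from the affinity fields in GICR_TYPER, stopping at
 * the frame with GICR_TYPER.Last set.
 */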
static void __maybe_unused probe_redist_base_addrs(vaddr_t *gicr_base_addrs,
						   paddr_t gicr_base_pa)
{
	size_t sz = GICR_V3_PCPUBASE_SIZE;
	paddr_t pa = gicr_base_pa;
	size_t core_pos = 0;
	uint64_t mt_bit = 0;
	uint64_t mpidr = 0;
	uint64_t tv = 0;
	vaddr_t va = 0;

#ifdef ARM64
	mt_bit = read_mpidr_el1() & MPIDR_MT_MASK;
#endif
	do {
		va = core_mmu_get_va(pa, MEM_AREA_IO_SEC, sz);
		if (!va)
			panic();
		tv = io_read64(va + GICR_TYPER);

		/*
		 * Extract an MPIDR from GICR_TYPER to calculate the core
		 * position of this redistributor instance.
		 */
		mpidr = mt_bit;
		mpidr |= SHIFT_U64((tv >> GICR_TYPER_AFF3_SHIFT) &
				   MPIDR_AFFLVL_MASK, MPIDR_AFF3_SHIFT);
		mpidr |= (tv >> GICR_TYPER_AFF0_SHIFT) &
			 (MPIDR_AFF0_MASK | MPIDR_AFF1_MASK | MPIDR_AFF2_MASK);
		core_pos = get_core_pos_mpidr(mpidr);
		if (core_pos < CFG_TEE_CORE_NB_CORE) {
			DMSG("GICR_BASE[%zu] at %#"PRIxVA, core_pos, va);
			gicr_base_addrs[core_pos] = va;
		} else {
			EMSG("Skipping too large core_pos %zu from GICR_TYPER",
			     core_pos);
		}
		pa += sz;
	} while (!(tv & GICR_TYPER_LAST));
}

static void gic_init_base_addr(paddr_t gicc_base_pa, paddr_t gicd_base_pa,
			       paddr_t gicr_base_pa __maybe_unused)
{
	struct gic_data *gd = &gic_data;
	vaddr_t gicc_base = 0;
	vaddr_t gicd_base = 0;
	uint32_t vers __maybe_unused = 0;

	assert(cpu_mmu_enabled());

	gicd_base = core_mmu_get_va(gicd_base_pa, MEM_AREA_IO_SEC,
				    GIC_DIST_REG_SIZE);
	if (!gicd_base)
		panic();

	vers = io_read32(gicd_base + GICD_PIDR2);
	vers >>= GICD_PIDR2_ARCHREV_SHIFT;
	vers &= GICD_PIDR2_ARCHREV_MASK;

	if (IS_ENABLED(CFG_ARM_GICV3)) {
		assert(vers == 3);
	} else {
		assert(vers == 2 || vers == 1);
		gicc_base = core_mmu_get_va(gicc_base_pa, MEM_AREA_IO_SEC,
					    GIC_CPU_REG_SIZE);
		if (!gicc_base)
			panic();
	}

	gd->gicc_base = gicc_base;
	gd->gicd_base = gicd_base;
	gd->max_it = probe_max_it(gicc_base, gicd_base);
#if defined(CFG_ARM_GICV3)
	if (affinity_routing_is_enabled(gd) && gicr_base_pa)
		probe_redist_base_addrs(gd->gicr_base, gicr_base_pa);
#endif
	gd->chip.ops = &gic_ops;

	if (IS_ENABLED(CFG_DT))
		gd->chip.dt_get_irq = gic_dt_get_irq;
}

void gic_init_v3(paddr_t gicc_base_pa, paddr_t gicd_base_pa,
		 paddr_t gicr_base_pa)
{
	struct gic_data __maybe_unused *gd = &gic_data;
	size_t __maybe_unused n = 0;

	gic_init_base_addr(gicc_base_pa, gicd_base_pa, gicr_base_pa);

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	/* The GIC configuration has already been initialized by TF-A */
	if (affinity_routing_is_enabled(gd)) {
		/* Secure affinity routing enabled */
		vaddr_t gicr_base = get_gicr_base(gd);

		if (gicr_base) {
			gd->per_cpu_group_status = io_read32(gicr_base +
							     GICR_IGROUPR0);
			gd->per_cpu_group_modifier = io_read32(gicr_base +
							       GICR_IGRPMODR0);
		} else {
			IMSG("GIC redistributor base address not provided");
			IMSG("Assuming default GIC group status and modifier");
			gd->per_cpu_group_status = 0xffff00ff;
			gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
		}
	} else {
		/* Legacy operation with secure affinity routing disabled */
		gd->per_cpu_group_status = io_read32(gd->gicd_base +
						     GICD_IGROUPR(0));
		gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
	}
#else /*!CFG_WITH_ARM_TRUSTED_FW*/
	/*
	 * Without TF-A, the GIC is always configured for legacy
	 * operation with secure affinity routing disabled.
	 */
	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
		/* Disable interrupts */
		io_write32(gd->gicd_base + GICD_ICENABLER(n), 0xffffffff);

		/* Make interrupts non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(n), 0xffffffff);

		/* Mark interrupts non-secure */
		if (n == 0) {
			/*
			 * Per-CPU interrupt configuration:
			 * ID0-ID7(SGI)   for Non-secure interrupts
			 * ID8-ID15(SGI)  for Secure interrupts.
			 * All PPIs are configured as Non-secure interrupts.
			 */
			gd->per_cpu_group_status = 0xffff00ff;
			gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
			io_write32(gd->gicd_base + GICD_IGROUPR(n),
				   gd->per_cpu_group_status);
		} else {
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffffffff);
		}
	}

	/*
	 * Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
	io_setbits32(gd->gicd_base + GICD_CTLR, GICD_CTLR_ENABLEGRP1S);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR, GICC_CTLR_FIQEN |
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1);
	io_setbits32(gd->gicd_base + GICD_CTLR,
		     GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1NS);
#endif
#endif /*!CFG_WITH_ARM_TRUSTED_FW*/

	interrupt_main_init(&gic_data.chip);
}

static void gic_it_add(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Disable the interrupt */
	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
	/* Make it non-pending */
	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
	/* Assign it to group0 */
	io_clrbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
#if defined(CFG_ARM_GICV3)
	/* Assign it to group1S */
	io_setbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
#endif
}

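/*
 * GICD_ITARGETSR holds one byte-sized CPU target field per interrupt,
 * four interrupts per register, hence the shift and mask arithmetic
 * below.
 */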
static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
				uint8_t cpu_mask)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
	uint32_t target, target_shift;
	vaddr_t itargetsr = gd->gicd_base +
			    GICD_ITARGETSR(it / NUM_TARGETS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Route it to selected CPUs */
	target = io_read32(itargetsr);
	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
	target |= cpu_mask << target_shift;
	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA, target, itargetsr);
	io_write32(itargetsr, target);
	DMSG("cpu_mask: 0x%x", io_read32(itargetsr));
}

static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Set the priority of the interrupt */
	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
		prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
	io_write8(gd->gicd_base + GICD_IPRIORITYR(0) + it, prio);
}

static void gic_it_enable(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
	vaddr_t base = gd->gicd_base;

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(base + GICD_IGROUPR(idx)) & mask));

	/* Enable the interrupt */
	io_write32(base + GICD_ISENABLER(idx), mask);
}

static void gic_it_disable(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Disable the interrupt */
	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
}

static void gic_it_set_pending(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Should be Peripheral Interrupt */
	assert(it >= NUM_SGI);

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_ISPENDR(idx), mask);
}

static void assert_cpu_mask_is_valid(uint32_t cpu_mask)
{
	bool __maybe_unused to_others = cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS;
	bool __maybe_unused to_current = cpu_mask & ITR_CPU_MASK_TO_THIS_CPU;
	bool __maybe_unused to_list = cpu_mask & 0xff;

	/* One and only one of the bit fields shall be non-zero */
	assert(to_others + to_current + to_list == 1);
}

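/*
 * Raises an SGI either through the GICv3 ICC_SGI1R/ICC_ASGI1R system
 * registers, with the target cluster encoded in affinity fields plus a
 * target list, or through the GICv2 GICD_SGIR register, with a target
 * list filter and the NSATT bit selecting the interrupt group.
 */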
static void gic_it_raise_sgi(struct gic_data *gd __maybe_unused, size_t it,
			     uint32_t cpu_mask, bool ns)
{
#if defined(CFG_ARM_GICV3)
	uint32_t mask_id = it & 0xf;
	uint64_t mask = SHIFT_U64(mask_id, 24);

	assert_cpu_mask_is_valid(cpu_mask);

	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
		mask |= BIT64(GICC_SGI_IRM_BIT);
	} else {
		uint64_t mpidr = read_mpidr();
		uint64_t mask_aff1 = (mpidr & MPIDR_AFF1_MASK) >>
				     MPIDR_AFF1_SHIFT;
		uint64_t mask_aff2 = (mpidr & MPIDR_AFF2_MASK) >>
				     MPIDR_AFF2_SHIFT;
		uint64_t mask_aff3 = (mpidr & MPIDR_AFF3_MASK) >>
				     MPIDR_AFF3_SHIFT;

		mask |= SHIFT_U64(mask_aff1, GICC_SGI_AFF1_SHIFT);
		mask |= SHIFT_U64(mask_aff2, GICC_SGI_AFF2_SHIFT);
		mask |= SHIFT_U64(mask_aff3, GICC_SGI_AFF3_SHIFT);

		if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
			mask |= BIT32(mpidr & 0xf);
		} else {
			/*
			 * Only sending SGIs to cores in the same
			 * cluster is supported for now.
			 */
			mask |= cpu_mask & 0xff;
		}
	}

	/* Raise the interrupt */
	if (ns)
		write_icc_asgi1r(mask);
	else
		write_icc_sgi1r(mask);
#else
	uint32_t mask_id = it & GICD_SGIR_SIGINTID_MASK;
	uint32_t mask_group = ns;
	uint32_t mask = mask_id;

	assert_cpu_mask_is_valid(cpu_mask);

	mask |= SHIFT_U32(mask_group, GICD_SGIR_NSATT_SHIFT);
	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
		mask |= SHIFT_U32(GICD_SGIR_TO_OTHER_CPUS,
				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
	} else if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
		mask |= SHIFT_U32(GICD_SGIR_TO_THIS_CPU,
				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
	} else {
		mask |= SHIFT_U32(cpu_mask & 0xff,
				  GICD_SGIR_CPU_TARGET_LIST_SHIFT);
	}

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_SGIR, mask);
#endif
}

static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	return read_icc_iar1();
#else
	return io_read32(gd->gicc_base + GICC_IAR);
#endif
}

static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	write_icc_eoir1(eoir);
#else
	io_write32(gd->gicc_base + GICC_EOIR, eoir);
#endif
}

static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);
	return !!(io_read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
}

static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);
	return !!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
}

static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
{
	size_t reg_idx = it / NUM_TARGETS_PER_REG;
	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
				ITARGETSR_FIELD_BITS;
	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
	uint32_t target = io_read32(gd->gicd_base + GICD_ITARGETSR(reg_idx));

	assert(gd == &gic_data);
	return (target & target_mask) >> target_shift;
}

void gic_dump_state(void)
{
	struct gic_data *gd = &gic_data;
	int i = 0;

#if defined(CFG_ARM_GICV3)
	DMSG("GICC_CTLR: 0x%x", read_icc_ctlr());
#else
	DMSG("GICC_CTLR: 0x%x", io_read32(gd->gicc_base + GICC_CTLR));
#endif
	DMSG("GICD_CTLR: 0x%x", io_read32(gd->gicd_base + GICD_CTLR));

	for (i = 0; i <= (int)gd->max_it; i++) {
		if (gic_it_is_enabled(gd, i)) {
			DMSG("irq%d: enabled, group:%d, target:%x", i,
			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
		}
	}
}

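/*
 * Acknowledges the highest priority pending interrupt by reading the
 * IAR, dispatches the registered handlers, then signals end of
 * interrupt by writing the unmodified IAR value back to the EOIR. IDs
 * above the probed maximum (such as the spurious ID 1023) are not
 * dispatched.
 */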
static void __maybe_unused gic_native_itr_handler(void)
{
	struct gic_data *gd = &gic_data;
	uint32_t iar = 0;
	uint32_t id = 0;

	iar = gic_read_iar(gd);
	id = iar & GICC_IAR_IT_ID_MASK;

	if (id <= gd->max_it)
		interrupt_call_handlers(&gd->chip, id);
	else
		DMSG("ignoring interrupt %" PRIu32, id);

	gic_write_eoir(gd, iar);
}

#ifndef CFG_CORE_WORKAROUND_ARM_NMFI
/* Override interrupt_main_handler() with driver implementation */
void interrupt_main_handler(void)
{
	gic_native_itr_handler();
}
#endif /*CFG_CORE_WORKAROUND_ARM_NMFI*/

static void gic_op_add(struct itr_chip *chip, size_t it,
		       uint32_t type __unused,
		       uint32_t prio __unused)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_add(gd, it);
	/* Set the CPU mask to deliver interrupts to any online core */
	gic_it_set_cpu_mask(gd, it, 0xff);
	gic_it_set_prio(gd, it, 0x1);
}

static void gic_op_enable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_enable(gd, it);
}

static void gic_op_disable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_disable(gd, it);
}

static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_set_pending(gd, it);
}

static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			     uint32_t cpu_mask)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);
	bool ns = false;

	assert(gd == &gic_data);

	/* Should be Software Generated Interrupt */
	assert(it < NUM_SGI);

	ns = BIT32(it) & gd->per_cpu_group_status;
	gic_it_raise_sgi(gd, it, cpu_mask, ns);
}

static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_set_cpu_mask(gd, it, cpu_mask);
}

#ifdef CFG_DT
/* Callback for "interrupts" and "interrupts-extended" DT node properties */
static TEE_Result dt_get_gic_chip_cb(struct dt_pargs *arg, void *priv_data,
				     struct itr_desc *itr_desc)
{
	int itr_num = DT_INFO_INVALID_INTERRUPT;
	struct itr_chip *chip = priv_data;
	uint32_t phandle_args[2] = { };
	uint32_t type = 0;
	uint32_t prio = 0;

	assert(arg && itr_desc);

	/*
	 * gic_dt_get_irq() expects the phandle arguments still in DT
	 * format (big-endian) whereas struct dt_pargs carries them
	 * converted to CPU endianness, so swap the phandle arguments
	 * back. gic_dt_get_irq() consumes only the first 2 arguments.
	 */
	if (arg->args_count < 2)
		return TEE_ERROR_GENERIC;
	phandle_args[0] = cpu_to_fdt32(arg->args[0]);
	phandle_args[1] = cpu_to_fdt32(arg->args[1]);

	itr_num = gic_dt_get_irq((const void *)phandle_args, 2, &type, &prio);
	if (itr_num == DT_INFO_INVALID_INTERRUPT)
		return TEE_ERROR_GENERIC;

	gic_op_add(chip, itr_num, type, prio);

	itr_desc->chip = chip;
	itr_desc->itr_num = itr_num;

	return TEE_SUCCESS;
}

static TEE_Result gic_probe(const void *fdt, int offs, const void *cd __unused)
{
	if (interrupt_register_provider(fdt, offs, dt_get_gic_chip_cb,
					&gic_data.chip))
		panic();

	return TEE_SUCCESS;
}

static const struct dt_device_match gic_match_table[] = {
	{ .compatible = "arm,cortex-a15-gic" },
	{ .compatible = "arm,cortex-a7-gic" },
	{ .compatible = "arm,cortex-a5-gic" },
	{ .compatible = "arm,cortex-a9-gic" },
	{ .compatible = "arm,gic-400" },
	{ }
};

DEFINE_DT_DRIVER(gic_dt_driver) = {
	.name = "gic",
	.match_table = gic_match_table,
	.probe = gic_probe,
};
#endif /*CFG_DT*/
994