// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2017, 2023-2024 Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <compiler.h>
#include <config.h>
#include <drivers/gic.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <initcall.h>
#include <io.h>
#include <keep.h>
#include <kernel/dt.h>
#include <kernel/dt_driver.h>
#include <kernel/interrupt.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <libfdt.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <trace.h>
#include <util.h>

/* Offsets from gic.gicc_base */
#define GICC_CTLR		(0x000)
#define GICC_PMR		(0x004)
#define GICC_IAR		(0x00C)
#define GICC_EOIR		(0x010)

#define GICC_CTLR_ENABLEGRP0	(1 << 0)
#define GICC_CTLR_ENABLEGRP1	(1 << 1)
#define GICC_CTLR_FIQEN		(1 << 3)

/* Offsets from gic.gicd_base */
#define GICD_CTLR		(0x000)
#define GICD_TYPER		(0x004)
#define GICD_IGROUPR(n)		(0x080 + (n) * 4)
#define GICD_ISENABLER(n)	(0x100 + (n) * 4)
#define GICD_ICENABLER(n)	(0x180 + (n) * 4)
#define GICD_ISPENDR(n)		(0x200 + (n) * 4)
#define GICD_ICPENDR(n)		(0x280 + (n) * 4)
#define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
#define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
#define GICD_IGROUPMODR(n)	(0xd00 + (n) * 4)
#define GICD_SGIR		(0xF00)

#ifdef CFG_ARM_GICV3
#define GICD_PIDR2		(0xFFE8)
#else
/* Named ICPIDR2 in the GICv2 specification */
#define GICD_PIDR2		(0xFE8)
#endif

#define GICD_CTLR_ENABLEGRP0	BIT32(0)
#define GICD_CTLR_ENABLEGRP1NS	BIT32(1)
#define GICD_CTLR_ENABLEGRP1S	BIT32(2)
#define GICD_CTLR_ARE_S		BIT32(4)
#define GICD_CTLR_ARE_NS	BIT32(5)

/* Offsets from gic.gicr_base[core_pos] */
#define GICR_V3_PCPUBASE_SIZE	(2 * 64 * 1024)
#define GICR_SGI_BASE_OFFSET	(64 * 1024)
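/*
 * Each GICv3 redistributor occupies two contiguous 64KB frames: RD_base
 * with control registers such as GICR_CTLR and GICR_TYPER, followed by
 * SGI_base with the SGI/PPI configuration registers, hence
 * GICR_SGI_BASE_OFFSET in the offsets below.
 */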
#define GICR_CTLR		(0x00)
#define GICR_TYPER		(0x08)

#define GICR_IGROUPR0		(GICR_SGI_BASE_OFFSET + 0x080)
#define GICR_IGRPMODR0		(GICR_SGI_BASE_OFFSET + 0xD00)
#define GICR_ICENABLER0		(GICR_SGI_BASE_OFFSET + 0x180)
#define GICR_ICPENDR0		(GICR_SGI_BASE_OFFSET + 0x280)
#define GICR_ISENABLER0		(GICR_SGI_BASE_OFFSET + 0x100)
#define GICR_ICFGR0		(GICR_SGI_BASE_OFFSET + 0xC00)
#define GICR_ICFGR1		(GICR_SGI_BASE_OFFSET + 0xC04)
#define GICR_IPRIORITYR(n)	(GICR_SGI_BASE_OFFSET + 0x400 + (n) * 4)

#define GICR_CTLR_RWP		BIT32(3)

#define GICR_TYPER_LAST		BIT64(4)
#define GICR_TYPER_AFF3_SHIFT	56
#define GICR_TYPER_AFF2_SHIFT	48
#define GICR_TYPER_AFF1_SHIFT	40
#define GICR_TYPER_AFF0_SHIFT	32

/*
 * The register name differs between GICv3 (GICD_PIDR2) and GICv2
 * (ICPIDR2) but the bit layout is the same
 */
#define GICD_PIDR2_ARCHREV_SHIFT	4
#define GICD_PIDR2_ARCHREV_MASK		0xF

/* Number of Private Peripheral Interrupts */
#define NUM_PPI	32

/* Number of Software Generated Interrupts */
#define NUM_SGI			16

/* Number of Non-secure Software Generated Interrupts */
#define NUM_NS_SGI		8

/* Number of interrupts in one register */
#define NUM_INTS_PER_REG	32

/* Number of targets in one register */
#define NUM_TARGETS_PER_REG	4

/* Field accessors for GICD_ITARGETSRn */
#define ITARGETSR_FIELD_BITS	8
#define ITARGETSR_FIELD_MASK	0xff

#define GICD_TYPER_IT_LINES_NUM_MASK	0x1f
#define GICC_IAR_IT_ID_MASK	0x3ff
#define GICC_IAR_CPU_ID_MASK	0x7
#define GICC_IAR_CPU_ID_SHIFT	10

#define GICC_SGI_IRM_BIT	40
#define GICC_SGI_AFF1_SHIFT	16
#define GICC_SGI_AFF2_SHIFT	32
#define GICC_SGI_AFF3_SHIFT	48
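/*
 * The GICC_SGI_* shifts above follow the ICC_SGI1R_EL1 layout: Aff1/Aff2/
 * Aff3 select the target cluster, the target list occupies bits [15:0]
 * and IRM (bit 40) broadcasts to all PEs except the requesting one.
 */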

#define GICD_SGIR_SIGINTID_MASK			0xf
#define GICD_SGIR_TO_OTHER_CPUS			0x1
#define GICD_SGIR_TO_THIS_CPU			0x2
#define GICD_SGIR_TARGET_LIST_FILTER_SHIFT	24
#define GICD_SGIR_NSATT_SHIFT			15
#define GICD_SGIR_CPU_TARGET_LIST_SHIFT		16

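/*
 * struct gic_data - GIC driver state
 * @gicc_base: CPU interface base address (unused with GICv3 system registers)
 * @gicd_base: distributor base address
 * @gicr_base: per-CPU redistributor base addresses (GICv3 only)
 * @max_it: largest implemented interrupt ID
 * @per_cpu_group_status: SGI/PPI group status (GICD_IGROUPR(0)/GICR_IGROUPR0)
 * @per_cpu_group_modifier: SGI/PPI group modifier (GICR_IGRPMODR0)
 * @per_cpu_enable: SGI/PPI enable bits applied on each CPU
 * @chip: interrupt chip exposing the GIC to the interrupt framework
 */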
struct gic_data {
	vaddr_t gicc_base;
	vaddr_t gicd_base;
#if defined(CFG_ARM_GICV3)
	vaddr_t gicr_base[CFG_TEE_CORE_NB_CORE];
#endif
	size_t max_it;
	uint32_t per_cpu_group_status;
	uint32_t per_cpu_group_modifier;
	uint32_t per_cpu_enable;
	struct itr_chip chip;
};

static bool gic_primary_done __nex_bss;
static struct gic_data gic_data __nex_bss;

static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t type,
		       uint32_t prio);
static void gic_op_enable(struct itr_chip *chip, size_t it);
static void gic_op_disable(struct itr_chip *chip, size_t it);
static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			     uint32_t cpu_mask);
static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);

static const struct itr_ops gic_ops = {
	.add = gic_op_add,
	.mask = gic_op_disable,
	.unmask = gic_op_enable,
	.enable = gic_op_enable,
	.disable = gic_op_disable,
	.raise_pi = gic_op_raise_pi,
	.raise_sgi = gic_op_raise_sgi,
	.set_affinity = gic_op_set_affinity,
};
DECLARE_KEEP_PAGER(gic_ops);

static vaddr_t __maybe_unused get_gicr_base(struct gic_data *gd __maybe_unused)
{
#if defined(CFG_ARM_GICV3)
	return gd->gicr_base[get_core_pos()];
#else
	return 0;
#endif
}

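/*
 * With affinity routing enabled for the Secure state (GICD_CTLR.ARE_S),
 * SGI/PPI configuration lives in the per-CPU redistributor (GICR)
 * registers instead of the banked GICD registers.
 */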
static bool affinity_routing_is_enabled(struct gic_data *gd)
{
	return IS_ENABLED(CFG_ARM_GICV3) &&
	       io_read32(gd->gicd_base + GICD_CTLR) & GICD_CTLR_ARE_S;
}

static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
{
	int i;
	uint32_t old_ctlr;
	size_t ret = 0;
	size_t max_regs = io_read32(gicd_base + GICD_TYPER) &
			  GICD_TYPER_IT_LINES_NUM_MASK;

	/* Probe which interrupt number is the largest */
#if defined(CFG_ARM_GICV3)
	old_ctlr = read_icc_ctlr();
	write_icc_ctlr(0);
#else
	old_ctlr = io_read32(gicc_base + GICC_CTLR);
	io_write32(gicc_base + GICC_CTLR, 0);
#endif
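	/*
	 * GICD_ISENABLER bits for implemented interrupt IDs read back as
	 * one after writing all-ones; the highest bit that sticks gives
	 * the largest implemented interrupt ID.
	 */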
	for (i = max_regs; i >= 0; i--) {
		uint32_t old_reg;
		uint32_t reg;
		int b;

		old_reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ISENABLER(i), 0xffffffff);
		reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ICENABLER(i), ~old_reg);
		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
			if (BIT32(b) & reg) {
				ret = i * NUM_INTS_PER_REG + b;
				goto out;
			}
		}
	}
out:
#if defined(CFG_ARM_GICV3)
	write_icc_ctlr(old_ctlr);
#else
	io_write32(gicc_base + GICC_CTLR, old_ctlr);
#endif
	return ret;
}

static void gicr_wait_for_pending_write(vaddr_t gicr_base)
{
	/*
	 * Wait for changes to
	 * - GICR_ICENABLER0
	 * - GICR_CTLR.DPG1S
	 * - GICR_CTLR.DPG1NS
	 * - GICR_CTLR.DPG0
	 * to be visible to all agents in the system.
	 */
	while (io_read32(gicr_base + GICR_CTLR) & GICR_CTLR_RWP)
		;
}

static void gicv3_sync_redist_config(struct gic_data *gd)
{
	vaddr_t gicr_base = get_gicr_base(gd);
	bool need_sync = false;
	uint32_t gmod0 = 0;
	uint32_t grp0 = 0;
	size_t n = 0;

	/*
	 * If gicr_base isn't available there's nothing to synchronize:
	 * donating an SGI with gic_init_donate_sgi_to_ns() would already
	 * have panicked without a redistributor base.
	 */
	if (!gicr_base)
		return;

	grp0 = io_read32(gicr_base + GICR_IGROUPR0);
	gmod0 = io_read32(gicr_base + GICR_IGRPMODR0);
	for (n = GIC_SGI_SEC_BASE; n < GIC_SPI_BASE; n++) {
		/* Ignore matching bits */
		if (!(BIT32(n) & (grp0 ^ gd->per_cpu_group_status)) &&
		    !(BIT32(n) & (gmod0 ^ gd->per_cpu_group_modifier)))
			continue;
		/*
		 * SGI/PPI-n differs from primary CPU configuration,
		 * let's sync up.
		 */
		need_sync = true;

		/* Disable interrupt */
		io_write32(gicr_base + GICR_ICENABLER0, BIT32(n));

		/* Wait for the write to GICR_ICENABLER0 to propagate */
		gicr_wait_for_pending_write(gicr_base);

		/* Make interrupt non-pending */
		io_write32(gicr_base + GICR_ICPENDR0, BIT32(n));

		if (BIT32(n) & gd->per_cpu_group_status)
			grp0 |= BIT32(n);
		else
			grp0 &= ~BIT32(n);
		if (BIT32(n) & gd->per_cpu_group_modifier)
			gmod0 |= BIT32(n);
		else
			gmod0 &= ~BIT32(n);
	}

	if (need_sync) {
		io_write32(gicr_base + GICR_IGROUPR0, grp0);
		io_write32(gicr_base + GICR_IGRPMODR0, gmod0);
		io_write32(gicr_base + GICR_ISENABLER0, gd->per_cpu_enable);
	}
}

static void gic_legacy_sync_dist_config(struct gic_data *gd)
{
	bool need_sync = false;
	uint32_t grp0 = 0;
	size_t n = 0;

	grp0 = io_read32(gd->gicd_base + GICD_IGROUPR(0));
	for (n = GIC_SGI_SEC_BASE; n < GIC_SPI_BASE; n++) {
		/* Ignore matching bits */
		if (!(BIT32(n) & (grp0 ^ gd->per_cpu_group_status)))
			continue;
		/*
		 * SGI/PPI-n differs from primary CPU configuration,
		 * let's sync up.
		 */
		need_sync = true;

		/* Disable interrupt */
		io_write32(gd->gicd_base + GICD_ICENABLER(0), BIT(n));

		/* Make interrupt non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(0), BIT(n));

		if (BIT32(n) & gd->per_cpu_group_status)
			grp0 |= BIT32(n);
		else
			grp0 &= ~BIT32(n);
	}

	if (need_sync) {
		io_write32(gd->gicd_base + GICD_IGROUPR(0), grp0);
		io_write32(gd->gicd_base + GICD_ISENABLER(0),
			   gd->per_cpu_enable);
	}
}

static void init_gic_per_cpu(struct gic_data *gd)
{
	io_write32(gd->gicd_base + GICD_IGROUPR(0), gd->per_cpu_group_status);

	/*
	 * Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR,
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		   GICC_CTLR_FIQEN);
#endif
}

void gic_init_per_cpu(void)
{
	struct gic_data *gd = &gic_data;

#if defined(CFG_ARM_GICV3)
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif

	if (IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW)) {
		/*
		 * GIC is already initialized by TF-A; we only need to
		 * handle any SGI or PPI configuration changes.
		 */
		if (affinity_routing_is_enabled(gd))
			gicv3_sync_redist_config(gd);
		else
			gic_legacy_sync_dist_config(gd);
	} else {
		/*
		 * Non-TF-A case where all CPU specific configuration
		 * of GIC must be done here.
		 */
		init_gic_per_cpu(gd);
	}
}

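/*
 * Reassign a secure SGI to the non-secure world. Expected to be called
 * during boot after gic_init_v3() on the primary CPU; secondary CPUs pick
 * up the change when gic_init_per_cpu() synchronizes their configuration.
 */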
void gic_init_donate_sgi_to_ns(size_t it)
{
	struct gic_data *gd = &gic_data;

	assert(it >= GIC_SGI_SEC_BASE && it <= GIC_SGI_SEC_MAX);

	/* Assert it's secure to start with. */
	assert(!(gd->per_cpu_group_status & BIT32(it)) &&
	       (gd->per_cpu_group_modifier & BIT32(it)));

	gd->per_cpu_group_modifier &= ~BIT32(it);
	gd->per_cpu_group_status |= BIT32(it);

	if (affinity_routing_is_enabled(gd)) {
		vaddr_t gicr_base = get_gicr_base(gd);

		if (!gicr_base)
			panic("GICR_BASE missing");

		/* Disable interrupt */
		io_write32(gicr_base + GICR_ICENABLER0, BIT32(it));

		/* Wait for the write to GICR_ICENABLER0 to propagate */
		gicr_wait_for_pending_write(gicr_base);

		/* Make interrupt non-pending */
		io_write32(gicr_base + GICR_ICPENDR0, BIT32(it));

		/* Make it non-secure */
		io_write32(gicr_base + GICR_IGROUPR0, gd->per_cpu_group_status);
		io_write32(gicr_base + GICR_IGRPMODR0,
			   gd->per_cpu_group_modifier);
	} else {
		/* Disable interrupt */
		io_write32(gd->gicd_base + GICD_ICENABLER(0), BIT(it));

		/* Make interrupt non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(0), BIT(it));

		/* Make it non-secure */
		io_write32(gd->gicd_base + GICD_IGROUPR(0),
			   gd->per_cpu_group_status);
	}
}

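/*
 * Convert a DT interrupt specifier to a GIC interrupt ID. For example, a
 * hypothetical node with interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>
 * maps to ID 34 + 32 = 66 since SPI IDs start at 32 (PPI IDs start at 16).
 */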
static int gic_dt_get_irq(const uint32_t *properties, int count, uint32_t *type,
			  uint32_t *prio)
{
	int it_num = DT_INFO_INVALID_INTERRUPT;

	if (type)
		*type = IRQ_TYPE_NONE;

	if (prio)
		*prio = 0;

	if (!properties || count < 2)
		return DT_INFO_INVALID_INTERRUPT;

	it_num = fdt32_to_cpu(properties[1]);

	switch (fdt32_to_cpu(properties[0])) {
	case GIC_PPI:
		it_num += 16;
		break;
	case GIC_SPI:
		it_num += 32;
		break;
	default:
		it_num = DT_INFO_INVALID_INTERRUPT;
	}

	return it_num;
}

static void __maybe_unused probe_redist_base_addrs(vaddr_t *gicr_base_addrs,
						   paddr_t gicr_base_pa)
{
	size_t sz = GICR_V3_PCPUBASE_SIZE;
	paddr_t pa = gicr_base_pa;
	size_t core_pos = 0;
	uint64_t mt_bit = 0;
	uint64_t mpidr = 0;
	uint64_t tv = 0;
	vaddr_t va = 0;

#ifdef ARM64
	mt_bit = read_mpidr_el1() & MPIDR_MT_MASK;
#endif
	do {
		va = core_mmu_get_va(pa, MEM_AREA_IO_SEC, sz);
		if (!va)
			panic();
		tv = io_read64(va + GICR_TYPER);

		/*
		 * Extract an MPIDR from the GICR_TYPER value to compute the
		 * core position of this redistributor instance.
		 */
		mpidr = mt_bit;
		mpidr |= SHIFT_U64((tv >> GICR_TYPER_AFF3_SHIFT) &
				   MPIDR_AFFLVL_MASK, MPIDR_AFF3_SHIFT);
		mpidr |= (tv >> GICR_TYPER_AFF0_SHIFT) &
			 (MPIDR_AFF0_MASK | MPIDR_AFF1_MASK | MPIDR_AFF2_MASK);
		core_pos = get_core_pos_mpidr(mpidr);
		if (core_pos < CFG_TEE_CORE_NB_CORE) {
			DMSG("GICR_BASE[%zu] at %#"PRIxVA, core_pos, va);
			gicr_base_addrs[core_pos] = va;
		} else {
			EMSG("Skipping too large core_pos %zu from GICR_TYPER",
			     core_pos);
		}
		pa += sz;
	} while (!(tv & GICR_TYPER_LAST));
}

static void gic_init_base_addr(paddr_t gicc_base_pa, paddr_t gicd_base_pa,
			       paddr_t gicr_base_pa __maybe_unused)
{
	struct gic_data *gd = &gic_data;
	vaddr_t gicc_base = 0;
	vaddr_t gicd_base = 0;
	uint32_t vers __maybe_unused = 0;

	assert(cpu_mmu_enabled());

	gicd_base = core_mmu_get_va(gicd_base_pa, MEM_AREA_IO_SEC,
				    GIC_DIST_REG_SIZE);
	if (!gicd_base)
		panic();

	vers = io_read32(gicd_base + GICD_PIDR2);
	vers >>= GICD_PIDR2_ARCHREV_SHIFT;
	vers &= GICD_PIDR2_ARCHREV_MASK;

	if (IS_ENABLED(CFG_ARM_GICV3)) {
		assert(vers == 3);
	} else {
		assert(vers == 2 || vers == 1);
		gicc_base = core_mmu_get_va(gicc_base_pa, MEM_AREA_IO_SEC,
					    GIC_CPU_REG_SIZE);
		if (!gicc_base)
			panic();
	}

	gd->gicc_base = gicc_base;
	gd->gicd_base = gicd_base;
	gd->max_it = probe_max_it(gicc_base, gicd_base);
#if defined(CFG_ARM_GICV3)
	if (affinity_routing_is_enabled(gd) && gicr_base_pa)
		probe_redist_base_addrs(gd->gicr_base, gicr_base_pa);
#endif
	gd->chip.ops = &gic_ops;

	if (IS_ENABLED(CFG_DT))
		gd->chip.dt_get_irq = gic_dt_get_irq;
}

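/*
 * Primary CPU GIC initialization. A typical boot flow (a sketch; the base
 * addresses are platform specific) calls gic_init_v3() once on the
 * primary CPU and gic_init_per_cpu() on each CPU booted afterwards.
 */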
void gic_init_v3(paddr_t gicc_base_pa, paddr_t gicd_base_pa,
		 paddr_t gicr_base_pa)
{
	struct gic_data __maybe_unused *gd = &gic_data;
	size_t __maybe_unused n = 0;

	gic_init_base_addr(gicc_base_pa, gicd_base_pa, gicr_base_pa);

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	/* Base GIC configuration is already initialized by TF-A */
	if (affinity_routing_is_enabled(gd)) {
		/* Secure affinity routing enabled */
		vaddr_t gicr_base = get_gicr_base(gd);

		if (gicr_base) {
			gd->per_cpu_group_status = io_read32(gicr_base +
							     GICR_IGROUPR0);
			gd->per_cpu_group_modifier = io_read32(gicr_base +
							       GICR_IGRPMODR0);
		} else {
			IMSG("GIC redistributor base address not provided");
			IMSG("Assuming default GIC group status and modifier");
			gd->per_cpu_group_status = 0xffff00ff;
			gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
		}
	} else {
		/* Legacy operation with secure affinity routing disabled */
		gd->per_cpu_group_status = io_read32(gd->gicd_base +
						     GICD_IGROUPR(0));
		gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
	}
#else /*!CFG_WITH_ARM_TRUSTED_FW*/
	/*
	 * Without TF-A, the GIC is always configured for legacy operation
	 * with secure affinity routing disabled.
	 */
	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
		/* Disable interrupts */
		io_write32(gd->gicd_base + GICD_ICENABLER(n), 0xffffffff);

		/* Make interrupts non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(n), 0xffffffff);

		/* Mark interrupts non-secure */
		if (n == 0) {
			/*
			 * Per-CPU interrupt configuration:
			 * ID0-ID7(SGI)   for Non-secure interrupts
			 * ID8-ID15(SGI)  for Secure interrupts
			 * All PPIs configured as Non-secure interrupts
			 */
			gd->per_cpu_group_status = 0xffff00ff;
			gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
			io_write32(gd->gicd_base + GICD_IGROUPR(n),
				   gd->per_cpu_group_status);
		} else {
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffffffff);
		}
	}

	/*
	 * Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
	io_setbits32(gd->gicd_base + GICD_CTLR, GICD_CTLR_ENABLEGRP1S);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR, GICC_CTLR_FIQEN |
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1);
	io_setbits32(gd->gicd_base + GICD_CTLR,
		     GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1NS);
#endif
#endif /*!CFG_WITH_ARM_TRUSTED_FW*/

	interrupt_main_init(&gic_data.chip);
}

static void gic_it_add(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Disable the interrupt */
	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
	/* Make it non-pending */
	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
	/* Assign it to group0 */
	io_clrbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
#if defined(CFG_ARM_GICV3)
	/* Assign it to group1S */
	io_setbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
#endif
}

static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
				uint8_t cpu_mask)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
	uint32_t target = 0;
	uint32_t target_shift = 0;
	vaddr_t itargetsr = gd->gicd_base +
			    GICD_ITARGETSR(it / NUM_TARGETS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Route it to selected CPUs */
	target = io_read32(itargetsr);
	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
	target |= cpu_mask << target_shift;
	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA, target, itargetsr);
	io_write32(itargetsr, target);
	DMSG("cpu_mask: 0x%x", io_read32(itargetsr));
}

static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Set the priority: GICD_IPRIORITYR holds one byte per interrupt ID */
	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
	     prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
	io_write8(gd->gicd_base + GICD_IPRIORITYR(0) + it, prio);
}

static void gic_it_enable(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
	vaddr_t base = gd->gicd_base;

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(base + GICD_IGROUPR(idx)) & mask));

	/* Enable the interrupt */
	io_write32(base + GICD_ISENABLER(idx), mask);
}

static void gic_it_disable(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Disable the interrupt */
	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
}

static void gic_it_set_pending(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Should be Peripheral Interrupt */
	assert(it >= NUM_SGI);

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_ISPENDR(idx), mask);
}

static void assert_cpu_mask_is_valid(uint32_t cpu_mask)
{
	bool __maybe_unused to_others = cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS;
	bool __maybe_unused to_current = cpu_mask & ITR_CPU_MASK_TO_THIS_CPU;
	bool __maybe_unused to_list = cpu_mask & 0xff;

	/* One and only one of the bit fields shall be non-zero */
	assert(to_others + to_current + to_list == 1);
}
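
/*
 * For example, raising an SGI with cpu_mask = ITR_CPU_MASK_TO_OTHER_CPUS
 * targets all cores except the calling one, while a plain 8-bit mask such
 * as BIT(0) | BIT(1) targets cores 0 and 1 of the current cluster.
 */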

static void gic_it_raise_sgi(struct gic_data *gd __maybe_unused, size_t it,
			     uint32_t cpu_mask, bool ns)
{
#if defined(CFG_ARM_GICV3)
	uint32_t mask_id = it & 0xf;
	uint64_t mask = SHIFT_U64(mask_id, 24);

	assert_cpu_mask_is_valid(cpu_mask);

	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
		mask |= BIT64(GICC_SGI_IRM_BIT);
	} else {
		uint64_t mpidr = read_mpidr();
		uint64_t mask_aff1 = (mpidr & MPIDR_AFF1_MASK) >>
				     MPIDR_AFF1_SHIFT;
		uint64_t mask_aff2 = (mpidr & MPIDR_AFF2_MASK) >>
				     MPIDR_AFF2_SHIFT;
		uint64_t mask_aff3 = (mpidr & MPIDR_AFF3_MASK) >>
				     MPIDR_AFF3_SHIFT;

		mask |= SHIFT_U64(mask_aff1, GICC_SGI_AFF1_SHIFT);
		mask |= SHIFT_U64(mask_aff2, GICC_SGI_AFF2_SHIFT);
		mask |= SHIFT_U64(mask_aff3, GICC_SGI_AFF3_SHIFT);

		if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
			mask |= BIT32(mpidr & 0xf);
		} else {
			/*
			 * Only SGIs targeting cores in the same
			 * cluster are supported for now.
			 */
			mask |= cpu_mask & 0xff;
		}
	}

	/* Raise the interrupt */
	if (ns)
		write_icc_asgi1r(mask);
	else
		write_icc_sgi1r(mask);
#else
	uint32_t mask_id = it & GICD_SGIR_SIGINTID_MASK;
	uint32_t mask_group = ns;
	uint32_t mask = mask_id;

	assert_cpu_mask_is_valid(cpu_mask);

	mask |= SHIFT_U32(mask_group, GICD_SGIR_NSATT_SHIFT);
	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
		mask |= SHIFT_U32(GICD_SGIR_TO_OTHER_CPUS,
				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
	} else if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
		mask |= SHIFT_U32(GICD_SGIR_TO_THIS_CPU,
				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
	} else {
		mask |= SHIFT_U32(cpu_mask & 0xff,
				  GICD_SGIR_CPU_TARGET_LIST_SHIFT);
	}

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_SGIR, mask);
#endif
}

static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	return read_icc_iar1();
#else
	return io_read32(gd->gicc_base + GICC_IAR);
#endif
}

static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	write_icc_eoir1(eoir);
#else
	io_write32(gd->gicc_base + GICC_EOIR, eoir);
#endif
}

static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);
	return !!(io_read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
}

static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);
	return !!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
}

static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
{
	size_t reg_idx = it / NUM_TARGETS_PER_REG;
	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
				ITARGETSR_FIELD_BITS;
	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
	uint32_t target = io_read32(gd->gicd_base + GICD_ITARGETSR(reg_idx));

	assert(gd == &gic_data);
	return (target & target_mask) >> target_shift;
}

void gic_dump_state(void)
{
	struct gic_data *gd = &gic_data;
	int i = 0;

#if defined(CFG_ARM_GICV3)
	DMSG("GICC_CTLR: 0x%x", read_icc_ctlr());
#else
	DMSG("GICC_CTLR: 0x%x", io_read32(gd->gicc_base + GICC_CTLR));
#endif
	DMSG("GICD_CTLR: 0x%x", io_read32(gd->gicd_base + GICD_CTLR));

	for (i = 0; i <= (int)gd->max_it; i++) {
		if (gic_it_is_enabled(gd, i)) {
			DMSG("irq%d: enabled, group:%d, target:%x", i,
			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
		}
	}
}

static void __maybe_unused gic_native_itr_handler(void)
{
	struct gic_data *gd = &gic_data;
	uint32_t iar = 0;
	uint32_t id = 0;

	iar = gic_read_iar(gd);
	id = iar & GICC_IAR_IT_ID_MASK;

	if (id <= gd->max_it)
		interrupt_call_handlers(&gd->chip, id);
	else
		DMSG("ignoring interrupt %" PRIu32, id);

	gic_write_eoir(gd, iar);
}

#ifndef CFG_CORE_WORKAROUND_ARM_NMFI
/* Override interrupt_main_handler() with driver implementation */
void interrupt_main_handler(void)
{
	gic_native_itr_handler();
}
#endif /*CFG_CORE_WORKAROUND_ARM_NMFI*/

static void gic_op_add(struct itr_chip *chip, size_t it,
		       uint32_t type __unused,
		       uint32_t prio __unused)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	if (it < GIC_SPI_BASE) {
		if (gic_primary_done)
			panic("Cannot add SGI or PPI after boot");

		/* Assign it to Secure Group 1, G1S */
		gd->per_cpu_group_modifier |= BIT32(it);
		gd->per_cpu_group_status &= ~BIT32(it);
	}

	if (it < GIC_SPI_BASE && affinity_routing_is_enabled(gd)) {
		vaddr_t gicr_base = get_gicr_base(gd);

		if (!gicr_base)
			panic("GICR_BASE missing");

		/* Disable interrupt */
		io_write32(gicr_base + GICR_ICENABLER0, BIT32(it));

		/* Wait for the write to GICR_ICENABLER0 to propagate */
		gicr_wait_for_pending_write(gicr_base);

		/* Make interrupt non-pending */
		io_write32(gicr_base + GICR_ICPENDR0, BIT32(it));

		/* Make it Secure */
		io_write32(gicr_base + GICR_IGROUPR0, gd->per_cpu_group_status);
		io_write32(gicr_base + GICR_IGRPMODR0,
			   gd->per_cpu_group_modifier);
	} else {
		gic_it_add(gd, it);
		/* Set the CPU mask to deliver interrupts to any online core */
		gic_it_set_cpu_mask(gd, it, 0xff);
		gic_it_set_prio(gd, it, 0x1);
	}
}

static void gic_op_enable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	if (it < GIC_SPI_BASE)
		gd->per_cpu_enable |= BIT(it);

	if (it < GIC_SPI_BASE && affinity_routing_is_enabled(gd)) {
		vaddr_t gicr_base = get_gicr_base(gd);

		if (!gicr_base)
			panic("GICR_BASE missing");

		/* Assigned to G1S */
		assert(gd->per_cpu_group_modifier & BIT(it) &&
		       !(gd->per_cpu_group_status & BIT(it)));
		io_write32(gicr_base + GICR_ISENABLER0, gd->per_cpu_enable);
	} else {
		gic_it_enable(gd, it);
	}
}

static void gic_op_disable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_disable(gd, it);
}

static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_set_pending(gd, it);
}

static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			     uint32_t cpu_mask)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);
	bool ns = false;

	assert(gd == &gic_data);

	/* Should be Software Generated Interrupt */
	assert(it < NUM_SGI);

	ns = BIT32(it) & gd->per_cpu_group_status;
	gic_it_raise_sgi(gd, it, cpu_mask, ns);
}

static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_set_cpu_mask(gd, it, cpu_mask);
}

#ifdef CFG_DT
/* Callback for "interrupts" and "interrupts-extended" DT node properties */
static TEE_Result dt_get_gic_chip_cb(struct dt_pargs *arg, void *priv_data,
				     struct itr_desc *itr_desc)
{
	int itr_num = DT_INFO_INVALID_INTERRUPT;
	struct itr_chip *chip = priv_data;
	uint32_t phandle_args[2] = { };
	uint32_t type = 0;
	uint32_t prio = 0;

	assert(arg && itr_desc);

	/*
	 * gic_dt_get_irq() expects phandle arguments still in DT format
	 * (big-endian) whereas struct dt_pargs carries CPU-endian values.
	 * Therefore convert the phandle arguments back to big-endian.
	 * gic_dt_get_irq() consumes only the first 2 arguments.
	 */
	if (arg->args_count < 2)
		return TEE_ERROR_GENERIC;
	phandle_args[0] = cpu_to_fdt32(arg->args[0]);
	phandle_args[1] = cpu_to_fdt32(arg->args[1]);

	itr_num = gic_dt_get_irq((const void *)phandle_args, 2, &type, &prio);
	if (itr_num == DT_INFO_INVALID_INTERRUPT)
		return TEE_ERROR_GENERIC;

	gic_op_add(chip, itr_num, type, prio);

	itr_desc->chip = chip;
	itr_desc->itr_num = itr_num;

	return TEE_SUCCESS;
}

static TEE_Result gic_probe(const void *fdt, int offs, const void *cd __unused)
{
	if (interrupt_register_provider(fdt, offs, dt_get_gic_chip_cb,
					&gic_data.chip))
		panic();

	return TEE_SUCCESS;
}

static const struct dt_device_match gic_match_table[] = {
	{ .compatible = "arm,cortex-a15-gic" },
	{ .compatible = "arm,cortex-a7-gic" },
	{ .compatible = "arm,cortex-a5-gic" },
	{ .compatible = "arm,cortex-a9-gic" },
	{ .compatible = "arm,gic-400" },
	{ }
};

DEFINE_DT_DRIVER(gic_dt_driver) = {
	.name = "gic",
	.match_table = gic_match_table,
	.probe = gic_probe,
};
#endif /*CFG_DT*/

static TEE_Result gic_set_primary_done(void)
{
	gic_primary_done = true;
	return TEE_SUCCESS;
}

nex_release_init_resource(gic_set_primary_done);