xref: /optee_os/core/drivers/gic.c (revision 695be9d6057d6160ebf89cc8c2e1e94c9d976da7)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2017, 2023-2024 Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <compiler.h>
#include <config.h>
#include <drivers/gic.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <initcall.h>
#include <io.h>
#include <keep.h>
#include <kernel/dt.h>
#include <kernel/dt_driver.h>
#include <kernel/interrupt.h>
#include <kernel/misc.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <libfdt.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <trace.h>
#include <util.h>

/* Offsets from gic.gicc_base */
#define GICC_CTLR		(0x000)
#define GICC_PMR		(0x004)
#define GICC_IAR		(0x00C)
#define GICC_EOIR		(0x010)

#define GICC_CTLR_ENABLEGRP0	(1 << 0)
#define GICC_CTLR_ENABLEGRP1	(1 << 1)
#define GICC_CTLR_FIQEN		(1 << 3)

/* Offsets from gic.gicd_base */
#define GICD_CTLR		(0x000)
#define GICD_TYPER		(0x004)
#define GICD_IGROUPR(n)		(0x080 + (n) * 4)
#define GICD_ISENABLER(n)	(0x100 + (n) * 4)
#define GICD_ICENABLER(n)	(0x180 + (n) * 4)
#define GICD_ISPENDR(n)		(0x200 + (n) * 4)
#define GICD_ICPENDR(n)		(0x280 + (n) * 4)
#define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
#define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
#define GICD_IGROUPMODR(n)	(0xd00 + (n) * 4)
#define GICD_SGIR		(0xF00)

#ifdef CFG_ARM_GICV3
#define GICD_PIDR2		(0xFFE8)
#else
/* Called ICPIDR2 in GICv2 specification */
#define GICD_PIDR2		(0xFE8)
#endif

#define GICD_CTLR_ENABLEGRP0	BIT32(0)
#define GICD_CTLR_ENABLEGRP1NS	BIT32(1)
#define GICD_CTLR_ENABLEGRP1S	BIT32(2)
#define GICD_CTLR_ARE_S		BIT32(4)
#define GICD_CTLR_ARE_NS	BIT32(5)

/* Offsets from gic.gicr_base[core_pos] */
#define GICR_V3_PCPUBASE_SIZE	(2 * 64 * 1024)
#define GICR_SGI_BASE_OFFSET	(64 * 1024)
#define GICR_CTLR		(0x00)
#define GICR_TYPER		(0x08)

#define GICR_IGROUPR0		(GICR_SGI_BASE_OFFSET + 0x080)
#define GICR_IGRPMODR0		(GICR_SGI_BASE_OFFSET + 0xD00)
#define GICR_ICENABLER0		(GICR_SGI_BASE_OFFSET + 0x180)
#define GICR_ICPENDR0		(GICR_SGI_BASE_OFFSET + 0x280)
#define GICR_ISENABLER0		(GICR_SGI_BASE_OFFSET + 0x100)
#define GICR_ICFGR0		(GICR_SGI_BASE_OFFSET + 0xC00)
#define GICR_ICFGR1		(GICR_SGI_BASE_OFFSET + 0xC04)
#define GICR_IPRIORITYR(n)	(GICR_SGI_BASE_OFFSET + 0x400 + (n) * 4)

#define GICR_CTLR_RWP		BIT32(3)

#define GICR_TYPER_LAST		BIT64(4)
#define GICR_TYPER_AFF3_SHIFT	56
#define GICR_TYPER_AFF2_SHIFT	48
#define GICR_TYPER_AFF1_SHIFT	40
#define GICR_TYPER_AFF0_SHIFT	32

/* The register is named ICPIDR2 in GICv2 but uses the same bit layout */
#define GICD_PIDR2_ARCHREV_SHIFT	4
#define GICD_PIDR2_ARCHREV_MASK		0xF

/* Number of Private Peripheral Interrupts */
#define NUM_PPI	32

/* Number of Software Generated Interrupts */
#define NUM_SGI			16

/* Number of Non-secure Software Generated Interrupts */
#define NUM_NS_SGI		8

/* Number of interrupts in one register */
#define NUM_INTS_PER_REG	32

/* Number of targets in one register */
#define NUM_TARGETS_PER_REG	4

/* Constants to access the per-interrupt fields in ITARGETSRn */
#define ITARGETSR_FIELD_BITS	8
#define ITARGETSR_FIELD_MASK	0xff

#define GICD_TYPER_IT_LINES_NUM_MASK	0x1f
#define GICC_IAR_IT_ID_MASK	0x3ff
#define GICC_IAR_CPU_ID_MASK	0x7
#define GICC_IAR_CPU_ID_SHIFT	10

#define GICC_SGI_IRM_BIT	40
#define GICC_SGI_AFF1_SHIFT	16
#define GICC_SGI_AFF2_SHIFT	32
#define GICC_SGI_AFF3_SHIFT	48

#define GICD_SGIR_SIGINTID_MASK			0xf
#define GICD_SGIR_TO_OTHER_CPUS			0x1
#define GICD_SGIR_TO_THIS_CPU			0x2
#define GICD_SGIR_TARGET_LIST_FILTER_SHIFT	24
#define GICD_SGIR_NSATT_SHIFT			15
#define GICD_SGIR_CPU_TARGET_LIST_SHIFT		16

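/*
 * struct gic_data - GIC driver private data
 * @gicc_base: CPU interface base address (unused with GICv3)
 * @gicd_base: distributor base address
 * @gicr_base: per-core redistributor base addresses (GICv3 only)
 * @max_it: largest interrupt ID implemented by this GIC instance
 * @per_cpu_group_status: IGROUPR0 value mirrored to each CPU (SGIs/PPIs)
 * @per_cpu_group_modifier: IGRPMODR0 value mirrored to each CPU (SGIs/PPIs)
 * @per_cpu_enable: ISENABLER0 value mirrored to each CPU (SGIs/PPIs)
 * @chip: interrupt controller reference passed to the interrupt framework
 */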
struct gic_data {
	vaddr_t gicc_base;
	vaddr_t gicd_base;
#if defined(CFG_ARM_GICV3)
	vaddr_t gicr_base[CFG_TEE_CORE_NB_CORE];
#endif
	size_t max_it;
	uint32_t per_cpu_group_status;
	uint32_t per_cpu_group_modifier;
	uint32_t per_cpu_enable;
	struct itr_chip chip;
};

static bool gic_primary_done __nex_bss;
static struct gic_data gic_data __nex_bss;
static struct mutex gic_mutex = MUTEX_INITIALIZER;

static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t type,
		       uint32_t prio);
static void gic_op_enable(struct itr_chip *chip, size_t it);
static void gic_op_disable(struct itr_chip *chip, size_t it);
static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			     uint32_t cpu_mask);
static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);

static const struct itr_ops gic_ops = {
	.add = gic_op_add,
	.mask = gic_op_disable,
	.unmask = gic_op_enable,
	.enable = gic_op_enable,
	.disable = gic_op_disable,
	.raise_pi = gic_op_raise_pi,
	.raise_sgi = gic_op_raise_sgi,
	.set_affinity = gic_op_set_affinity,
};
DECLARE_KEEP_PAGER(gic_ops);
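/*
 * These ops are not called directly: consumers go through the generic
 * interrupt_*() wrappers in <kernel/interrupt.h>, which dispatch to the
 * callbacks of the chip an interrupt belongs to.
 */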

static vaddr_t __maybe_unused get_gicr_base(struct gic_data *gd __maybe_unused)
{
#if defined(CFG_ARM_GICV3)
	return gd->gicr_base[get_core_pos()];
#else
	return 0;
#endif
}

static bool affinity_routing_is_enabled(struct gic_data *gd)
{
	return IS_ENABLED(CFG_ARM_GICV3) &&
	       io_read32(gd->gicd_base + GICD_CTLR) & GICD_CTLR_ARE_S;
}

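/*
 * Probing relies on the GIC architecture: GICD_ISENABLERn bits for
 * interrupt IDs that are not implemented are RAZ/WI, so the highest enable
 * bit that reads back as set after writing all ones gives the largest
 * valid interrupt ID. Each register's previous enable state is restored
 * via GICD_ICENABLERn right after it is probed, and the CPU interface
 * control register is cleared during the probe and restored afterwards.
 */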
static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
{
	int i;
	uint32_t old_ctlr;
	size_t ret = 0;
	size_t max_regs = io_read32(gicd_base + GICD_TYPER) &
			  GICD_TYPER_IT_LINES_NUM_MASK;

	/*
	 * Probe which interrupt number is the largest.
	 */
#if defined(CFG_ARM_GICV3)
	old_ctlr = read_icc_ctlr();
	write_icc_ctlr(0);
#else
	old_ctlr = io_read32(gicc_base + GICC_CTLR);
	io_write32(gicc_base + GICC_CTLR, 0);
#endif
	for (i = max_regs; i >= 0; i--) {
		uint32_t old_reg;
		uint32_t reg;
		int b;

		old_reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ISENABLER(i), 0xffffffff);
		reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ICENABLER(i), ~old_reg);
		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
			if (BIT32(b) & reg) {
				ret = i * NUM_INTS_PER_REG + b;
				goto out;
			}
		}
	}
out:
#if defined(CFG_ARM_GICV3)
	write_icc_ctlr(old_ctlr);
#else
	io_write32(gicc_base + GICC_CTLR, old_ctlr);
#endif
	return ret;
}

static void gicr_wait_for_pending_write(vaddr_t gicr_base)
{
	/*
	 * Wait for changes to
	 * - GICR_ICENABLER0
	 * - GICR_CTLR.DPG1S
	 * - GICR_CTLR.DPG1NS
	 * - GICR_CTLR.DPG0
	 * to be visible to all agents in the system.
	 */
	while (io_read32(gicr_base + GICR_CTLR) & GICR_CTLR_RWP)
		;
}

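/*
 * With affinity routing enabled, SGI/PPI group and enable configuration
 * lives in each CPU's redistributor. Secondary CPUs call this to copy the
 * configuration recorded by the primary CPU (in gic_data) into their own
 * redistributor frame: each differing interrupt is first disabled and made
 * non-pending before the new group configuration and enable state are
 * written.
 */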
static void gicv3_sync_redist_config(struct gic_data *gd)
{
	vaddr_t gicr_base = get_gicr_base(gd);
	bool need_sync = false;
	uint32_t gmod0 = 0;
	uint32_t grp0 = 0;
	size_t n = 0;

	/*
	 * If gicr_base isn't available there's nothing to synchronize:
	 * gic_init_donate_sgi_to_ns() would have panicked before any SGI
	 * configuration could diverge from the recorded one.
	 */
	if (!gicr_base)
		return;

	grp0 = io_read32(gicr_base + GICR_IGROUPR0);
	gmod0 = io_read32(gicr_base + GICR_IGRPMODR0);
	for (n = GIC_SGI_SEC_BASE; n < GIC_SPI_BASE; n++) {
		/* Ignore matching bits */
		if (!(BIT32(n) & (grp0 ^ gd->per_cpu_group_status)) &&
		    !(BIT32(n) & (gmod0 ^ gd->per_cpu_group_modifier)))
			continue;
		/*
		 * SGI/PPI-n differs from primary CPU configuration,
		 * let's sync up.
		 */
		need_sync = true;

		/* Disable interrupt */
		io_write32(gicr_base + GICR_ICENABLER0, BIT32(n));

		/* Wait for the write to GICR_ICENABLER0 to propagate */
		gicr_wait_for_pending_write(gicr_base);

		/* Make interrupt non-pending */
		io_write32(gicr_base + GICR_ICPENDR0, BIT32(n));

		if (BIT32(n) & gd->per_cpu_group_status)
			grp0 |= BIT32(n);
		else
			grp0 &= ~BIT32(n);
		if (BIT32(n) & gd->per_cpu_group_modifier)
			gmod0 |= BIT32(n);
		else
			gmod0 &= ~BIT32(n);
	}

	if (need_sync) {
		io_write32(gicr_base + GICR_IGROUPR0, grp0);
		io_write32(gicr_base + GICR_IGRPMODR0, gmod0);
		io_write32(gicr_base + GICR_ISENABLER0, gd->per_cpu_enable);
	}
}

static void gic_legacy_sync_dist_config(struct gic_data *gd)
{
	bool need_sync = false;
	uint32_t grp0 = 0;
	size_t n = 0;

	grp0 = io_read32(gd->gicd_base + GICD_IGROUPR(0));
	for (n = GIC_SGI_SEC_BASE; n < GIC_SPI_BASE; n++) {
		/* Ignore matching bits */
		if (!(BIT32(n) & (grp0 ^ gd->per_cpu_group_status)))
			continue;
		/*
		 * SGI/PPI-n differs from primary CPU configuration,
		 * let's sync up.
		 */
		need_sync = true;

		/* Disable interrupt */
		io_write32(gd->gicd_base + GICD_ICENABLER(0), BIT(n));

		/* Make interrupt non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(0), BIT(n));

		if (BIT32(n) & gd->per_cpu_group_status)
			grp0 |= BIT32(n);
		else
			grp0 &= ~BIT32(n);
	}

	if (need_sync) {
		io_write32(gd->gicd_base + GICD_IGROUPR(0), grp0);
		io_write32(gd->gicd_base + GICD_ISENABLER(0),
			   gd->per_cpu_enable);
	}
}

static void init_gic_per_cpu(struct gic_data *gd)
{
	io_write32(gd->gicd_base + GICD_IGROUPR(0), gd->per_cpu_group_status);

	/*
	 * Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR,
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		   GICC_CTLR_FIQEN);
#endif
}

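/* Called for each CPU as it boots to apply the per-CPU GIC configuration */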
void gic_init_per_cpu(void)
{
	struct gic_data *gd = &gic_data;

#if defined(CFG_ARM_GICV3)
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif

	if (IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW)) {
		/*
		 * GIC is already initialized by TF-A, we only need to
		 * handle any SGI or PPI configuration changes.
		 */
		if (affinity_routing_is_enabled(gd))
			gicv3_sync_redist_config(gd);
		else
			gic_legacy_sync_dist_config(gd);
	} else {
		/*
		 * Non-TF-A case where all CPU specific configuration
		 * of the GIC must be done here.
		 */
		init_gic_per_cpu(gd);
	}
}

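/*
 * Donates one of the secure SGIs to the normal world by moving it from the
 * secure group to non-secure group 1, both in the recorded per-CPU
 * configuration and, for the calling CPU, in hardware.
 */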
void gic_init_donate_sgi_to_ns(size_t it)
{
	struct gic_data *gd = &gic_data;

	assert(it >= GIC_SGI_SEC_BASE && it <= GIC_SGI_SEC_MAX);

	/* Assert it's secure to start with. */
	assert(!(gd->per_cpu_group_status & BIT32(it)) &&
	       (gd->per_cpu_group_modifier & BIT32(it)));

	gd->per_cpu_group_modifier &= ~BIT32(it);
	gd->per_cpu_group_status |= BIT32(it);

	if (affinity_routing_is_enabled(gd)) {
		vaddr_t gicr_base = get_gicr_base(gd);

		if (!gicr_base)
			panic("GICR_BASE missing");

		/* Disable interrupt */
		io_write32(gicr_base + GICR_ICENABLER0, BIT32(it));

		/* Wait for the write to GICR_ICENABLER0 to propagate */
		gicr_wait_for_pending_write(gicr_base);

		/* Make interrupt non-pending */
		io_write32(gicr_base + GICR_ICPENDR0, BIT32(it));

		/* Make it non-secure */
		io_write32(gicr_base + GICR_IGROUPR0, gd->per_cpu_group_status);
		io_write32(gicr_base + GICR_IGRPMODR0,
			   gd->per_cpu_group_modifier);
	} else {
		/* Disable interrupt */
		io_write32(gd->gicd_base + GICD_ICENABLER(0), BIT(it));

		/* Make interrupt non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(0), BIT(it));

		/* Make it non-secure */
		io_write32(gd->gicd_base + GICD_IGROUPR(0),
			   gd->per_cpu_group_status);
	}
}

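/*
 * Translates a DT interrupt specifier where the first cell is the interrupt
 * type (GIC_SPI or GIC_PPI) and the second the type-relative number: PPIs
 * start at physical interrupt ID 16 and SPIs at ID 32.
 */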
static int gic_dt_get_irq(const uint32_t *properties, int count, uint32_t *type,
			  uint32_t *prio)
{
	int it_num = DT_INFO_INVALID_INTERRUPT;

	if (type)
		*type = IRQ_TYPE_NONE;

	if (prio)
		*prio = 0;

	if (!properties || count < 2)
		return DT_INFO_INVALID_INTERRUPT;

	it_num = fdt32_to_cpu(properties[1]);

	switch (fdt32_to_cpu(properties[0])) {
	case GIC_PPI:
		it_num += 16;
		break;
	case GIC_SPI:
		it_num += 32;
		break;
	default:
		it_num = DT_INFO_INVALID_INTERRUPT;
	}

	return it_num;
}

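/*
 * Each GICv3 redistributor occupies two contiguous 64KiB frames (RD_base
 * and SGI_base). Walk the frames, match each one to a core position using
 * the affinity fields in GICR_TYPER, and stop at the frame with
 * GICR_TYPER.Last set.
 */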
static void __maybe_unused probe_redist_base_addrs(vaddr_t *gicr_base_addrs,
						   paddr_t gicr_base_pa)
{
	size_t sz = GICR_V3_PCPUBASE_SIZE;
	paddr_t pa = gicr_base_pa;
	size_t core_pos = 0;
	uint64_t mt_bit = 0;
	uint64_t mpidr = 0;
	uint64_t tv = 0;
	vaddr_t va = 0;

#ifdef ARM64
	mt_bit = read_mpidr_el1() & MPIDR_MT_MASK;
#endif
	do {
		va = core_mmu_get_va(pa, MEM_AREA_IO_SEC, sz);
		if (!va)
			panic();
		tv = io_read64(va + GICR_TYPER);

		/*
		 * Extract an mpidr from the Type register to calculate the
		 * core position of this redistributor instance.
		 */
		mpidr = mt_bit;
		mpidr |= SHIFT_U64((tv >> GICR_TYPER_AFF3_SHIFT) &
				   MPIDR_AFFLVL_MASK, MPIDR_AFF3_SHIFT);
		mpidr |= (tv >> GICR_TYPER_AFF0_SHIFT) &
			 (MPIDR_AFF0_MASK | MPIDR_AFF1_MASK | MPIDR_AFF2_MASK);
		core_pos = get_core_pos_mpidr(mpidr);
		if (core_pos < CFG_TEE_CORE_NB_CORE) {
			DMSG("GICR_BASE[%zu] at %#"PRIxVA, core_pos, va);
			gicr_base_addrs[core_pos] = va;
		} else {
			EMSG("Skipping too large core_pos %zu from GICR_TYPER",
			     core_pos);
		}
		pa += sz;
	} while (!(tv & GICR_TYPER_LAST));
}

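/*
 * Maps the distributor (and, for GICv2, the CPU interface) and sanity
 * checks the architecture revision advertised in PIDR2 against the
 * configured driver flavor before the rest of the driver state is set up.
 */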
static void gic_init_base_addr(paddr_t gicc_base_pa, paddr_t gicd_base_pa,
			       paddr_t gicr_base_pa __maybe_unused)
{
	struct gic_data *gd = &gic_data;
	vaddr_t gicc_base = 0;
	vaddr_t gicd_base = 0;
	uint32_t vers __maybe_unused = 0;

	assert(cpu_mmu_enabled());

	gicd_base = core_mmu_get_va(gicd_base_pa, MEM_AREA_IO_SEC,
				    GIC_DIST_REG_SIZE);
	if (!gicd_base)
		panic();

	vers = io_read32(gicd_base + GICD_PIDR2);
	vers >>= GICD_PIDR2_ARCHREV_SHIFT;
	vers &= GICD_PIDR2_ARCHREV_MASK;

	if (IS_ENABLED(CFG_ARM_GICV3)) {
		assert(vers == 4 || vers == 3);
	} else {
		assert(vers == 2 || vers == 1);
		gicc_base = core_mmu_get_va(gicc_base_pa, MEM_AREA_IO_SEC,
					    GIC_CPU_REG_SIZE);
		if (!gicc_base)
			panic();
	}

	gd->gicc_base = gicc_base;
	gd->gicd_base = gicd_base;
	gd->max_it = probe_max_it(gicc_base, gicd_base);
#if defined(CFG_ARM_GICV3)
	if (affinity_routing_is_enabled(gd) && gicr_base_pa)
		probe_redist_base_addrs(gd->gicr_base, gicr_base_pa);
#endif
	gd->chip.ops = &gic_ops;

	if (IS_ENABLED(CFG_DT))
		gd->chip.dt_get_irq = gic_dt_get_irq;
}

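/*
 * One-time driver initialization, called on the primary CPU. With TF-A the
 * GIC is already configured and only a snapshot of the SGI/PPI state is
 * recorded; without TF-A the distributor and CPU interface are fully
 * programmed here.
 */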
void gic_init_v3(paddr_t gicc_base_pa, paddr_t gicd_base_pa,
		 paddr_t gicr_base_pa)
{
	struct gic_data __maybe_unused *gd = &gic_data;
	size_t __maybe_unused n = 0;

	gic_init_base_addr(gicc_base_pa, gicd_base_pa, gicr_base_pa);

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	/* GIC configuration is initialized from TF-A when embedded */
	if (affinity_routing_is_enabled(gd)) {
		/* Secure affinity routing enabled */
		vaddr_t gicr_base = get_gicr_base(gd);

		if (gicr_base) {
			gd->per_cpu_group_status = io_read32(gicr_base +
							     GICR_IGROUPR0);
			gd->per_cpu_group_modifier = io_read32(gicr_base +
							       GICR_IGRPMODR0);
		} else {
			IMSG("GIC redistributor base address not provided");
			IMSG("Assuming default GIC group status and modifier");
			gd->per_cpu_group_status = 0xffff00ff;
			gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
		}
	} else {
		/* Legacy operation with secure affinity routing disabled */
		gd->per_cpu_group_status = io_read32(gd->gicd_base +
						     GICD_IGROUPR(0));
		gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
	}
#else /*!CFG_WITH_ARM_TRUSTED_FW*/
	/*
	 * Without TF-A, the GIC is always configured for legacy operation
	 * with secure affinity routing disabled.
	 */
	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
		/* Disable interrupts */
		io_write32(gd->gicd_base + GICD_ICENABLER(n), 0xffffffff);

		/* Make interrupts non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(n), 0xffffffff);

		/* Mark interrupts non-secure */
		if (n == 0) {
			/*
			 * Per-CPU interrupt configuration:
			 * ID0-ID7(SGI)   for Non-secure interrupts
			 * ID8-ID15(SGI)  for Secure interrupts
			 * All PPIs configured as Non-secure interrupts
			 */
			gd->per_cpu_group_status = 0xffff00ff;
			gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
			io_write32(gd->gicd_base + GICD_IGROUPR(n),
				   gd->per_cpu_group_status);
		} else {
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffffffff);
		}
	}

	/*
	 * Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
	io_setbits32(gd->gicd_base + GICD_CTLR, GICD_CTLR_ENABLEGRP1S);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR, GICC_CTLR_FIQEN |
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1);
	io_setbits32(gd->gicd_base + GICD_CTLR,
		     GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1NS);
#endif
#endif /*!CFG_WITH_ARM_TRUSTED_FW*/

	interrupt_main_init(&gic_data.chip);
}

static void gic_it_add(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Disable the interrupt */
	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
	/* Make it non-pending */
	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
	/* Assign it to group0 */
	io_clrbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
#if defined(CFG_ARM_GICV3)
	/* Assign it to group1S */
	io_setbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
#endif
}

static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
				uint8_t cpu_mask)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
	uint32_t target, target_shift;
	vaddr_t itargetsr = gd->gicd_base +
			    GICD_ITARGETSR(it / NUM_TARGETS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Route it to selected CPUs */
	target = io_read32(itargetsr);
	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
	target |= cpu_mask << target_shift;
	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA, target, itargetsr);
	io_write32(itargetsr, target);
	DMSG("cpu_mask: 0x%x", io_read32(itargetsr));
}

static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Set the priority of the interrupt */
	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
		prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
	io_write8(gd->gicd_base + GICD_IPRIORITYR(0) + it, prio);
}

static void gic_it_enable(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
	vaddr_t base = gd->gicd_base;

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(base + GICD_IGROUPR(idx)) & mask));

	/* Enable the interrupt */
	io_write32(base + GICD_ISENABLER(idx), mask);
}

static void gic_it_disable(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Disable the interrupt */
	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
}

static void gic_it_set_pending(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Should be Peripheral Interrupt */
	assert(it >= NUM_SGI);

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_ISPENDR(idx), mask);
}

static void assert_cpu_mask_is_valid(uint32_t cpu_mask)
{
	bool __maybe_unused to_others = cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS;
	bool __maybe_unused to_current = cpu_mask & ITR_CPU_MASK_TO_THIS_CPU;
	bool __maybe_unused to_list = cpu_mask & 0xff;

	/* One and only one of the bit fields shall be non-zero */
	assert(to_others + to_current + to_list == 1);
}

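/*
 * Raise an SGI. On GICv3 the SGI is encoded into an ICC_SGI1R_EL1 (or
 * ICC_ASGI1R_EL1 for the non-secure group) system register write with
 * affinity-based targeting; on GICv2 it is a single write to GICD_SGIR
 * where the NSATT bit selects the group.
 */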
static void gic_it_raise_sgi(struct gic_data *gd __maybe_unused, size_t it,
			     uint32_t cpu_mask, bool ns)
{
#if defined(CFG_ARM_GICV3)
	uint32_t mask_id = it & 0xf;
	uint64_t mask = SHIFT_U64(mask_id, 24);

	assert_cpu_mask_is_valid(cpu_mask);

	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
		mask |= BIT64(GICC_SGI_IRM_BIT);
	} else {
		uint64_t mpidr = read_mpidr();
		uint64_t mask_aff1 = (mpidr & MPIDR_AFF1_MASK) >>
				     MPIDR_AFF1_SHIFT;
		uint64_t mask_aff2 = (mpidr & MPIDR_AFF2_MASK) >>
				     MPIDR_AFF2_SHIFT;
		uint64_t mask_aff3 = (mpidr & MPIDR_AFF3_MASK) >>
				     MPIDR_AFF3_SHIFT;

		mask |= SHIFT_U64(mask_aff1, GICC_SGI_AFF1_SHIFT);
		mask |= SHIFT_U64(mask_aff2, GICC_SGI_AFF2_SHIFT);
		mask |= SHIFT_U64(mask_aff3, GICC_SGI_AFF3_SHIFT);

		if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
			mask |= BIT32(mpidr & 0xf);
		} else {
			/*
			 * Only sending SGIs to cores in the same cluster
			 * is supported for now.
			 */
			mask |= cpu_mask & 0xff;
		}
	}

	/* Raise the interrupt */
	if (ns)
		write_icc_asgi1r(mask);
	else
		write_icc_sgi1r(mask);
#else
	uint32_t mask_id = it & GICD_SGIR_SIGINTID_MASK;
	uint32_t mask_group = ns;
	uint32_t mask = mask_id;

	assert_cpu_mask_is_valid(cpu_mask);

	mask |= SHIFT_U32(mask_group, GICD_SGIR_NSATT_SHIFT);
	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
		mask |= SHIFT_U32(GICD_SGIR_TO_OTHER_CPUS,
				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
	} else if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
		mask |= SHIFT_U32(GICD_SGIR_TO_THIS_CPU,
				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
	} else {
		mask |= SHIFT_U32(cpu_mask & 0xff,
				  GICD_SGIR_CPU_TARGET_LIST_SHIFT);
	}

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_SGIR, mask);
#endif
}

static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	return read_icc_iar1();
#else
	return io_read32(gd->gicc_base + GICC_IAR);
#endif
}

static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	write_icc_eoir1(eoir);
#else
	io_write32(gd->gicc_base + GICC_EOIR, eoir);
#endif
}

static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);
	return !!(io_read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
}

static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);
	return !!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
}

static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
{
	size_t reg_idx = it / NUM_TARGETS_PER_REG;
	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
				ITARGETSR_FIELD_BITS;
	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
	uint32_t target = io_read32(gd->gicd_base + GICD_ITARGETSR(reg_idx));

	assert(gd == &gic_data);
	return (target & target_mask) >> target_shift;
}

void gic_dump_state(void)
{
	struct gic_data *gd = &gic_data;
	int i = 0;

#if defined(CFG_ARM_GICV3)
	DMSG("GICC_CTLR: 0x%x", read_icc_ctlr());
#else
	DMSG("GICC_CTLR: 0x%x", io_read32(gd->gicc_base + GICC_CTLR));
#endif
	DMSG("GICD_CTLR: 0x%x", io_read32(gd->gicd_base + GICD_CTLR));

	for (i = 0; i <= (int)gd->max_it; i++) {
		if (gic_it_is_enabled(gd, i)) {
			DMSG("irq%d: enabled, group:%d, target:%x", i,
			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
		}
	}
}

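/*
 * Hands a secure SPI over to the normal world: the caller must have
 * disabled it first, after which it is detargeted, deprioritized, cleared
 * and reassigned to non-secure group 1. gic_mutex serializes the
 * read-modify-write of the shared distributor registers.
 */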
TEE_Result gic_spi_release_to_ns(size_t it)
{
	struct gic_data *gd = &gic_data;
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);

	if (it >= gd->max_it || it < GIC_SPI_BASE)
		return TEE_ERROR_BAD_PARAMETERS;
	/* Make sure it's already disabled */
	if (gic_it_is_enabled(gd, it))
		return TEE_ERROR_BAD_STATE;
	/* Assert it's secure to start with */
	if (gic_it_get_group(gd, it))
		return TEE_ERROR_BAD_STATE;

	mutex_lock(&gic_mutex);
	gic_it_set_cpu_mask(gd, it, 0);
	gic_it_set_prio(gd, it, GIC_SPI_PRI_NS_EL1);

	/* Clear pending status */
	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
	/* Assign it to NS Group1 */
	io_setbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
#if defined(CFG_ARM_GICV3)
	io_clrbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
#endif
	mutex_unlock(&gic_mutex);
	return TEE_SUCCESS;
}

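/*
 * Main native interrupt handler: acknowledge the interrupt to get its ID,
 * dispatch to the handlers registered on this chip, then signal end of
 * interrupt.
 */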
static void __maybe_unused gic_native_itr_handler(void)
{
	struct gic_data *gd = &gic_data;
	uint32_t iar = 0;
	uint32_t id = 0;

	iar = gic_read_iar(gd);
	id = iar & GICC_IAR_IT_ID_MASK;

	if (id <= gd->max_it)
		interrupt_call_handlers(&gd->chip, id);
	else
		DMSG("ignoring interrupt %" PRIu32, id);

	gic_write_eoir(gd, iar);
}

#ifndef CFG_CORE_WORKAROUND_ARM_NMFI
/* Override interrupt_main_handler() with driver implementation */
void interrupt_main_handler(void)
{
	gic_native_itr_handler();
}
#endif /*CFG_CORE_WORKAROUND_ARM_NMFI*/

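/*
 * SGIs and PPIs are banked per CPU, so their group assignment is recorded
 * in gic_data and must be settled before secondary CPUs boot since
 * gic_init_per_cpu() replays it; adding them after boot panics.
 */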
static void gic_op_add(struct itr_chip *chip, size_t it,
		       uint32_t type __unused,
		       uint32_t prio __unused)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	if (it < GIC_SPI_BASE) {
		if (gic_primary_done)
			panic("Cannot add SGI or PPI after boot");

		/* Assign it to Secure Group 1, G1S */
		gd->per_cpu_group_modifier |= BIT32(it);
		gd->per_cpu_group_status &= ~BIT32(it);
	}

	if (it < GIC_SPI_BASE && affinity_routing_is_enabled(gd)) {
		vaddr_t gicr_base = get_gicr_base(gd);

		if (!gicr_base)
			panic("GICR_BASE missing");

		/* Disable interrupt */
		io_write32(gicr_base + GICR_ICENABLER0, BIT32(it));

		/* Wait for the write to GICR_ICENABLER0 to propagate */
		gicr_wait_for_pending_write(gicr_base);

		/* Make interrupt non-pending */
		io_write32(gicr_base + GICR_ICPENDR0, BIT32(it));

		/* Make it secure */
		io_write32(gicr_base + GICR_IGROUPR0, gd->per_cpu_group_status);
		io_write32(gicr_base + GICR_IGRPMODR0,
			   gd->per_cpu_group_modifier);
	} else {
		gic_it_add(gd, it);
		/* Set the CPU mask to deliver interrupts to any online core */
		gic_it_set_cpu_mask(gd, it, 0xff);
		gic_it_set_prio(gd, it, 0x1);
	}
}

static void gic_op_enable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	if (it < GIC_SPI_BASE)
		gd->per_cpu_enable |= BIT(it);

	if (it < GIC_SPI_BASE && affinity_routing_is_enabled(gd)) {
		vaddr_t gicr_base = get_gicr_base(gd);

		if (!gicr_base)
			panic("GICR_BASE missing");

		/* Assigned to G1S */
		assert(gd->per_cpu_group_modifier & BIT(it) &&
		       !(gd->per_cpu_group_status & BIT(it)));
		io_write32(gicr_base + GICR_ISENABLER0, gd->per_cpu_enable);
	} else {
		gic_it_enable(gd, it);
	}
}

static void gic_op_disable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_disable(gd, it);
}

static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_set_pending(gd, it);
}

static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			     uint32_t cpu_mask)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);
	bool ns = false;

	assert(gd == &gic_data);

	/* Should be Software Generated Interrupt */
	assert(it < NUM_SGI);

	ns = BIT32(it) & gd->per_cpu_group_status;
	gic_it_raise_sgi(gd, it, cpu_mask, ns);
}

static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_set_cpu_mask(gd, it, cpu_mask);
}

#ifdef CFG_DT
/* Callback for "interrupts" and "interrupts-extended" DT node properties */
static TEE_Result dt_get_gic_chip_cb(struct dt_pargs *arg, void *priv_data,
				     struct itr_desc *itr_desc)
{
	int itr_num = DT_INFO_INVALID_INTERRUPT;
	struct itr_chip *chip = priv_data;
	uint32_t phandle_args[2] = { };
	uint32_t type = 0;
	uint32_t prio = 0;

	assert(arg && itr_desc);

	/*
	 * gic_dt_get_irq() expects phandle arguments in DT format
	 * (big-endian) whereas struct dt_pargs carries them converted to
	 * CPU endianness, so convert the two arguments gic_dt_get_irq()
	 * consumes back to big-endian.
	 */
	if (arg->args_count < 2)
		return TEE_ERROR_GENERIC;
	phandle_args[0] = cpu_to_fdt32(arg->args[0]);
	phandle_args[1] = cpu_to_fdt32(arg->args[1]);

	itr_num = gic_dt_get_irq((const void *)phandle_args, 2, &type, &prio);
	if (itr_num == DT_INFO_INVALID_INTERRUPT)
		return TEE_ERROR_GENERIC;

	gic_op_add(chip, itr_num, type, prio);

	itr_desc->chip = chip;
	itr_desc->itr_num = itr_num;

	return TEE_SUCCESS;
}

static TEE_Result gic_probe(const void *fdt, int offs, const void *cd __unused)
{
	if (interrupt_register_provider(fdt, offs, dt_get_gic_chip_cb,
					&gic_data.chip))
		panic();

	return TEE_SUCCESS;
}

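/*
 * An illustrative (not taken from any particular platform) GICv2 node this
 * driver would match:
 *
 *	intc: interrupt-controller@2c001000 {
 *		compatible = "arm,gic-400";
 *		#interrupt-cells = <3>;
 *		interrupt-controller;
 *		reg = <0x2c001000 0x1000>,
 *		      <0x2c002000 0x2000>;
 *	};
 */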
static const struct dt_device_match gic_match_table[] = {
	{ .compatible = "arm,cortex-a15-gic" },
	{ .compatible = "arm,cortex-a7-gic" },
	{ .compatible = "arm,cortex-a5-gic" },
	{ .compatible = "arm,cortex-a9-gic" },
	{ .compatible = "arm,gic-400" },
	{ }
};

DEFINE_DT_DRIVER(gic_dt_driver) = {
	.name = "gic",
	.match_table = gic_match_table,
	.probe = gic_probe,
};
#endif /*CFG_DT*/

static TEE_Result gic_set_primary_done(void)
{
	gic_primary_done = true;
	return TEE_SUCCESS;
}

nex_release_init_resource(gic_set_primary_done);