// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2017, 2023-2024 Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <compiler.h>
#include <config.h>
#include <drivers/gic.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <initcall.h>
#include <io.h>
#include <keep.h>
#include <kernel/dt.h>
#include <kernel/dt_driver.h>
#include <kernel/interrupt.h>
#include <kernel/misc.h>
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <libfdt.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <trace.h>
#include <util.h>

/* Offsets from gic.gicc_base */
#define GICC_CTLR		(0x000)
#define GICC_PMR		(0x004)
#define GICC_IAR		(0x00C)
#define GICC_EOIR		(0x010)

#define GICC_CTLR_ENABLEGRP0	(1 << 0)
#define GICC_CTLR_ENABLEGRP1	(1 << 1)
#define GICC_CTLR_FIQEN		(1 << 3)

/* Offsets from gic.gicd_base */
#define GICD_CTLR		(0x000)
#define GICD_TYPER		(0x004)
#define GICD_IGROUPR(n)		(0x080 + (n) * 4)
#define GICD_ISENABLER(n)	(0x100 + (n) * 4)
#define GICD_ICENABLER(n)	(0x180 + (n) * 4)
#define GICD_ISPENDR(n)		(0x200 + (n) * 4)
#define GICD_ICPENDR(n)		(0x280 + (n) * 4)
#define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
#define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
#define GICD_ICFGR(n)		(0xc00 + (n) * 4)
#define GICD_IGROUPMODR(n)	(0xd00 + (n) * 4)
#define GICD_SGIR		(0xF00)

#ifdef CFG_ARM_GICV3
#define GICD_PIDR2		(0xFFE8)
#else
/* Called ICPIDR2 in GICv2 specification */
#define GICD_PIDR2		(0xFE8)
#endif

#define GICD_CTLR_ENABLEGRP0	BIT32(0)
#define GICD_CTLR_ENABLEGRP1NS	BIT32(1)
#define GICD_CTLR_ENABLEGRP1S	BIT32(2)
#define GICD_CTLR_ARE_S		BIT32(4)
#define GICD_CTLR_ARE_NS	BIT32(5)

/* Offsets from gic.gicr_base[core_pos] */
#define GICR_V3_PCPUBASE_SIZE	(2 * 64 * 1024)
#define GICR_SGI_BASE_OFFSET	(64 * 1024)
#define GICR_CTLR		(0x00)
#define GICR_TYPER		(0x08)

#define GICR_IGROUPR0		(GICR_SGI_BASE_OFFSET + 0x080)
#define GICR_IGRPMODR0		(GICR_SGI_BASE_OFFSET + 0xD00)
#define GICR_ICENABLER0		(GICR_SGI_BASE_OFFSET + 0x180)
#define GICR_ICPENDR0		(GICR_SGI_BASE_OFFSET + 0x280)
#define GICR_ISENABLER0		(GICR_SGI_BASE_OFFSET + 0x100)
#define GICR_ICFGR0		(GICR_SGI_BASE_OFFSET + 0xC00)
#define GICR_ICFGR1		(GICR_SGI_BASE_OFFSET + 0xC04)
#define GICR_IPRIORITYR(n)	(GICR_SGI_BASE_OFFSET + 0x400 + (n) * 4)

#define GICR_CTLR_RWP		BIT32(3)

#define GICR_TYPER_LAST		BIT64(4)
#define GICR_TYPER_AFF3_SHIFT	56
#define GICR_TYPER_AFF2_SHIFT	48
#define GICR_TYPER_AFF1_SHIFT	40
#define GICR_TYPER_AFF0_SHIFT	32

/* GICD_PIDR2 is named ICPIDR2 on GICv2 but uses the same bit layout */
#define GICD_PIDR2_ARCHREV_SHIFT	4
#define GICD_PIDR2_ARCHREV_MASK		0xF

/* Number of Private Peripheral Interrupts (PPIs) */
#define NUM_PPI	32

/* Number of Software Generated Interrupts (SGIs) */
#define NUM_SGI			16

/* Number of Non-secure Software Generated Interrupts */
#define NUM_NS_SGI		8

/* Number of interrupts in one register */
#define NUM_INTS_PER_REG	32

/* Number of targets in one register */
#define NUM_TARGETS_PER_REG	4

/* Accessors to access ITARGETSRn */
#define ITARGETSR_FIELD_BITS	8
#define ITARGETSR_FIELD_MASK	0xff

#define GICD_TYPER_IT_LINES_NUM_MASK	0x1f
#define GICC_IAR_IT_ID_MASK	0x3ff
#define GICC_IAR_CPU_ID_MASK	0x7
#define GICC_IAR_CPU_ID_SHIFT	10

#define GICC_SGI_IRM_BIT	40
#define GICC_SGI_AFF1_SHIFT	16
#define GICC_SGI_AFF2_SHIFT	32
#define GICC_SGI_AFF3_SHIFT	48

#define GICD_SGIR_SIGINTID_MASK			0xf
#define GICD_SGIR_TO_OTHER_CPUS			0x1
#define GICD_SGIR_TO_THIS_CPU			0x2
#define GICD_SGIR_TARGET_LIST_FILTER_SHIFT	24
#define GICD_SGIR_NSATT_SHIFT			15
#define GICD_SGIR_CPU_TARGET_LIST_SHIFT		16

/* GICD ICFGR bit fields */
#define GICD_ICFGR_TYPE_EDGE		2
#define GICD_ICFGR_TYPE_LEVEL		0
#define GICD_ICFGR_FIELD_BITS		2
#define GICD_ICFGR_FIELD_MASK		0x3
#define GICD_ICFGR_NUM_INTS_PER_REG	(NUM_INTS_PER_REG / \
					 GICD_ICFGR_FIELD_BITS)

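/*
 * All the GICD_I*R(n) banks above hold one field per interrupt ID
 * (INTID), packed NUM_INTS_PER_REG (or GICD_ICFGR_NUM_INTS_PER_REG)
 * fields to a register. For a 1-bit-per-INTID register such as
 * GICD_ISENABLER, INTID 42 lives in register n = 42 / 32 = 1 at bit
 * 42 % 32 = 10. For GICD_ICFGR, with 2 bits per INTID, INTID 42 lives
 * in register n = 42 / 16 = 2 at bit offset (42 % 16) * 2 = 20.
 */

/*
 * Driver state. The per_cpu_* fields cache the primary CPU's view of
 * the banked SGI/PPI configuration (group status, group modifier and
 * enable bits) so that the other CPUs can be brought to the same
 * configuration from gic_init_per_cpu().
 */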
struct gic_data {
	vaddr_t gicc_base;
	vaddr_t gicd_base;
#if defined(CFG_ARM_GICV3)
	vaddr_t gicr_base[CFG_TEE_CORE_NB_CORE];
#endif
	size_t max_it;
	uint32_t per_cpu_group_status;
	uint32_t per_cpu_group_modifier;
	uint32_t per_cpu_enable;
	struct itr_chip chip;
};

static bool gic_primary_done __nex_bss;
static struct gic_data gic_data __nex_bss;
static struct mutex gic_mutex = MUTEX_INITIALIZER;

static void gic_op_configure(struct itr_chip *chip, size_t it, uint32_t type,
			     uint32_t prio);
static void gic_op_enable(struct itr_chip *chip, size_t it);
static void gic_op_disable(struct itr_chip *chip, size_t it);
static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			     uint32_t cpu_mask);
static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);

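/*
 * The GIC has no interrupt masking concept separate from enabling, so
 * the mask/unmask and enable/disable operations of the generic
 * interrupt framework all map to the same set/clear-enable accesses.
 */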
static const struct itr_ops gic_ops = {
	.configure = gic_op_configure,
	.mask = gic_op_disable,
	.unmask = gic_op_enable,
	.enable = gic_op_enable,
	.disable = gic_op_disable,
	.raise_pi = gic_op_raise_pi,
	.raise_sgi = gic_op_raise_sgi,
	.set_affinity = gic_op_set_affinity,
};
DECLARE_KEEP_PAGER(gic_ops);

static vaddr_t __maybe_unused get_gicr_base(struct gic_data *gd __maybe_unused)
{
#if defined(CFG_ARM_GICV3)
	return gd->gicr_base[get_core_pos()];
#else
	return 0;
#endif
}

static bool affinity_routing_is_enabled(struct gic_data *gd)
{
	return IS_ENABLED(CFG_ARM_GICV3) &&
	       io_read32(gd->gicd_base + GICD_CTLR) & GICD_CTLR_ARE_S;
}

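/*
 * GICD_TYPER.ITLinesNumber (the low five bits of GICD_TYPER) encodes N
 * where the distributor supports 32 * (N + 1) INTIDs. Individual
 * INTIDs may still be unimplemented, so additionally write all-ones to
 * each GICD_ISENABLER register and read it back: enable bits of
 * unimplemented interrupts are RAZ/WI and stay zero. The highest bit
 * that sticks identifies the largest usable INTID. The previous enable
 * state is restored through GICD_ICENABLER before returning.
 */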
static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
{
	int i = 0;
	uint32_t old_ctlr = 0;
	size_t ret = 0;
	size_t max_regs = io_read32(gicd_base + GICD_TYPER) &
			  GICD_TYPER_IT_LINES_NUM_MASK;

	/*
	 * Probe which interrupt number is the largest.
	 */
#if defined(CFG_ARM_GICV3)
	old_ctlr = read_icc_ctlr();
	write_icc_ctlr(0);
#else
	old_ctlr = io_read32(gicc_base + GICC_CTLR);
	io_write32(gicc_base + GICC_CTLR, 0);
#endif
	for (i = max_regs; i >= 0; i--) {
		uint32_t old_reg = 0;
		uint32_t reg = 0;
		int b = 0;

		old_reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ISENABLER(i), 0xffffffff);
		reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ICENABLER(i), ~old_reg);
		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
			if (BIT32(b) & reg) {
				ret = i * NUM_INTS_PER_REG + b;
				goto out;
			}
		}
	}
out:
#if defined(CFG_ARM_GICV3)
	write_icc_ctlr(old_ctlr);
#else
	io_write32(gicc_base + GICC_CTLR, old_ctlr);
#endif
	return ret;
}

static void gicr_wait_for_pending_write(vaddr_t gicr_base)
{
	/*
	 * Wait for changes to
	 * - GICR_ICENABLER0
	 * - GICR_CTLR.DPG1S
	 * - GICR_CTLR.DPG1NS
	 * - GICR_CTLR.DPG0
	 * to be visible to all agents in the system.
	 */
	while (io_read32(gicr_base + GICR_CTLR) & GICR_CTLR_RWP)
		;
}

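/*
 * With GICv3 affinity routing enabled, the security state and group of
 * each SGI/PPI is encoded by two banked bits, one in GICR_IGROUPR0 and
 * one in GICR_IGRPMODR0:
 *
 *   IGROUPR  IGRPMODR  Group
 *      0        0      Group 0 (Secure)
 *      0        1      Secure Group 1 (G1S)
 *      1        0      Non-secure Group 1 (G1NS)
 *
 * The sync functions below replay the primary CPU's view of these bits
 * onto the current CPU's banked registers.
 */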
static void gicv3_sync_redist_config(struct gic_data *gd)
{
	vaddr_t gicr_base = get_gicr_base(gd);
	bool need_sync = false;
	uint32_t gmod0 = 0;
	uint32_t grp0 = 0;
	size_t n = 0;

	/*
	 * If gicr_base isn't available there's no need to synchronize SGI
	 * configuration since gic_init_donate_sgi_to_ns() would panic.
	 */
	if (!gicr_base)
		return;

	grp0 = io_read32(gicr_base + GICR_IGROUPR0);
	gmod0 = io_read32(gicr_base + GICR_IGRPMODR0);
	for (n = GIC_SGI_SEC_BASE; n < GIC_SPI_BASE; n++) {
		/* Ignore matching bits */
		if (!(BIT32(n) & (grp0 ^ gd->per_cpu_group_status)) &&
		    !(BIT32(n) & (gmod0 ^ gd->per_cpu_group_modifier)))
			continue;
		/*
		 * SGI/PPI-n differs from primary CPU configuration,
		 * let's sync up.
		 */
		need_sync = true;

		/* Disable interrupt */
		io_write32(gicr_base + GICR_ICENABLER0, BIT32(n));

		/* Wait for the write to GICR_ICENABLER0 to propagate */
		gicr_wait_for_pending_write(gicr_base);

		/* Make interrupt non-pending */
		io_write32(gicr_base + GICR_ICPENDR0, BIT32(n));

		if (BIT32(n) & gd->per_cpu_group_status)
			grp0 |= BIT32(n);
		else
			grp0 &= ~BIT32(n);
		if (BIT32(n) & gd->per_cpu_group_modifier)
			gmod0 |= BIT32(n);
		else
			gmod0 &= ~BIT32(n);
	}

	if (need_sync) {
		io_write32(gicr_base + GICR_IGROUPR0, grp0);
		io_write32(gicr_base + GICR_IGRPMODR0, gmod0);
		io_write32(gicr_base + GICR_ISENABLER0, gd->per_cpu_enable);
	}
}

static void gic_legacy_sync_dist_config(struct gic_data *gd)
{
	bool need_sync = false;
	uint32_t grp0 = 0;
	size_t n = 0;

	grp0 = io_read32(gd->gicd_base + GICD_IGROUPR(0));
	for (n = GIC_SGI_SEC_BASE; n < GIC_SPI_BASE; n++) {
		/* Ignore matching bits */
		if (!(BIT32(n) & (grp0 ^ gd->per_cpu_group_status)))
			continue;
		/*
		 * SGI/PPI-n differs from primary CPU configuration,
		 * let's sync up.
		 */
		need_sync = true;

		/* Disable interrupt */
		io_write32(gd->gicd_base + GICD_ICENABLER(0), BIT(n));

		/* Make interrupt non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(0), BIT(n));

		if (BIT32(n) & gd->per_cpu_group_status)
			grp0 |= BIT32(n);
		else
			grp0 &= ~BIT32(n);
	}

	if (need_sync) {
		io_write32(gd->gicd_base + GICD_IGROUPR(0), grp0);
		io_write32(gd->gicd_base + GICD_ISENABLER(0),
			   gd->per_cpu_enable);
	}
}

static void init_gic_per_cpu(struct gic_data *gd)
{
	io_write32(gd->gicd_base + GICD_IGROUPR(0), gd->per_cpu_group_status);

	/*
	 * Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR,
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		   GICC_CTLR_FIQEN);
#endif
}

void gic_init_per_cpu(void)
{
	struct gic_data *gd = &gic_data;

#if defined(CFG_ARM_GICV3)
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif

	if (IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW)) {
		/*
		 * The GIC is already initialized by TF-A; we only need
		 * to handle any SGI or PPI configuration changes.
		 */
		if (affinity_routing_is_enabled(gd))
			gicv3_sync_redist_config(gd);
		else
			gic_legacy_sync_dist_config(gd);
	} else {
		/*
		 * Non-TF-A case where all CPU specific configuration
		 * of GIC must be done here.
		 */
		init_gic_per_cpu(gd);
	}
}

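/*
 * Hand one of the secure SGIs (GIC_SGI_SEC_BASE..GIC_SGI_SEC_MAX) over
 * to the normal world. A minimal usage sketch from platform boot code
 * (the SGI number 14 below is only an example):
 *
 *	gic_init_v3(gicc_pa, gicd_pa, gicr_pa);
 *	gic_init_donate_sgi_to_ns(14);
 */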
void gic_init_donate_sgi_to_ns(size_t it)
{
	struct gic_data *gd = &gic_data;

	assert(it >= GIC_SGI_SEC_BASE && it <= GIC_SGI_SEC_MAX);

	/* Assert it's secure to start with. */
	assert(!(gd->per_cpu_group_status & BIT32(it)) &&
	       (gd->per_cpu_group_modifier & BIT32(it)));

	gd->per_cpu_group_modifier &= ~BIT32(it);
	gd->per_cpu_group_status |= BIT32(it);

	if (affinity_routing_is_enabled(gd)) {
		vaddr_t gicr_base = get_gicr_base(gd);

		if (!gicr_base)
			panic("GICR_BASE missing");

		/* Disable interrupt */
		io_write32(gicr_base + GICR_ICENABLER0, BIT32(it));

		/* Wait for the write to GICR_ICENABLER0 to propagate */
		gicr_wait_for_pending_write(gicr_base);

		/* Make interrupt non-pending */
		io_write32(gicr_base + GICR_ICPENDR0, BIT32(it));

		/* Make it non-secure */
		io_write32(gicr_base + GICR_IGROUPR0, gd->per_cpu_group_status);
		io_write32(gicr_base + GICR_IGRPMODR0,
			   gd->per_cpu_group_modifier);
	} else {
		/* Disable interrupt */
		io_write32(gd->gicd_base + GICD_ICENABLER(0), BIT(it));

		/* Make interrupt non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(0), BIT(it));

		/* Make it non-secure */
		io_write32(gd->gicd_base + GICD_IGROUPR(0),
			   gd->per_cpu_group_status);
	}
}

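/*
 * Parse a standard Arm GIC interrupt specifier. Per the arm,gic DT
 * bindings, the first cell selects the interrupt type (GIC_SPI = 0 or
 * GIC_PPI = 1), the second cell is the interrupt number relative to
 * that type's base INTID and the optional third cell carries trigger
 * flags. For example:
 *
 *	interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
 *
 * resolves to INTID 34 + 32 = 66, level-high triggered.
 */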
static int gic_dt_get_irq(const uint32_t *properties, int count, uint32_t *type,
			  uint32_t *prio)
{
	int it_num = DT_INFO_INVALID_INTERRUPT;
	uint32_t detection_type = IRQ_TYPE_NONE;
	uint32_t interrupt_type = GIC_PPI;

	if (!properties || count < 2 || count > 3)
		return DT_INFO_INVALID_INTERRUPT;

	interrupt_type = fdt32_to_cpu(properties[0]);
	it_num = (int)fdt32_to_cpu(properties[1]);

	if (count == 3) {
		detection_type = fdt32_to_cpu(properties[2]) & GENMASK_32(3, 0);
		if (interrupt_type == GIC_PPI &&
		    detection_type != IRQ_TYPE_EDGE_RISING) {
			EMSG("PPI must be edge rising");
			return DT_INFO_INVALID_INTERRUPT;
		}

		if (interrupt_type == GIC_SPI &&
		    (detection_type != IRQ_TYPE_EDGE_RISING &&
		     detection_type != IRQ_TYPE_LEVEL_HIGH)) {
			EMSG("SPI must be edge rising or high level");
			return DT_INFO_INVALID_INTERRUPT;
		}
	}

	switch (interrupt_type) {
	case GIC_PPI:
		it_num += 16;
		detection_type = IRQ_TYPE_EDGE_RISING;
		break;
	case GIC_SPI:
		it_num += 32;
		break;
	default:
		return DT_INFO_INVALID_INTERRUPT;
	}

	if (type)
		*type = detection_type;

	if (prio)
		*prio = 0;

	return it_num;
}

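/*
 * On GICv3 the per-CPU redistributor frames lie back to back in
 * memory, each spanning two 64KB regions (RD_base followed by
 * SGI_base). Walk them in order, matching each frame to a core
 * position by reconstructing an MPIDR from the affinity fields of
 * GICR_TYPER, until a frame with GICR_TYPER.Last set ends the range.
 */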
static void __maybe_unused probe_redist_base_addrs(vaddr_t *gicr_base_addrs,
						   paddr_t gicr_base_pa)
{
	size_t sz = GICR_V3_PCPUBASE_SIZE;
	paddr_t pa = gicr_base_pa;
	size_t core_pos = 0;
	uint64_t mt_bit = 0;
	uint64_t mpidr = 0;
	uint64_t tv = 0;
	vaddr_t va = 0;

#ifdef ARM64
	mt_bit = read_mpidr_el1() & MPIDR_MT_MASK;
#endif
	do {
		va = core_mmu_get_va(pa, MEM_AREA_IO_SEC, sz);
		if (!va)
			panic();
		tv = io_read64(va + GICR_TYPER);

		/*
		 * Extract an MPIDR from GICR_TYPER to calculate the core
		 * position of this redistributor instance.
		 */
		mpidr = mt_bit;
		mpidr |= SHIFT_U64((tv >> GICR_TYPER_AFF3_SHIFT) &
				   MPIDR_AFFLVL_MASK, MPIDR_AFF3_SHIFT);
		mpidr |= (tv >> GICR_TYPER_AFF0_SHIFT) &
			 (MPIDR_AFF0_MASK | MPIDR_AFF1_MASK | MPIDR_AFF2_MASK);
		core_pos = get_core_pos_mpidr(mpidr);
		if (core_pos < CFG_TEE_CORE_NB_CORE) {
			DMSG("GICR_BASE[%zu] at %#"PRIxVA, core_pos, va);
			gicr_base_addrs[core_pos] = va;
		} else {
			EMSG("Skipping too large core_pos %zu from GICR_TYPER",
			     core_pos);
		}
		pa += sz;
	} while (!(tv & GICR_TYPER_LAST));
}

static void gic_init_base_addr(paddr_t gicc_base_pa, paddr_t gicd_base_pa,
			       paddr_t gicr_base_pa __maybe_unused)
{
	struct gic_data *gd = &gic_data;
	vaddr_t gicc_base = 0;
	vaddr_t gicd_base = 0;
	uint32_t vers __maybe_unused = 0;

	assert(cpu_mmu_enabled());

	gicd_base = core_mmu_get_va(gicd_base_pa, MEM_AREA_IO_SEC,
				    GIC_DIST_REG_SIZE);
	if (!gicd_base)
		panic();

	vers = io_read32(gicd_base + GICD_PIDR2);
	vers >>= GICD_PIDR2_ARCHREV_SHIFT;
	vers &= GICD_PIDR2_ARCHREV_MASK;

	if (IS_ENABLED(CFG_ARM_GICV3)) {
		assert(vers == 4 || vers == 3);
	} else {
		assert(vers == 2 || vers == 1);
		gicc_base = core_mmu_get_va(gicc_base_pa, MEM_AREA_IO_SEC,
					    GIC_CPU_REG_SIZE);
		if (!gicc_base)
			panic();
	}

	gd->gicc_base = gicc_base;
	gd->gicd_base = gicd_base;
	gd->max_it = probe_max_it(gicc_base, gicd_base);
#if defined(CFG_ARM_GICV3)
	if (affinity_routing_is_enabled(gd) && gicr_base_pa)
		probe_redist_base_addrs(gd->gicr_base, gicr_base_pa);
#endif
	gd->chip.ops = &gic_ops;

	if (IS_ENABLED(CFG_DT))
		gd->chip.dt_get_irq = gic_dt_get_irq;
}

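/*
 * Main driver initialization, typically called from platform boot code
 * once the MMU is enabled. A sketch of a typical call, where the *_PA
 * constants are assumed platform-specific physical addresses:
 *
 *	gic_init_v3(GICC_BASE_PA, GICD_BASE_PA, GICR_BASE_PA);
 *
 * gicr_base_pa may be 0 when no redistributor range is provided.
 */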
void gic_init_v3(paddr_t gicc_base_pa, paddr_t gicd_base_pa,
		 paddr_t gicr_base_pa)
{
	struct gic_data __maybe_unused *gd = &gic_data;
	size_t __maybe_unused n = 0;

	gic_init_base_addr(gicc_base_pa, gicd_base_pa, gicr_base_pa);

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	/* The GIC configuration is already initialized by TF-A */
	if (affinity_routing_is_enabled(gd)) {
		/* Secure affinity routing enabled */
		vaddr_t gicr_base = get_gicr_base(gd);

		if (gicr_base) {
			gd->per_cpu_group_status = io_read32(gicr_base +
							     GICR_IGROUPR0);
			gd->per_cpu_group_modifier = io_read32(gicr_base +
							       GICR_IGRPMODR0);
		} else {
			IMSG("GIC redistributor base address not provided");
			IMSG("Assuming default GIC group status and modifier");
			gd->per_cpu_group_status = 0xffff00ff;
			gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
		}
	} else {
		/* Legacy operation with secure affinity routing disabled */
		gd->per_cpu_group_status = io_read32(gd->gicd_base +
						     GICD_IGROUPR(0));
		gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
	}
#else /*!CFG_WITH_ARM_TRUSTED_FW*/
	/*
	 * Without TF-A, the GIC is always configured for legacy operation
	 * with secure affinity routing disabled.
	 */
	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
		/* Disable interrupts */
		io_write32(gd->gicd_base + GICD_ICENABLER(n), 0xffffffff);

		/* Make interrupts non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(n), 0xffffffff);

		/* Mark interrupts non-secure */
		if (n == 0) {
			/*
			 * Per-CPU interrupt configuration:
			 * ID0-ID7(SGI)   for Non-secure interrupts
			 * ID8-ID15(SGI)  for Secure interrupts.
			 * All PPIs are configured as Non-secure interrupts.
			 */
			gd->per_cpu_group_status = 0xffff00ff;
			gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
			io_write32(gd->gicd_base + GICD_IGROUPR(n),
				   gd->per_cpu_group_status);
		} else {
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffffffff);
		}
	}

	/*
	 * Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
	io_setbits32(gd->gicd_base + GICD_CTLR, GICD_CTLR_ENABLEGRP1S);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR, GICC_CTLR_FIQEN |
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1);
	io_setbits32(gd->gicd_base + GICD_CTLR,
		     GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1NS);
#endif
#endif /*!CFG_WITH_ARM_TRUSTED_FW*/

	interrupt_main_init(&gic_data.chip);
}

static void gic_it_configure(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Disable the interrupt */
	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
	/* Make it non-pending */
	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
	/* Assign it to group0 */
	io_clrbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
#if defined(CFG_ARM_GICV3)
	/* Assign it to group1S */
	io_setbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
#endif
}

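/*
 * Without affinity routing, SPI routing is controlled by the
 * GICD_ITARGETSR bank: one byte per INTID, four INTIDs per register,
 * where each bit of the byte selects one of up to eight CPU
 * interfaces. For INTID 42 the field sits in GICD_ITARGETSR(10) at
 * byte offset 42 % 4 = 2, and a cpu_mask of 0x3 routes the interrupt
 * to CPU0 and CPU1.
 */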
static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
				uint8_t cpu_mask)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
	uint32_t target = 0;
	uint32_t target_shift = 0;
	vaddr_t itargetsr = gd->gicd_base +
			    GICD_ITARGETSR(it / NUM_TARGETS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Route it to selected CPUs */
	target = io_read32(itargetsr);
	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
	target |= cpu_mask << target_shift;
	DMSG("cpu_mask: writing %#"PRIx32" to %#" PRIxVA, target, itargetsr);
	io_write32(itargetsr, target);
	DMSG("cpu_mask: %#"PRIx32, io_read32(itargetsr));
}

static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Set the priority of the interrupt */
	DMSG("prio: writing %#"PRIx8" to %#" PRIxVA,
	     prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
	io_write8(gd->gicd_base + GICD_IPRIORITYR(0) + it, prio);
}

static void gic_it_set_type(struct gic_data *gd, size_t it, uint32_t type)
{
	size_t index = it / GICD_ICFGR_NUM_INTS_PER_REG;
	uint32_t shift = (it % GICD_ICFGR_NUM_INTS_PER_REG) *
			 GICD_ICFGR_FIELD_BITS;
	uint32_t icfg = 0;

	assert(type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH);

	if (type == IRQ_TYPE_EDGE_RISING)
		icfg = GICD_ICFGR_TYPE_EDGE;
	else
		icfg = GICD_ICFGR_TYPE_LEVEL;

	io_mask32(gd->gicd_base + GICD_ICFGR(index),
		  SHIFT_U32(icfg, shift),
		  SHIFT_U32(GICD_ICFGR_FIELD_MASK, shift));
}

static void gic_it_enable(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
	vaddr_t base = gd->gicd_base;

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(base + GICD_IGROUPR(idx)) & mask));

	/* Enable the interrupt */
	io_write32(base + GICD_ISENABLER(idx), mask);
}

static void gic_it_disable(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Disable the interrupt */
	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
}

static void gic_it_set_pending(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Must be a peripheral interrupt, not an SGI */
	assert(it >= NUM_SGI);

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_ISPENDR(idx), mask);
}

static void assert_cpu_mask_is_valid(uint32_t cpu_mask)
{
	bool __maybe_unused to_others = cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS;
	bool __maybe_unused to_current = cpu_mask & ITR_CPU_MASK_TO_THIS_CPU;
	bool __maybe_unused to_list = cpu_mask & 0xff;

	/* One and only one of the bit fields shall be non-zero */
	assert(to_others + to_current + to_list == 1);
}

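/*
 * Raise an SGI. On GICv3 this goes through the ICC_SGI1R_EL1 (or
 * ICC_ASGI1R_EL1, for the other security state) system register, whose
 * 64-bit payload packs the target list in bits [15:0], Aff1/Aff2/Aff3
 * of the target cluster, the SGI INTID in bits [27:24] and the IRM
 * "all but self" routing bit (bit 40). On GICv2 the 32-bit
 * memory-mapped GICD_SGIR register is used instead, with the INTID in
 * bits [3:0], NSATT in bit 15, the CPU target list in bits [23:16] and
 * the target list filter in bits [25:24].
 */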
static void gic_it_raise_sgi(struct gic_data *gd __maybe_unused, size_t it,
			     uint32_t cpu_mask, bool ns)
{
#if defined(CFG_ARM_GICV3)
	uint32_t mask_id = it & 0xf;
	uint64_t mask = SHIFT_U64(mask_id, 24);

	assert_cpu_mask_is_valid(cpu_mask);

	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
		mask |= BIT64(GICC_SGI_IRM_BIT);
	} else {
		uint64_t mpidr = read_mpidr();
		uint64_t mask_aff1 = (mpidr & MPIDR_AFF1_MASK) >>
				     MPIDR_AFF1_SHIFT;
		uint64_t mask_aff2 = (mpidr & MPIDR_AFF2_MASK) >>
				     MPIDR_AFF2_SHIFT;
		uint64_t mask_aff3 = (mpidr & MPIDR_AFF3_MASK) >>
				     MPIDR_AFF3_SHIFT;

		mask |= SHIFT_U64(mask_aff1, GICC_SGI_AFF1_SHIFT);
		mask |= SHIFT_U64(mask_aff2, GICC_SGI_AFF2_SHIFT);
		mask |= SHIFT_U64(mask_aff3, GICC_SGI_AFF3_SHIFT);

		if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
			mask |= BIT32(mpidr & 0xf);
		} else {
			/*
			 * For now, only sending an SGI to cores in the
			 * same cluster is supported.
			 */
			mask |= cpu_mask & 0xff;
		}
	}

	/* Raise the interrupt */
	if (ns)
		write_icc_asgi1r(mask);
	else
		write_icc_sgi1r(mask);
#else
	uint32_t mask_id = it & GICD_SGIR_SIGINTID_MASK;
	uint32_t mask_group = ns;
	uint32_t mask = mask_id;

	assert_cpu_mask_is_valid(cpu_mask);

	mask |= SHIFT_U32(mask_group, GICD_SGIR_NSATT_SHIFT);
	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
		mask |= SHIFT_U32(GICD_SGIR_TO_OTHER_CPUS,
				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
	} else if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
		mask |= SHIFT_U32(GICD_SGIR_TO_THIS_CPU,
				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
	} else {
		mask |= SHIFT_U32(cpu_mask & 0xff,
				  GICD_SGIR_CPU_TARGET_LIST_SHIFT);
	}

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_SGIR, mask);
#endif
}

static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	return read_icc_iar1();
#else
	return io_read32(gd->gicc_base + GICC_IAR);
#endif
}

static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	write_icc_eoir1(eoir);
#else
	io_write32(gd->gicc_base + GICC_EOIR, eoir);
#endif
}

static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);
	return !!(io_read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
}

static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);
	return !!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
}

static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
{
	size_t reg_idx = it / NUM_TARGETS_PER_REG;
	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
				ITARGETSR_FIELD_BITS;
	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
	uint32_t target = io_read32(gd->gicd_base + GICD_ITARGETSR(reg_idx));

	assert(gd == &gic_data);
	return (target & target_mask) >> target_shift;
}

void gic_dump_state(void)
{
	struct gic_data *gd = &gic_data;
	int i = 0;

#if defined(CFG_ARM_GICV3)
	DMSG("GICC_CTLR: %#"PRIx32, read_icc_ctlr());
#else
	DMSG("GICC_CTLR: %#"PRIx32, io_read32(gd->gicc_base + GICC_CTLR));
#endif
	DMSG("GICD_CTLR: %#"PRIx32, io_read32(gd->gicd_base + GICD_CTLR));

	for (i = 0; i <= (int)gd->max_it; i++) {
		if (gic_it_is_enabled(gd, i)) {
			DMSG("irq%d: enabled, group:%d, target:%#"PRIx32, i,
			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
		}
	}
}

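/*
 * Hand a secure SPI back to the normal world at runtime. The SPI must
 * already be disabled and still be configured as secure. A minimal
 * usage sketch (SPI number 66 is only an example):
 *
 *	if (gic_spi_release_to_ns(66))
 *		EMSG("Failed to release SPI 66 to the normal world");
 */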
TEE_Result gic_spi_release_to_ns(size_t it)
{
	struct gic_data *gd = &gic_data;
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);

	if (it >= gd->max_it || it < GIC_SPI_BASE)
		return TEE_ERROR_BAD_PARAMETERS;
	/* Make sure it's already disabled */
	if (gic_it_is_enabled(gd, it))
		return TEE_ERROR_BAD_STATE;
	/* Assert it's secure to start with */
	if (gic_it_get_group(gd, it))
		return TEE_ERROR_BAD_STATE;

	mutex_lock(&gic_mutex);
	gic_it_set_cpu_mask(gd, it, 0);
	gic_it_set_prio(gd, it, GIC_SPI_PRI_NS_EL1);

	/* Clear pending status */
	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
	/* Assign it to NS Group1 */
	io_setbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
#if defined(CFG_ARM_GICV3)
	io_clrbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
#endif
	mutex_unlock(&gic_mutex);
	return TEE_SUCCESS;
}

static void __maybe_unused gic_native_itr_handler(void)
{
	struct gic_data *gd = &gic_data;
	uint32_t iar = 0;
	uint32_t id = 0;

	iar = gic_read_iar(gd);
	id = iar & GICC_IAR_IT_ID_MASK;

	if (id >= 1020 && id <= 1023) {
		/*
		 * Special INTIDs
		 * 1020: Interrupt expected to be handled at SEL1 or SEL2.
		 *       PE (Processing Element) is either executing at EL3
		 *       in AArch64 state or in monitor mode in AArch32 state.
		 *       Reserved on GIC V1 and GIC V2.
		 * 1021: Interrupt expected to be handled at NSEL1 or NSEL2
		 *       PE (Processing Element) is either executing at EL3
		 *       in AArch64 state or in monitor mode in AArch32 state.
		 *       Reserved on GIC V1 and GIC V2.
		 * 1022: -(GICv3.3): Interrupt is an NMI
		 *       -(Legacy): Group 1 interrupt to be signaled to the
		 *        PE and acknowledged using alias registers. Reserved if
		 *        interrupt grouping is not supported.
		 * 1023: No pending interrupt with sufficient priority
		 *       (spurious) or the highest priority pending interrupt is
		 *       not appropriate for the current security state or
		 *       interrupt group.
		 */
		DMSG("Special interrupt %"PRIu32, id);

		return;
	}

	if (id <= gd->max_it)
		interrupt_call_handlers(&gd->chip, id);
	else
		EMSG("Unhandled interrupt %"PRIu32, id);

	gic_write_eoir(gd, iar);
}

#ifndef CFG_CORE_WORKAROUND_ARM_NMFI
/* Override interrupt_main_handler() with driver implementation */
void interrupt_main_handler(void)
{
	gic_native_itr_handler();
}
#endif /*CFG_CORE_WORKAROUND_ARM_NMFI*/

static void gic_op_configure(struct itr_chip *chip, size_t it,
			     uint32_t type, uint32_t prio __unused)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	if (it < GIC_SPI_BASE) {
		if (gic_primary_done)
			panic("Cannot add SGI or PPI after boot");

		/* Assign it to Secure Group 1, G1S */
		gd->per_cpu_group_modifier |= BIT32(it);
		gd->per_cpu_group_status &= ~BIT32(it);
	}

	if (it < GIC_SPI_BASE && affinity_routing_is_enabled(gd)) {
		vaddr_t gicr_base = get_gicr_base(gd);

		if (!gicr_base)
			panic("GICR_BASE missing");

		/* Disable interrupt */
		io_write32(gicr_base + GICR_ICENABLER0, BIT32(it));

		/* Wait for the write to GICR_ICENABLER0 to propagate */
		gicr_wait_for_pending_write(gicr_base);

		/* Make interrupt non-pending */
		io_write32(gicr_base + GICR_ICPENDR0, BIT32(it));

		/* Make it secure */
		io_write32(gicr_base + GICR_IGROUPR0, gd->per_cpu_group_status);
		io_write32(gicr_base + GICR_IGRPMODR0,
			   gd->per_cpu_group_modifier);
	} else {
		gic_it_configure(gd, it);
		/* Set the CPU mask to deliver interrupts to any online core */
		gic_it_set_cpu_mask(gd, it, 0xff);
		gic_it_set_prio(gd, it, 0x1);
		if (type != IRQ_TYPE_NONE)
			gic_it_set_type(gd, it, type);
	}
}

static void gic_op_enable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	if (it < GIC_SPI_BASE)
		gd->per_cpu_enable |= BIT(it);

	if (it < GIC_SPI_BASE && affinity_routing_is_enabled(gd)) {
		vaddr_t gicr_base = get_gicr_base(gd);

		if (!gicr_base)
			panic("GICR_BASE missing");

		/* Assigned to G1S */
		assert(gd->per_cpu_group_modifier & BIT(it) &&
		       !(gd->per_cpu_group_status & BIT(it)));
		io_write32(gicr_base + GICR_ISENABLER0, gd->per_cpu_enable);
	} else {
		gic_it_enable(gd, it);
	}
}

static void gic_op_disable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_disable(gd, it);
}

static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_set_pending(gd, it);
}

static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			     uint32_t cpu_mask)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);
	bool ns = false;

	assert(gd == &gic_data);

	/* Must be a software generated interrupt (SGI) */
	assert(it < NUM_SGI);

	ns = BIT32(it) & gd->per_cpu_group_status;
	gic_it_raise_sgi(gd, it, cpu_mask, ns);
}

static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_set_cpu_mask(gd, it, cpu_mask);
}

#ifdef CFG_DT
/* Callback for "interrupts" and "interrupts-extended" DT node properties */
static TEE_Result dt_get_gic_chip_cb(struct dt_pargs *arg, void *priv_data,
				     struct itr_desc *itr_desc)
{
	int itr_num = DT_INFO_INVALID_INTERRUPT;
	struct itr_chip *chip = priv_data;
	uint32_t phandle_args[3] = { };
	uint32_t type = 0;
	uint32_t prio = 0;

	assert(arg && itr_desc);

	/*
	 * gic_dt_get_irq() expects the phandle arguments to still be in
	 * DT format (big-endian) whereas struct dt_pargs carries already
	 * converted values. Therefore swap the phandle arguments back.
	 * gic_dt_get_irq() consumes only the first 2 arguments.
	 */
	if (arg->args_count < 2)
		return TEE_ERROR_GENERIC;

	phandle_args[0] = cpu_to_fdt32(arg->args[0]);
	phandle_args[1] = cpu_to_fdt32(arg->args[1]);
	if (arg->args_count >= 3)
		phandle_args[2] = cpu_to_fdt32(arg->args[2]);

	itr_num = gic_dt_get_irq((const void *)phandle_args, arg->args_count,
				 &type, &prio);
	if (itr_num == DT_INFO_INVALID_INTERRUPT)
		return TEE_ERROR_GENERIC;

	gic_op_configure(chip, itr_num, type, prio);

	itr_desc->chip = chip;
	itr_desc->itr_num = itr_num;

	return TEE_SUCCESS;
}

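/*
 * Register the GIC as an interrupt provider when it is described in
 * the device tree. A typical GICv2 node (illustrative only; the
 * addresses are platform-specific assumptions) looks like:
 *
 *	intc: interrupt-controller@2c001000 {
 *		compatible = "arm,cortex-a15-gic";
 *		#interrupt-cells = <3>;
 *		interrupt-controller;
 *		reg = <0x2c001000 0x1000>, <0x2c002000 0x2000>;
 *	};
 */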
static TEE_Result gic_probe(const void *fdt, int offs, const void *cd __unused)
{
	if (interrupt_register_provider(fdt, offs, dt_get_gic_chip_cb,
					&gic_data.chip))
		panic();

	return TEE_SUCCESS;
}

static const struct dt_device_match gic_match_table[] = {
	{ .compatible = "arm,cortex-a15-gic" },
	{ .compatible = "arm,cortex-a7-gic" },
	{ .compatible = "arm,cortex-a5-gic" },
	{ .compatible = "arm,cortex-a9-gic" },
	{ .compatible = "arm,gic-400" },
	{ }
};

DEFINE_DT_DRIVER(gic_dt_driver) = {
	.name = "gic",
	.match_table = gic_match_table,
	.probe = gic_probe,
};
#endif /*CFG_DT*/

static TEE_Result gic_set_primary_done(void)
{
	gic_primary_done = true;
	return TEE_SUCCESS;
}

nex_release_init_resource(gic_set_primary_done);
1191