xref: /optee_os/core/drivers/gic.c (revision 69171bec89ce7cae515a67a8b47d9764f915255c)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2017, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <dt-bindings/interrupt-controller/arm-gic.h>
10 #include <config.h>
11 #include <compiler.h>
12 #include <drivers/gic.h>
13 #include <keep.h>
14 #include <kernel/dt.h>
15 #include <kernel/dt_driver.h>
16 #include <kernel/interrupt.h>
17 #include <kernel/panic.h>
18 #include <mm/core_memprot.h>
19 #include <mm/core_mmu.h>
20 #include <libfdt.h>
21 #include <util.h>
22 #include <io.h>
23 #include <trace.h>
24 
25 /* Offsets from gic.gicc_base */
26 #define GICC_CTLR		(0x000)
27 #define GICC_PMR		(0x004)
28 #define GICC_IAR		(0x00C)
29 #define GICC_EOIR		(0x010)
30 
31 #define GICC_CTLR_ENABLEGRP0	(1 << 0)
32 #define GICC_CTLR_ENABLEGRP1	(1 << 1)
33 #define GICD_CTLR_ENABLEGRP1S	(1 << 2)
34 #define GICC_CTLR_FIQEN		(1 << 3)
35 
36 /* Offsets from gic.gicd_base */
37 #define GICD_CTLR		(0x000)
38 #define GICD_TYPER		(0x004)
39 #define GICD_IGROUPR(n)		(0x080 + (n) * 4)
40 #define GICD_ISENABLER(n)	(0x100 + (n) * 4)
41 #define GICD_ICENABLER(n)	(0x180 + (n) * 4)
42 #define GICD_ISPENDR(n)		(0x200 + (n) * 4)
43 #define GICD_ICPENDR(n)		(0x280 + (n) * 4)
44 #define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
45 #define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
46 #define GICD_IGROUPMODR(n)	(0xd00 + (n) * 4)
47 #define GICD_SGIR		(0xF00)
48 
49 #ifdef CFG_ARM_GICV3
50 #define GICD_PIDR2		(0xFFE8)
51 #else
52 /* Called ICPIDR2 in GICv2 specification */
53 #define GICD_PIDR2		(0xFE8)
54 #endif
55 
56 #define GICD_CTLR_ENABLEGRP0	(1 << 0)
57 #define GICD_CTLR_ENABLEGRP1	(1 << 1)
58 
59 /* GICD IDR2 name differs on GICv3 and GICv2 but uses same bit map */
60 #define GICD_PIDR2_ARCHREV_SHIFT	4
61 #define GICD_PIDR2_ARCHREV_MASK		0xF
62 
63 /* Number of Private Peripheral Interrupt */
64 #define NUM_PPI	32
65 
66 /* Number of Software Generated Interrupt */
67 #define NUM_SGI			16
68 
69 /* Number of Non-secure Software Generated Interrupt */
70 #define NUM_NS_SGI		8
71 
72 /* Number of interrupts in one register */
73 #define NUM_INTS_PER_REG	32
74 
75 /* Number of targets in one register */
76 #define NUM_TARGETS_PER_REG	4
77 
78 /* Accessors to access ITARGETSRn */
79 #define ITARGETSR_FIELD_BITS	8
80 #define ITARGETSR_FIELD_MASK	0xff
81 
82 #define GICD_TYPER_IT_LINES_NUM_MASK	0x1f
83 #define GICC_IAR_IT_ID_MASK	0x3ff
84 #define GICC_IAR_CPU_ID_MASK	0x7
85 #define GICC_IAR_CPU_ID_SHIFT	10
86 
87 #define GICC_SGI_IRM_BIT	40
88 #define GICC_SGI_AFF1_SHIFT	16
89 #define GICC_SGI_AFF2_SHIFT	32
90 #define GICC_SGI_AFF3_SHIFT	48
91 
92 #define GICD_SGIR_SIGINTID_MASK			0xf
93 #define GICD_SGIR_TO_OTHER_CPUS			0x1
94 #define GICD_SGIR_TO_THIS_CPU			0x2
95 #define GICD_SGIR_TARGET_LIST_FILTER_SHIFT	24
96 #define GICD_SGIR_NSATT_SHIFT			15
97 #define GICD_SGIR_CPU_TARGET_LIST_SHIFT		16
98 
/*
 * Driver state for the single GIC instance handled by this driver.
 */
struct gic_data {
	vaddr_t gicc_base;	/* CPU interface base VA; 0 when GICv3 */
	vaddr_t gicd_base;	/* Distributor base VA */
	size_t max_it;		/* Largest interrupt ID probed from GICD */
	struct itr_chip chip;	/* Chip handle registered with itr framework */
};

/* Single instance; placed in nexus BSS (shared across partitions) */
static struct gic_data gic_data __nex_bss;

/* itr_chip operation callbacks, defined at the end of this file */
static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t type,
		       uint32_t prio);
static void gic_op_enable(struct itr_chip *chip, size_t it);
static void gic_op_disable(struct itr_chip *chip, size_t it);
static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			     uint32_t cpu_mask);
static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);

/* Note: mask/unmask are simply mapped to full disable/enable */
static const struct itr_ops gic_ops = {
	.add = gic_op_add,
	.mask = gic_op_disable,
	.unmask = gic_op_enable,
	.enable = gic_op_enable,
	.disable = gic_op_disable,
	.raise_pi = gic_op_raise_pi,
	.raise_sgi = gic_op_raise_sgi,
	.set_affinity = gic_op_set_affinity,
};
DECLARE_KEEP_PAGER(gic_ops);
129 
/*
 * Probe the largest implemented interrupt ID.
 *
 * GICD_TYPER's ITLinesNumber field gives the top GICD_ISENABLER register
 * index. Set-enable bits for unimplemented lines do not stick, so writing
 * all-ones to each ISENABLER and reading back reveals, scanning from the
 * top, the highest bit that is actually implemented.
 *
 * The CPU interface (ICC_CTLR on GICv3, GICC_CTLR otherwise) is disabled
 * around the probe so temporarily-enabled interrupts cannot be delivered,
 * and each register's prior enable state is restored through ICENABLER.
 */
static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
{
	int i;
	uint32_t old_ctlr;
	size_t ret = 0;
	size_t max_regs = io_read32(gicd_base + GICD_TYPER) &
			  GICD_TYPER_IT_LINES_NUM_MASK;

	/*
	 * Probe which interrupt number is the largest.
	 */
#if defined(CFG_ARM_GICV3)
	old_ctlr = read_icc_ctlr();
	write_icc_ctlr(0);
#else
	old_ctlr = io_read32(gicc_base + GICC_CTLR);
	io_write32(gicc_base + GICC_CTLR, 0);
#endif
	for (i = max_regs; i >= 0; i--) {
		uint32_t old_reg;
		uint32_t reg;
		int b;

		/* Try to enable every line; only implemented bits stick */
		old_reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ISENABLER(i), 0xffffffff);
		reg = io_read32(gicd_base + GICD_ISENABLER(i));
		/* Disable again the lines that were not enabled before */
		io_write32(gicd_base + GICD_ICENABLER(i), ~old_reg);
		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
			if (BIT32(b) & reg) {
				ret = i * NUM_INTS_PER_REG + b;
				goto out;
			}
		}
	}
out:
	/* Restore the CPU interface state saved above */
#if defined(CFG_ARM_GICV3)
	write_icc_ctlr(old_ctlr);
#else
	io_write32(gicc_base + GICC_CTLR, old_ctlr);
#endif
	return ret;
}
172 
/*
 * Per-CPU GIC initialization, called on each core once the global
 * gic_data has been set up by gic_init()/gic_init_base_addr().
 */
void gic_cpu_init(void)
{
	struct gic_data *gd = &gic_data;

	/* gic_init_base_addr() must have run first */
#if defined(CFG_ARM_GICV3)
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif

	/* per-CPU interrupts config:
	 * ID0-ID7(SGI)   for Non-secure interrupts
	 * ID8-ID15(SGI)  for Secure interrupts.
	 * All PPI config as Non-secure interrupts.
	 */
	io_write32(gd->gicd_base + GICD_IGROUPR(0), 0xffff00ff);

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC: both groups, with group 0 signalled as FIQ */
	io_write32(gd->gicc_base + GICC_CTLR,
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		   GICC_CTLR_FIQEN);
#endif
}
205 
206 static int gic_dt_get_irq(const uint32_t *properties, int count, uint32_t *type,
207 			  uint32_t *prio)
208 {
209 	int it_num = DT_INFO_INVALID_INTERRUPT;
210 
211 	if (type)
212 		*type = IRQ_TYPE_NONE;
213 
214 	if (prio)
215 		*prio = 0;
216 
217 	if (!properties || count < 2)
218 		return DT_INFO_INVALID_INTERRUPT;
219 
220 	it_num = fdt32_to_cpu(properties[1]);
221 
222 	switch (fdt32_to_cpu(properties[0])) {
223 	case GIC_PPI:
224 		it_num += 16;
225 		break;
226 	case GIC_SPI:
227 		it_num += 32;
228 		break;
229 	default:
230 		it_num = DT_INFO_INVALID_INTERRUPT;
231 	}
232 
233 	return it_num;
234 }
235 
/*
 * Map the GIC registers and fill in gic_data.
 *
 * The distributor is always mapped; its PIDR2 (ICPIDR2 on GICv2) ArchRev
 * field is checked against the configured GIC version. The CPU interface
 * is only memory-mapped (and thus only mapped here) for GICv2; GICv3 uses
 * system registers instead. Panics on any mapping failure.
 */
static void gic_init_base_addr(paddr_t gicc_base_pa, paddr_t gicd_base_pa)
{
	struct gic_data *gd = &gic_data;
	vaddr_t gicc_base = 0;
	vaddr_t gicd_base = 0;
	uint32_t vers __maybe_unused = 0;

	/* core_mmu_get_va() requires the MMU to be up */
	assert(cpu_mmu_enabled());

	gicd_base = core_mmu_get_va(gicd_base_pa, MEM_AREA_IO_SEC,
				    GIC_DIST_REG_SIZE);
	if (!gicd_base)
		panic();

	/* Extract the architecture revision from GICD_PIDR2 */
	vers = io_read32(gicd_base + GICD_PIDR2);
	vers >>= GICD_PIDR2_ARCHREV_SHIFT;
	vers &= GICD_PIDR2_ARCHREV_MASK;

	if (IS_ENABLED(CFG_ARM_GICV3)) {
		assert(vers == 3);
	} else {
		assert(vers == 2);
		gicc_base = core_mmu_get_va(gicc_base_pa, MEM_AREA_IO_SEC,
					    GIC_CPU_REG_SIZE);
		if (!gicc_base)
			panic();
	}

	gd->gicc_base = gicc_base;
	gd->gicd_base = gicd_base;
	gd->max_it = probe_max_it(gicc_base, gicd_base);
	gd->chip.ops = &gic_ops;

	if (IS_ENABLED(CFG_DT))
		gd->chip.dt_get_irq = gic_dt_get_irq;
}
272 
/*
 * One-time GIC initialization on the primary core: map the registers,
 * optionally apply a full distributor/CPU-interface configuration, then
 * register this driver as the main interrupt chip.
 */
void gic_init(paddr_t gicc_base_pa, paddr_t gicd_base_pa)
{
	struct gic_data __maybe_unused *gd = &gic_data;
	size_t __maybe_unused n = 0;

	gic_init_base_addr(gicc_base_pa, gicd_base_pa);

	/* GIC configuration is initialized from TF-A when embedded */
#ifndef CFG_WITH_ARM_TRUSTED_FW
	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
		/* Disable interrupts */
		io_write32(gd->gicd_base + GICD_ICENABLER(n), 0xffffffff);

		/* Make interrupts non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(n), 0xffffffff);

		/* Mark interrupts non-secure */
		if (n == 0) {
			/* per-CPU interrupts config:
			 * ID0-ID7(SGI)	  for Non-secure interrupts
			 * ID8-ID15(SGI)  for Secure interrupts.
			 * All PPI config as Non-secure interrupts.
			 */
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffff00ff);
		} else {
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffffffff);
		}
	}

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
	io_setbits32(gd->gicd_base + GICD_CTLR, GICD_CTLR_ENABLEGRP1S);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR, GICC_CTLR_FIQEN |
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1);
	io_setbits32(gd->gicd_base + GICD_CTLR,
		     GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1);
#endif
#endif /*CFG_WITH_ARM_TRUSTED_FW*/

	interrupt_main_init(&gic_data.chip);
}
322 
323 static void gic_it_add(struct gic_data *gd, size_t it)
324 {
325 	size_t idx = it / NUM_INTS_PER_REG;
326 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
327 
328 	assert(gd == &gic_data);
329 
330 	/* Disable the interrupt */
331 	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
332 	/* Make it non-pending */
333 	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
334 	/* Assign it to group0 */
335 	io_clrbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
336 #if defined(CFG_ARM_GICV3)
337 	/* Assign it to group1S */
338 	io_setbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
339 #endif
340 }
341 
342 static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
343 				uint8_t cpu_mask)
344 {
345 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
346 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
347 	uint32_t target, target_shift;
348 	vaddr_t itargetsr = gd->gicd_base +
349 			    GICD_ITARGETSR(it / NUM_TARGETS_PER_REG);
350 
351 	assert(gd == &gic_data);
352 
353 	/* Assigned to group0 */
354 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
355 
356 	/* Route it to selected CPUs */
357 	target = io_read32(itargetsr);
358 	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
359 	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
360 	target |= cpu_mask << target_shift;
361 	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA, target, itargetsr);
362 	io_write32(itargetsr, target);
363 	DMSG("cpu_mask: 0x%x", io_read32(itargetsr));
364 }
365 
366 static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
367 {
368 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
369 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
370 
371 	assert(gd == &gic_data);
372 
373 	/* Assigned to group0 */
374 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
375 
376 	/* Set prio it to selected CPUs */
377 	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
378 		prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
379 	io_write8(gd->gicd_base + GICD_IPRIORITYR(0) + it, prio);
380 }
381 
382 static void gic_it_enable(struct gic_data *gd, size_t it)
383 {
384 	size_t idx = it / NUM_INTS_PER_REG;
385 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
386 	vaddr_t base = gd->gicd_base;
387 
388 	assert(gd == &gic_data);
389 
390 	/* Assigned to group0 */
391 	assert(!(io_read32(base + GICD_IGROUPR(idx)) & mask));
392 
393 	/* Enable the interrupt */
394 	io_write32(base + GICD_ISENABLER(idx), mask);
395 }
396 
397 static void gic_it_disable(struct gic_data *gd, size_t it)
398 {
399 	size_t idx = it / NUM_INTS_PER_REG;
400 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
401 
402 	assert(gd == &gic_data);
403 
404 	/* Assigned to group0 */
405 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
406 
407 	/* Disable the interrupt */
408 	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
409 }
410 
411 static void gic_it_set_pending(struct gic_data *gd, size_t it)
412 {
413 	size_t idx = it / NUM_INTS_PER_REG;
414 	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);
415 
416 	assert(gd == &gic_data);
417 
418 	/* Should be Peripheral Interrupt */
419 	assert(it >= NUM_SGI);
420 
421 	/* Raise the interrupt */
422 	io_write32(gd->gicd_base + GICD_ISPENDR(idx), mask);
423 }
424 
425 static void assert_cpu_mask_is_valid(uint32_t cpu_mask)
426 {
427 	bool __maybe_unused to_others = cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS;
428 	bool __maybe_unused to_current = cpu_mask & ITR_CPU_MASK_TO_THIS_CPU;
429 	bool __maybe_unused to_list = cpu_mask & 0xff;
430 
431 	/* One and only one of the bit fields shall be non-zero */
432 	assert(to_others + to_current + to_list == 1);
433 }
434 
/*
 * Raise Software Generated Interrupt @it towards the CPUs selected by
 * @cpu_mask (one of ITR_CPU_MASK_TO_OTHER_CPUS, ITR_CPU_MASK_TO_THIS_CPU
 * or an explicit CPU list in the low byte). @group selects the interrupt
 * group the SGI is raised for: non-zero targets the group 1 (non-secure)
 * path, zero the secure one.
 *
 * GICv3 uses the ICC_SGI1R/ICC_ASGI1R system registers; GICv2 writes the
 * memory-mapped GICD_SGIR register.
 */
static void gic_it_raise_sgi(struct gic_data *gd __maybe_unused, size_t it,
			     uint32_t cpu_mask, uint8_t group)
{
#if defined(CFG_ARM_GICV3)
	/* INTID goes in bits [27:24] of ICC_SGI1R */
	uint32_t mask_id = it & 0xf;
	uint64_t mask = SHIFT_U64(mask_id, 24);

	assert_cpu_mask_is_valid(cpu_mask);

	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
		/* IRM bit: route to all PEs except the sender */
		mask |= BIT64(GICC_SGI_IRM_BIT);
	} else {
		/* Build the Aff3.Aff2.Aff1 fields from this core's MPIDR */
		uint64_t mpidr = read_mpidr();
		uint64_t mask_aff1 = (mpidr & MPIDR_AFF1_MASK) >>
				     MPIDR_AFF1_SHIFT;
		uint64_t mask_aff2 = (mpidr & MPIDR_AFF2_MASK) >>
				     MPIDR_AFF2_SHIFT;
		uint64_t mask_aff3 = (mpidr & MPIDR_AFF3_MASK) >>
				     MPIDR_AFF3_SHIFT;

		mask |= SHIFT_U64(mask_aff1, GICC_SGI_AFF1_SHIFT);
		mask |= SHIFT_U64(mask_aff2, GICC_SGI_AFF2_SHIFT);
		mask |= SHIFT_U64(mask_aff3, GICC_SGI_AFF3_SHIFT);

		if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
			/* Target list bit derived from this core's Aff0 */
			mask |= BIT32(mpidr & 0xf);
		} else {
			/*
			 * Only support sending SGI to the cores in the
			 * same cluster now.
			 */
			mask |= cpu_mask & 0xff;
		}
	}

	/* Raise the interrupt: ASGI1R targets the other security state */
	if (group)
		write_icc_asgi1r(mask);
	else
		write_icc_sgi1r(mask);
#else
	uint32_t mask_id = it & GICD_SGIR_SIGINTID_MASK;
	uint32_t mask_group = group & 0x1;
	uint32_t mask = mask_id;

	assert_cpu_mask_is_valid(cpu_mask);

	/* NSATT selects which group the SGI is forwarded as */
	mask |= SHIFT_U32(mask_group, GICD_SGIR_NSATT_SHIFT);
	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
		mask |= SHIFT_U32(GICD_SGIR_TO_OTHER_CPUS,
				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
	} else if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
		mask |= SHIFT_U32(GICD_SGIR_TO_THIS_CPU,
				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
	} else {
		/* Explicit CPU target list in bits [23:16] */
		mask |= SHIFT_U32(cpu_mask & 0xff,
				  GICD_SGIR_CPU_TARGET_LIST_SHIFT);
	}

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_SGIR, mask);
#endif
}
498 
/*
 * Acknowledge the highest-priority pending interrupt and return the raw
 * IAR value (interrupt ID, plus source CPU field on GICv2).
 */
static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	return read_icc_iar1();
#else
	return io_read32(gd->gicc_base + GICC_IAR);
#endif
}
509 
/*
 * Signal end-of-interrupt. @eoir must be the unmodified value previously
 * returned by gic_read_iar() for this interrupt.
 */
static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	write_icc_eoir1(eoir);
#else
	io_write32(gd->gicc_base + GICC_EOIR, eoir);
#endif
}
520 
521 static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
522 {
523 	size_t idx = it / NUM_INTS_PER_REG;
524 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
525 
526 	assert(gd == &gic_data);
527 	return !!(io_read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
528 }
529 
530 static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
531 {
532 	size_t idx = it / NUM_INTS_PER_REG;
533 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
534 
535 	assert(gd == &gic_data);
536 	return !!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
537 }
538 
539 static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
540 {
541 	size_t reg_idx = it / NUM_TARGETS_PER_REG;
542 	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
543 				ITARGETSR_FIELD_BITS;
544 	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
545 	uint32_t target = io_read32(gd->gicd_base + GICD_ITARGETSR(reg_idx));
546 
547 	assert(gd == &gic_data);
548 	return (target & target_mask) >> target_shift;
549 }
550 
/*
 * Debug helper: dump the GIC control registers and, for every enabled
 * interrupt, its group and target CPU mask.
 */
void gic_dump_state(void)
{
	struct gic_data *gd = &gic_data;
	int i = 0;

#if defined(CFG_ARM_GICV3)
	DMSG("GICC_CTLR: 0x%x", read_icc_ctlr());
#else
	DMSG("GICC_CTLR: 0x%x", io_read32(gd->gicc_base + GICC_CTLR));
#endif
	DMSG("GICD_CTLR: 0x%x", io_read32(gd->gicd_base + GICD_CTLR));

	for (i = 0; i <= (int)gd->max_it; i++) {
		if (gic_it_is_enabled(gd, i)) {
			DMSG("irq%d: enabled, group:%d, target:%x", i,
			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
		}
	}
}
570 
/*
 * Main native interrupt handler: acknowledge the pending interrupt,
 * dispatch it to registered handlers if its ID is in range (out-of-range
 * IDs, e.g. the spurious interrupt ID, are logged and dropped), then
 * signal end-of-interrupt.
 */
static void __maybe_unused gic_native_itr_handler(void)
{
	struct gic_data *gd = &gic_data;
	uint32_t iar = 0;
	uint32_t id = 0;

	iar = gic_read_iar(gd);
	/* GICv2 IAR also carries the source CPU ID; keep only the INTID */
	id = iar & GICC_IAR_IT_ID_MASK;

	if (id <= gd->max_it)
		interrupt_call_handlers(&gd->chip, id);
	else
		DMSG("ignoring interrupt %" PRIu32, id);

	/* EOI must be written with the unmodified IAR value */
	gic_write_eoir(gd, iar);
}
587 
#ifndef CFG_CORE_WORKAROUND_ARM_NMFI
/*
 * Override the weak interrupt_main_handler() with this driver's
 * implementation (skipped with the ARM NMFI workaround enabled).
 */
void interrupt_main_handler(void)
{
	gic_native_itr_handler();
}
#endif /*CFG_CORE_WORKAROUND_ARM_NMFI*/
595 
596 static void gic_op_add(struct itr_chip *chip, size_t it,
597 		       uint32_t type __unused,
598 		       uint32_t prio __unused)
599 {
600 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
601 
602 	assert(gd == &gic_data);
603 
604 	if (it > gd->max_it)
605 		panic();
606 
607 	gic_it_add(gd, it);
608 	/* Set the CPU mask to deliver interrupts to any online core */
609 	gic_it_set_cpu_mask(gd, it, 0xff);
610 	gic_it_set_prio(gd, it, 0x1);
611 }
612 
613 static void gic_op_enable(struct itr_chip *chip, size_t it)
614 {
615 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
616 
617 	assert(gd == &gic_data);
618 
619 	if (it > gd->max_it)
620 		panic();
621 
622 	gic_it_enable(gd, it);
623 }
624 
625 static void gic_op_disable(struct itr_chip *chip, size_t it)
626 {
627 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
628 
629 	assert(gd == &gic_data);
630 
631 	if (it > gd->max_it)
632 		panic();
633 
634 	gic_it_disable(gd, it);
635 }
636 
637 static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
638 {
639 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
640 
641 	assert(gd == &gic_data);
642 
643 	if (it > gd->max_it)
644 		panic();
645 
646 	gic_it_set_pending(gd, it);
647 }
648 
649 static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
650 			     uint32_t cpu_mask)
651 {
652 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
653 
654 	assert(gd == &gic_data);
655 
656 	/* Should be Software Generated Interrupt */
657 	assert(it < NUM_SGI);
658 
659 	if (it > gd->max_it)
660 		panic();
661 
662 	if (it < NUM_NS_SGI)
663 		gic_it_raise_sgi(gd, it, cpu_mask, 1);
664 	else
665 		gic_it_raise_sgi(gd, it, cpu_mask, 0);
666 }
667 
668 static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
669 			uint8_t cpu_mask)
670 {
671 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
672 
673 	assert(gd == &gic_data);
674 
675 	if (it > gd->max_it)
676 		panic();
677 
678 	gic_it_set_cpu_mask(gd, it, cpu_mask);
679 }
680 
681 #ifdef CFG_DT
/* Callback for "interrupts" and "interrupts-extended" DT node properties */
static TEE_Result dt_get_gic_chip_cb(struct dt_pargs *arg, void *priv_data,
				     struct itr_desc *itr_desc)
{
	int itr_num = DT_INFO_INVALID_INTERRUPT;
	struct itr_chip *chip = priv_data;
	uint32_t phandle_args[2] = { };
	uint32_t type = 0;
	uint32_t prio = 0;

	assert(arg && itr_desc);

	/*
	 * gic_dt_get_irq() expects phandle arguments passed are still in DT
	 * format (big-endian) whereas struct dt_pargs carries converted
	 * formats. Therefore swap again phandle arguments. gic_dt_get_irq()
	 * consumes only the 2 first arguments.
	 */
	if (arg->args_count < 2)
		return TEE_ERROR_GENERIC;
	phandle_args[0] = cpu_to_fdt32(arg->args[0]);
	phandle_args[1] = cpu_to_fdt32(arg->args[1]);

	itr_num = gic_dt_get_irq((const void *)phandle_args, 2, &type, &prio);
	if (itr_num == DT_INFO_INVALID_INTERRUPT)
		return TEE_ERROR_GENERIC;

	/* Configure the interrupt in the GIC before handing it out */
	gic_op_add(chip, itr_num, type, prio);

	itr_desc->chip = chip;
	itr_desc->itr_num = itr_num;

	return TEE_SUCCESS;
}
716 
717 static TEE_Result gic_probe(const void *fdt, int offs, const void *cd __unused)
718 {
719 	if (interrupt_register_provider(fdt, offs, dt_get_gic_chip_cb,
720 					&gic_data.chip))
721 		panic();
722 
723 	return TEE_SUCCESS;
724 }
725 
/* Compatible strings for the GICv2-style controllers this driver handles */
static const struct dt_device_match gic_match_table[] = {
	{ .compatible = "arm,cortex-a15-gic" },
	{ .compatible = "arm,cortex-a7-gic" },
	{ .compatible = "arm,cortex-a5-gic" },
	{ .compatible = "arm,cortex-a9-gic" },
	{ .compatible = "arm,gic-400" },
	{ }	/* sentinel */
};

DEFINE_DT_DRIVER(gic_dt_driver) = {
	.name = "gic",
	.match_table = gic_match_table,
	.probe = gic_probe,
};
740 #endif /*CFG_DT*/
741