xref: /optee_os/core/drivers/gic.c (revision a2a3dfbcd35b75f3822a37031d65eeeee7e22f90)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2017, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <dt-bindings/interrupt-controller/arm-gic.h>
10 #include <config.h>
11 #include <compiler.h>
12 #include <drivers/gic.h>
13 #include <keep.h>
14 #include <kernel/dt.h>
15 #include <kernel/dt_driver.h>
16 #include <kernel/interrupt.h>
17 #include <kernel/panic.h>
18 #include <mm/core_memprot.h>
19 #include <mm/core_mmu.h>
20 #include <libfdt.h>
21 #include <util.h>
22 #include <io.h>
23 #include <trace.h>
24 
25 /* Offsets from gic.gicc_base */
26 #define GICC_CTLR		(0x000)
27 #define GICC_PMR		(0x004)
28 #define GICC_IAR		(0x00C)
29 #define GICC_EOIR		(0x010)
30 
31 #define GICC_CTLR_ENABLEGRP0	(1 << 0)
32 #define GICC_CTLR_ENABLEGRP1	(1 << 1)
33 #define GICD_CTLR_ENABLEGRP1S	(1 << 2)
34 #define GICC_CTLR_FIQEN		(1 << 3)
35 
36 /* Offsets from gic.gicd_base */
37 #define GICD_CTLR		(0x000)
38 #define GICD_TYPER		(0x004)
39 #define GICD_IGROUPR(n)		(0x080 + (n) * 4)
40 #define GICD_ISENABLER(n)	(0x100 + (n) * 4)
41 #define GICD_ICENABLER(n)	(0x180 + (n) * 4)
42 #define GICD_ISPENDR(n)		(0x200 + (n) * 4)
43 #define GICD_ICPENDR(n)		(0x280 + (n) * 4)
44 #define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
45 #define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
46 #define GICD_IGROUPMODR(n)	(0xd00 + (n) * 4)
47 #define GICD_SGIR		(0xF00)
48 
49 #define GICD_CTLR_ENABLEGRP0	(1 << 0)
50 #define GICD_CTLR_ENABLEGRP1	(1 << 1)
51 
52 /* Number of Private Peripheral Interrupt */
53 #define NUM_PPI	32
54 
55 /* Number of Software Generated Interrupt */
56 #define NUM_SGI			16
57 
58 /* Number of Non-secure Software Generated Interrupt */
59 #define NUM_NS_SGI		8
60 
61 /* Number of interrupts in one register */
62 #define NUM_INTS_PER_REG	32
63 
64 /* Number of targets in one register */
65 #define NUM_TARGETS_PER_REG	4
66 
67 /* Accessors to access ITARGETSRn */
68 #define ITARGETSR_FIELD_BITS	8
69 #define ITARGETSR_FIELD_MASK	0xff
70 
71 #define GICD_TYPER_IT_LINES_NUM_MASK	0x1f
72 #define GICC_IAR_IT_ID_MASK	0x3ff
73 #define GICC_IAR_CPU_ID_MASK	0x7
74 #define GICC_IAR_CPU_ID_SHIFT	10
75 
76 #define GICC_SGI_IRM_BIT	40
77 #define GICC_SGI_AFF1_SHIFT	16
78 #define GICC_SGI_AFF2_SHIFT	32
79 #define GICC_SGI_AFF3_SHIFT	48
80 
81 #define GICD_SGIR_SIGINTID_MASK			0xf
82 #define GICD_SGIR_TO_OTHER_CPUS			0x1
83 #define GICD_SGIR_TO_THIS_CPU			0x2
84 #define GICD_SGIR_TARGET_LIST_FILTER_SHIFT	24
85 #define GICD_SGIR_NSATT_SHIFT			15
86 #define GICD_SGIR_CPU_TARGET_LIST_SHIFT		16
87 
/*
 * Driver state for the single supported GIC instance.
 *
 * @gicc_base: virtual base address of the CPU interface (left 0 with
 *             CFG_ARM_GICV3 where system registers are used instead)
 * @gicd_base: virtual base address of the distributor
 * @max_it:    largest implemented interrupt ID, probed at init time
 * @chip:      interrupt chip instance registered with the interrupt
 *             framework
 */
struct gic_data {
	vaddr_t gicc_base;
	vaddr_t gicd_base;
	size_t max_it;
	struct itr_chip chip;
};

/* Single driver instance, placed in __nex_bss (nexus/shared memory) */
static struct gic_data gic_data __nex_bss;
96 
/* Handlers exposed through the itr_chip interface, defined below */
static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t type,
		       uint32_t prio);
static void gic_op_enable(struct itr_chip *chip, size_t it);
static void gic_op_disable(struct itr_chip *chip, size_t it);
static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			     uint32_t cpu_mask);
static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);

/* Interrupt controller operations table for the interrupt framework */
static const struct itr_ops gic_ops = {
	.add = gic_op_add,
	/* mask/unmask map to disable/enable: same GIC registers are used */
	.mask = gic_op_disable,
	.unmask = gic_op_enable,
	.enable = gic_op_enable,
	.disable = gic_op_disable,
	.raise_pi = gic_op_raise_pi,
	.raise_sgi = gic_op_raise_sgi,
	.set_affinity = gic_op_set_affinity,
};
DECLARE_KEEP_PAGER(gic_ops);
118 
/*
 * Probe the largest interrupt ID implemented by the distributor.
 *
 * GICD_TYPER.ITLinesNumber gives the index of the last GICD_ISENABLERn
 * register (32 interrupts per register). With the CPU interface disabled,
 * all-ones is written to each set-enable register: only implemented
 * interrupt bits stick, so the highest bit that reads back set identifies
 * the largest interrupt ID. The previous enable state and CPU interface
 * control register are restored before returning.
 */
static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
{
	int i;
	uint32_t old_ctlr;
	size_t ret = 0;
	size_t max_regs = io_read32(gicd_base + GICD_TYPER) &
			  GICD_TYPER_IT_LINES_NUM_MASK;

	/*
	 * Disable the CPU interface while probing so the temporarily
	 * enabled interrupts cannot be delivered.
	 */
#if defined(CFG_ARM_GICV3)
	old_ctlr = read_icc_ctlr();
	write_icc_ctlr(0);
#else
	old_ctlr = io_read32(gicc_base + GICC_CTLR);
	io_write32(gicc_base + GICC_CTLR, 0);
#endif
	for (i = max_regs; i >= 0; i--) {
		uint32_t old_reg;
		uint32_t reg;
		int b;

		old_reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ISENABLER(i), 0xffffffff);
		reg = io_read32(gicd_base + GICD_ISENABLER(i));
		/* Re-disable the interrupts that were not enabled before */
		io_write32(gicd_base + GICD_ICENABLER(i), ~old_reg);
		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
			if (BIT32(b) & reg) {
				ret = i * NUM_INTS_PER_REG + b;
				goto out;
			}
		}
	}
out:
#if defined(CFG_ARM_GICV3)
	write_icc_ctlr(old_ctlr);
#else
	io_write32(gicc_base + GICC_CTLR, old_ctlr);
#endif
	return ret;
}
161 
/*
 * Per-CPU GIC initialization: configure the banked SGI/PPI group
 * assignment and enable interrupt delivery for the calling core.
 * gic_init() must have run first so the base addresses are set.
 */
void gic_cpu_init(void)
{
	struct gic_data *gd = &gic_data;

#if defined(CFG_ARM_GICV3)
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif

	/* per-CPU interrupts config:
	 * ID0-ID7(SGI)   for Non-secure interrupts
	 * ID8-ID15(SGI)  for Secure interrupts.
	 * All PPI config as Non-secure interrupts.
	 */
	io_write32(gd->gicd_base + GICD_IGROUPR(0), 0xffff00ff);

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC: both groups, with group 0 signalled as FIQ */
	io_write32(gd->gicc_base + GICC_CTLR,
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		   GICC_CTLR_FIQEN);
#endif
}
194 
195 static int gic_dt_get_irq(const uint32_t *properties, int count, uint32_t *type,
196 			  uint32_t *prio)
197 {
198 	int it_num = DT_INFO_INVALID_INTERRUPT;
199 
200 	if (type)
201 		*type = IRQ_TYPE_NONE;
202 
203 	if (prio)
204 		*prio = 0;
205 
206 	if (!properties || count < 2)
207 		return DT_INFO_INVALID_INTERRUPT;
208 
209 	it_num = fdt32_to_cpu(properties[1]);
210 
211 	switch (fdt32_to_cpu(properties[0])) {
212 	case GIC_PPI:
213 		it_num += 16;
214 		break;
215 	case GIC_SPI:
216 		it_num += 32;
217 		break;
218 	default:
219 		it_num = DT_INFO_INVALID_INTERRUPT;
220 	}
221 
222 	return it_num;
223 }
224 
/*
 * Map the distributor (and, for GICv2, the CPU interface) into secure
 * virtual memory, probe the interrupt range and set up the chip ops.
 * Panics if any mapping fails. Requires the MMU to be enabled.
 */
static void gic_init_base_addr(paddr_t gicc_base_pa, paddr_t gicd_base_pa)
{
	struct gic_data *gd = &gic_data;
	vaddr_t gicc_base = 0;
	vaddr_t gicd_base = 0;

	assert(cpu_mmu_enabled());

	gicd_base = core_mmu_get_va(gicd_base_pa, MEM_AREA_IO_SEC,
				    GIC_DIST_REG_SIZE);
	if (!gicd_base)
		panic();

	/* GICv3 reaches the CPU interface via system registers: no mapping */
	if (!IS_ENABLED(CFG_ARM_GICV3)) {
		gicc_base = core_mmu_get_va(gicc_base_pa, MEM_AREA_IO_SEC,
					    GIC_CPU_REG_SIZE);
		if (!gicc_base)
			panic();
	}

	gd->gicc_base = gicc_base;
	gd->gicd_base = gicd_base;
	gd->max_it = probe_max_it(gicc_base, gicd_base);
	gd->chip.ops = &gic_ops;

	if (IS_ENABLED(CFG_DT))
		gd->chip.dt_get_irq = gic_dt_get_irq;
}
253 
/*
 * Initialize the GIC driver from the physical base addresses of the CPU
 * interface (@gicc_base_pa, unused with CFG_ARM_GICV3) and the distributor
 * (@gicd_base_pa), then register the GIC as the main interrupt controller.
 *
 * When OP-TEE runs on top of TF-A (CFG_WITH_ARM_TRUSTED_FW), TF-A has
 * already configured the GIC, so only base address setup is done here.
 */
void gic_init(paddr_t gicc_base_pa, paddr_t gicd_base_pa)
{
	struct gic_data __maybe_unused *gd = &gic_data;
	size_t __maybe_unused n = 0;

	gic_init_base_addr(gicc_base_pa, gicd_base_pa);

	/* GIC configuration is initialized from TF-A when embedded */
#ifndef CFG_WITH_ARM_TRUSTED_FW
	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
		/* Disable interrupts */
		io_write32(gd->gicd_base + GICD_ICENABLER(n), 0xffffffff);

		/* Make interrupts non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(n), 0xffffffff);

		/* Mark interrupts non-secure */
		if (n == 0) {
			/* per-CPU interrupts config:
			 * ID0-ID7(SGI)	  for Non-secure interrupts
			 * ID8-ID15(SGI)  for Secure interrupts.
			 * All PPI config as Non-secure interrupts.
			 */
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffff00ff);
		} else {
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffffffff);
		}
	}

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
	io_setbits32(gd->gicd_base + GICD_CTLR, GICD_CTLR_ENABLEGRP1S);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC: CPU interface and distributor, both groups */
	io_write32(gd->gicc_base + GICC_CTLR, GICC_CTLR_FIQEN |
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1);
	io_setbits32(gd->gicd_base + GICD_CTLR,
		     GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1);
#endif
#endif /*CFG_WITH_ARM_TRUSTED_FW*/

	interrupt_main_init(&gic_data.chip);
}
303 
/*
 * Claim interrupt @it for secure world: leave it disabled and non-pending,
 * and assign it to group 0 (with CFG_ARM_GICV3: secure group 1 via
 * GICD_IGROUPMODR).
 */
static void gic_it_add(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Disable the interrupt */
	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
	/* Make it non-pending */
	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
	/* Assign it to group0 */
	io_clrbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
#if defined(CFG_ARM_GICV3)
	/* Assign it to group1S */
	io_setbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
#endif
}
322 
/*
 * Route secure interrupt @it to the CPUs selected in @cpu_mask by updating
 * the per-interrupt 8-bit field in the matching GICD_ITARGETSR register.
 */
static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
				uint8_t cpu_mask)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
	uint32_t target, target_shift;
	vaddr_t itargetsr = gd->gicd_base +
			    GICD_ITARGETSR(it / NUM_TARGETS_PER_REG);

	assert(gd == &gic_data);

	/* Interrupt must have been assigned to group 0 (secure) */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Read-modify-write only this interrupt's byte field */
	target = io_read32(itargetsr);
	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
	target |= cpu_mask << target_shift;
	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA, target, itargetsr);
	io_write32(itargetsr, target);
	DMSG("cpu_mask: 0x%x", io_read32(itargetsr));
}
346 
/*
 * Set the priority of secure interrupt @it. GICD_IPRIORITYR holds one
 * byte per interrupt, hence the byte-wise write at offset @it.
 */
static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Interrupt must have been assigned to group 0 (secure) */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Set the priority byte for this interrupt */
	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
		prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
	io_write8(gd->gicd_base + GICD_IPRIORITYR(0) + it, prio);
}
362 
363 static void gic_it_enable(struct gic_data *gd, size_t it)
364 {
365 	size_t idx = it / NUM_INTS_PER_REG;
366 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
367 	vaddr_t base = gd->gicd_base;
368 
369 	assert(gd == &gic_data);
370 
371 	/* Assigned to group0 */
372 	assert(!(io_read32(base + GICD_IGROUPR(idx)) & mask));
373 
374 	/* Enable the interrupt */
375 	io_write32(base + GICD_ISENABLER(idx), mask);
376 }
377 
378 static void gic_it_disable(struct gic_data *gd, size_t it)
379 {
380 	size_t idx = it / NUM_INTS_PER_REG;
381 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
382 
383 	assert(gd == &gic_data);
384 
385 	/* Assigned to group0 */
386 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
387 
388 	/* Disable the interrupt */
389 	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
390 }
391 
/* Software-raise peripheral interrupt @it by setting its pending bit */
static void gic_it_set_pending(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Should be Peripheral Interrupt: SGIs are raised via GICD_SGIR */
	assert(it >= NUM_SGI);

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_ISPENDR(idx), mask);
}
405 
406 static void assert_cpu_mask_is_valid(uint32_t cpu_mask)
407 {
408 	bool __maybe_unused to_others = cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS;
409 	bool __maybe_unused to_current = cpu_mask & ITR_CPU_MASK_TO_THIS_CPU;
410 	bool __maybe_unused to_list = cpu_mask & 0xff;
411 
412 	/* One and only one of the bit fields shall be non-zero */
413 	assert(to_others + to_current + to_list == 1);
414 }
415 
/*
 * Raise SGI @it towards the CPUs selected by @cpu_mask.
 *
 * @cpu_mask carries exactly one of ITR_CPU_MASK_TO_OTHER_CPUS,
 * ITR_CPU_MASK_TO_THIS_CPU or an explicit 8-bit CPU target list
 * (checked by assert_cpu_mask_is_valid()).
 * @group is non-zero for the non-secure SGIs and zero for the secure
 * ones (see gic_op_raise_sgi()).
 */
static void gic_it_raise_sgi(struct gic_data *gd __maybe_unused, size_t it,
			     uint32_t cpu_mask, uint8_t group)
{
#if defined(CFG_ARM_GICV3)
	uint32_t mask_id = it & 0xf;
	uint64_t mask = SHIFT_U64(mask_id, 24);

	assert_cpu_mask_is_valid(cpu_mask);

	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
		/* IRM bit: route to all CPUs except the current one */
		mask |= BIT64(GICC_SGI_IRM_BIT);
	} else {
		/* Target the current CPU's cluster (affinity levels 1..3) */
		uint64_t mpidr = read_mpidr();
		uint64_t mask_aff1 = (mpidr & MPIDR_AFF1_MASK) >>
				     MPIDR_AFF1_SHIFT;
		uint64_t mask_aff2 = (mpidr & MPIDR_AFF2_MASK) >>
				     MPIDR_AFF2_SHIFT;
		uint64_t mask_aff3 = (mpidr & MPIDR_AFF3_MASK) >>
				     MPIDR_AFF3_SHIFT;

		mask |= SHIFT_U64(mask_aff1, GICC_SGI_AFF1_SHIFT);
		mask |= SHIFT_U64(mask_aff2, GICC_SGI_AFF2_SHIFT);
		mask |= SHIFT_U64(mask_aff3, GICC_SGI_AFF3_SHIFT);

		if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
			mask |= BIT32(mpidr & 0xf);
		} else {
			/*
			 * Only support sending SGI to the cores in the
			 * same cluster now.
			 */
			mask |= cpu_mask & 0xff;
		}
	}

	/* Raise the interrupt: ASGI1R for the non-secure group, else SGI1R */
	if (group)
		write_icc_asgi1r(mask);
	else
		write_icc_sgi1r(mask);
#else
	uint32_t mask_id = it & GICD_SGIR_SIGINTID_MASK;
	uint32_t mask_group = group & 0x1;
	uint32_t mask = mask_id;

	assert_cpu_mask_is_valid(cpu_mask);

	/* NSATT bit selects which group the SGI is raised for */
	mask |= SHIFT_U32(mask_group, GICD_SGIR_NSATT_SHIFT);
	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
		mask |= SHIFT_U32(GICD_SGIR_TO_OTHER_CPUS,
				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
	} else if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
		mask |= SHIFT_U32(GICD_SGIR_TO_THIS_CPU,
				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
	} else {
		mask |= SHIFT_U32(cpu_mask & 0xff,
				  GICD_SGIR_CPU_TARGET_LIST_SHIFT);
	}

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_SGIR, mask);
#endif
}
479 
/* Read the Interrupt Acknowledge Register, acknowledging the interrupt */
static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	/* GICv3 group 1 acknowledge is done through a system register */
	return read_icc_iar1();
#else
	return io_read32(gd->gicc_base + GICC_IAR);
#endif
}
490 
/* Signal End Of Interrupt with @eoir, the value previously read from IAR */
static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	write_icc_eoir1(eoir);
#else
	io_write32(gd->gicc_base + GICC_EOIR, eoir);
#endif
}
501 
502 static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
503 {
504 	size_t idx = it / NUM_INTS_PER_REG;
505 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
506 
507 	assert(gd == &gic_data);
508 	return !!(io_read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
509 }
510 
511 static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
512 {
513 	size_t idx = it / NUM_INTS_PER_REG;
514 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
515 
516 	assert(gd == &gic_data);
517 	return !!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
518 }
519 
520 static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
521 {
522 	size_t reg_idx = it / NUM_TARGETS_PER_REG;
523 	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
524 				ITARGETSR_FIELD_BITS;
525 	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
526 	uint32_t target = io_read32(gd->gicd_base + GICD_ITARGETSR(reg_idx));
527 
528 	assert(gd == &gic_data);
529 	return (target & target_mask) >> target_shift;
530 }
531 
/*
 * Debug helper: trace the controller control registers and, for every
 * enabled interrupt, its ID, group and CPU target field.
 */
void gic_dump_state(void)
{
	struct gic_data *gd = &gic_data;
	int i = 0;

#if defined(CFG_ARM_GICV3)
	DMSG("GICC_CTLR: 0x%x", read_icc_ctlr());
#else
	DMSG("GICC_CTLR: 0x%x", io_read32(gd->gicc_base + GICC_CTLR));
#endif
	DMSG("GICD_CTLR: 0x%x", io_read32(gd->gicd_base + GICD_CTLR));

	for (i = 0; i <= (int)gd->max_it; i++) {
		if (gic_it_is_enabled(gd, i)) {
			DMSG("irq%d: enabled, group:%d, target:%x", i,
			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
		}
	}
}
551 
/*
 * Native interrupt handler: acknowledge the pending interrupt, dispatch
 * it to the registered handlers when it is within the probed range
 * (out-of-range IDs, e.g. spurious ones, are only traced), then signal
 * end of interrupt.
 */
static void __maybe_unused gic_native_itr_handler(void)
{
	struct gic_data *gd = &gic_data;
	uint32_t iar = 0;
	uint32_t id = 0;

	iar = gic_read_iar(gd);
	/* Strip the source CPU ID bits to get the interrupt ID */
	id = iar & GICC_IAR_IT_ID_MASK;

	if (id <= gd->max_it)
		interrupt_call_handlers(&gd->chip, id);
	else
		DMSG("ignoring interrupt %" PRIu32, id);

	/* EOI with the unmodified IAR value (keeps the CPU ID bits) */
	gic_write_eoir(gd, iar);
}
568 
#ifndef CFG_CORE_WORKAROUND_ARM_NMFI
/*
 * Override the weak interrupt_main_handler() with the GIC implementation.
 * Omitted when CFG_CORE_WORKAROUND_ARM_NMFI is set.
 */
void interrupt_main_handler(void)
{
	gic_native_itr_handler();
}
#endif /*CFG_CORE_WORKAROUND_ARM_NMFI*/
576 
/*
 * itr_chip .add handler: configure interrupt @it as a secure interrupt
 * targeting all CPUs with a fixed priority. The generic @type and @prio
 * arguments are not used by this driver. Panics on out-of-range @it.
 */
static void gic_op_add(struct itr_chip *chip, size_t it,
		       uint32_t type __unused,
		       uint32_t prio __unused)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_add(gd, it);
	/* Set the CPU mask to deliver interrupts to any online core */
	gic_it_set_cpu_mask(gd, it, 0xff);
	gic_it_set_prio(gd, it, 0x1);
}
593 
594 static void gic_op_enable(struct itr_chip *chip, size_t it)
595 {
596 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
597 
598 	assert(gd == &gic_data);
599 
600 	if (it > gd->max_it)
601 		panic();
602 
603 	gic_it_enable(gd, it);
604 }
605 
606 static void gic_op_disable(struct itr_chip *chip, size_t it)
607 {
608 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
609 
610 	assert(gd == &gic_data);
611 
612 	if (it > gd->max_it)
613 		panic();
614 
615 	gic_it_disable(gd, it);
616 }
617 
618 static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
619 {
620 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
621 
622 	assert(gd == &gic_data);
623 
624 	if (it > gd->max_it)
625 		panic();
626 
627 	gic_it_set_pending(gd, it);
628 }
629 
630 static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
631 			     uint32_t cpu_mask)
632 {
633 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
634 
635 	assert(gd == &gic_data);
636 
637 	/* Should be Software Generated Interrupt */
638 	assert(it < NUM_SGI);
639 
640 	if (it > gd->max_it)
641 		panic();
642 
643 	if (it < NUM_NS_SGI)
644 		gic_it_raise_sgi(gd, it, cpu_mask, 1);
645 	else
646 		gic_it_raise_sgi(gd, it, cpu_mask, 0);
647 }
648 
649 static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
650 			uint8_t cpu_mask)
651 {
652 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
653 
654 	assert(gd == &gic_data);
655 
656 	if (it > gd->max_it)
657 		panic();
658 
659 	gic_it_set_cpu_mask(gd, it, cpu_mask);
660 }
661 
662 #ifdef CFG_DT
/*
 * Callback for "interrupts" and "interrupts-extended" DT node properties.
 *
 * Translates the phandle arguments into a GIC interrupt number, registers
 * that interrupt in the GIC and returns the chip/number pair to the
 * consumer driver through @itr_desc.
 */
static TEE_Result dt_get_gic_chip_cb(struct dt_pargs *arg, void *priv_data,
				     struct itr_desc *itr_desc)
{
	int itr_num = DT_INFO_INVALID_INTERRUPT;
	struct itr_chip *chip = priv_data;
	uint32_t phandle_args[2] = { };
	uint32_t type = 0;
	uint32_t prio = 0;

	assert(arg && itr_desc);

	/*
	 * gic_dt_get_irq() expects phandle arguments passed are still in DT
	 * format (big-endian) whereas struct dt_pargs carries converted
	 * formats. Therefore swap again phandle arguments. gic_dt_get_irq()
	 * consumes only the 2 first arguments.
	 */
	if (arg->args_count < 2)
		return TEE_ERROR_GENERIC;
	phandle_args[0] = cpu_to_fdt32(arg->args[0]);
	phandle_args[1] = cpu_to_fdt32(arg->args[1]);

	itr_num = gic_dt_get_irq((const void *)phandle_args, 2, &type, &prio);
	if (itr_num == DT_INFO_INVALID_INTERRUPT)
		return TEE_ERROR_GENERIC;

	/* Configure the interrupt in the GIC before handing it out */
	gic_op_add(chip, itr_num, type, prio);

	itr_desc->chip = chip;
	itr_desc->itr_num = itr_num;

	return TEE_SUCCESS;
}
697 
698 static TEE_Result gic_probe(const void *fdt, int offs, const void *cd __unused)
699 {
700 	if (interrupt_register_provider(fdt, offs, dt_get_gic_chip_cb,
701 					&gic_data.chip))
702 		panic();
703 
704 	return TEE_SUCCESS;
705 }
706 
/* GICv2-compatible interrupt controllers this driver probes from the DT */
static const struct dt_device_match gic_match_table[] = {
	{ .compatible = "arm,cortex-a15-gic" },
	{ .compatible = "arm,cortex-a7-gic" },
	{ .compatible = "arm,cortex-a5-gic" },
	{ .compatible = "arm,cortex-a9-gic" },
	{ .compatible = "arm,gic-400" },
	{ }
};

DEFINE_DT_DRIVER(gic_dt_driver) = {
	.name = "gic",
	.match_table = gic_match_table,
	.probe = gic_probe,
};
721 #endif /*CFG_DT*/
722