xref: /optee_os/core/drivers/gic.c (revision 14885eb1688bb0826c53522d4c3d99ef9c461f25)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2017, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <config.h>
10 #include <compiler.h>
11 #include <drivers/gic.h>
12 #include <keep.h>
13 #include <kernel/dt.h>
14 #include <kernel/dt_driver.h>
15 #include <kernel/interrupt.h>
16 #include <kernel/panic.h>
17 #include <mm/core_memprot.h>
18 #include <mm/core_mmu.h>
19 #include <libfdt.h>
20 #include <util.h>
21 #include <io.h>
22 #include <trace.h>
23 
/* Offsets from gic.gicc_base (GICv2 memory-mapped CPU interface) */
#define GICC_CTLR		(0x000)
#define GICC_PMR		(0x004)
#define GICC_IAR		(0x00C)
#define GICC_EOIR		(0x010)

/* GICC_CTLR control bits (GICD_CTLR_ENABLEGRP1S is a GICD_CTLR bit, GICv3) */
#define GICC_CTLR_ENABLEGRP0	(1 << 0)
#define GICC_CTLR_ENABLEGRP1	(1 << 1)
#define GICD_CTLR_ENABLEGRP1S	(1 << 2)
#define GICC_CTLR_FIQEN		(1 << 3)

/* Offsets from gic.gicd_base (distributor registers) */
#define GICD_CTLR		(0x000)
#define GICD_TYPER		(0x004)
#define GICD_IGROUPR(n)		(0x080 + (n) * 4)
#define GICD_ISENABLER(n)	(0x100 + (n) * 4)
#define GICD_ICENABLER(n)	(0x180 + (n) * 4)
#define GICD_ISPENDR(n)		(0x200 + (n) * 4)
#define GICD_ICPENDR(n)		(0x280 + (n) * 4)
#define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
#define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
#define GICD_IGROUPMODR(n)	(0xd00 + (n) * 4)
#define GICD_SGIR		(0xF00)

/* GICD_CTLR control bits */
#define GICD_CTLR_ENABLEGRP0	(1 << 0)
#define GICD_CTLR_ENABLEGRP1	(1 << 1)

/* Number of Private Peripheral Interrupt */
#define NUM_PPI	32

/* Number of Software Generated Interrupt */
#define NUM_SGI			16

/* Number of Non-secure Software Generated Interrupt */
#define NUM_NS_SGI		8

/* Number of interrupts in one register */
#define NUM_INTS_PER_REG	32

/* Number of targets in one register */
#define NUM_TARGETS_PER_REG	4

/* Accessors to access ITARGETSRn: one 8-bit CPU-target field per interrupt */
#define ITARGETSR_FIELD_BITS	8
#define ITARGETSR_FIELD_MASK	0xff

/* GICD_TYPER.ITLinesNumber and GICC_IAR field extraction */
#define GICD_TYPER_IT_LINES_NUM_MASK	0x1f
#define GICC_IAR_IT_ID_MASK	0x3ff
#define GICC_IAR_CPU_ID_MASK	0x7
#define GICC_IAR_CPU_ID_SHIFT	10

/* ICC_SGI1R_EL1 field layout (GICv3 SGI generation) */
#define GICC_SGI_IRM_BIT	40
#define GICC_SGI_AFF1_SHIFT	16
#define GICC_SGI_AFF2_SHIFT	32
#define GICC_SGI_AFF3_SHIFT	48

/* GICD_SGIR field layout (GICv2 SGI generation) */
#define GICD_SGIR_SIGINTID_MASK			0xf
#define GICD_SGIR_TO_OTHER_CPUS			0x1
#define GICD_SGIR_TO_THIS_CPU			0x2
#define GICD_SGIR_TARGET_LIST_FILTER_SHIFT	24
#define GICD_SGIR_NSATT_SHIFT			15
#define GICD_SGIR_CPU_TARGET_LIST_SHIFT		16
/*
 * Driver state for the single GIC instance handled by this driver:
 * virtual base addresses of the CPU interface (GICv2 only) and the
 * distributor, the largest implemented interrupt ID, and the interrupt
 * chip registered with the interrupt framework.
 */
struct gic_data {
	vaddr_t gicc_base;	/* CPU interface base, 0 when CFG_ARM_GICV3 */
	vaddr_t gicd_base;	/* Distributor base */
	size_t max_it;		/* Highest implemented interrupt ID */
	struct itr_chip chip;	/* Chip exposed through gic_ops */
};

/* Single driver instance (__nex_bss: placed in nexus BSS) */
static struct gic_data gic_data __nex_bss;
95 
/* Forward declarations for the itr_chip operation callbacks below */
static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t type,
		       uint32_t prio);
static void gic_op_enable(struct itr_chip *chip, size_t it);
static void gic_op_disable(struct itr_chip *chip, size_t it);
static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			     uint32_t cpu_mask);
static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);

/*
 * Interrupt chip operations. Mask/unmask are implemented with the same
 * callbacks as disable/enable: the interrupt is masked by disabling it
 * at the distributor.
 */
static const struct itr_ops gic_ops = {
	.add = gic_op_add,
	.mask = gic_op_disable,
	.unmask = gic_op_enable,
	.enable = gic_op_enable,
	.disable = gic_op_disable,
	.raise_pi = gic_op_raise_pi,
	.raise_sgi = gic_op_raise_sgi,
	.set_affinity = gic_op_set_affinity,
};
DECLARE_KEEP_PAGER(gic_ops);
117 
/*
 * Probe the highest implemented interrupt ID.
 *
 * GICD_TYPER.ITLinesNumber gives the number of implemented ISENABLER
 * register blocks (32 interrupts each). For each block, from the highest
 * downwards, all enable bits are temporarily set and read back: bits that
 * stick correspond to implemented interrupts. The previous enable state
 * is restored before moving on, and the CPU interface is disabled
 * (CTLR = 0) for the duration of the probe.
 */
static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
{
	int i;
	uint32_t old_ctlr;
	size_t ret = 0;
	size_t max_regs = io_read32(gicd_base + GICD_TYPER) &
			  GICD_TYPER_IT_LINES_NUM_MASK;

	/*
	 * Probe which interrupt number is the largest.
	 */
#if defined(CFG_ARM_GICV3)
	old_ctlr = read_icc_ctlr();
	write_icc_ctlr(0);
#else
	old_ctlr = io_read32(gicc_base + GICC_CTLR);
	io_write32(gicc_base + GICC_CTLR, 0);
#endif
	for (i = max_regs; i >= 0; i--) {
		uint32_t old_reg;
		uint32_t reg;
		int b;

		old_reg = io_read32(gicd_base + GICD_ISENABLER(i));
		/* Try enabling every interrupt in this block */
		io_write32(gicd_base + GICD_ISENABLER(i), 0xffffffff);
		reg = io_read32(gicd_base + GICD_ISENABLER(i));
		/* Restore: clear the bits that were not enabled before */
		io_write32(gicd_base + GICD_ICENABLER(i), ~old_reg);
		/* Highest set bit in the highest block is the answer */
		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
			if (BIT32(b) & reg) {
				ret = i * NUM_INTS_PER_REG + b;
				goto out;
			}
		}
	}
out:
#if defined(CFG_ARM_GICV3)
	write_icc_ctlr(old_ctlr);
#else
	io_write32(gicc_base + GICC_CTLR, old_ctlr);
#endif
	return ret;
}
160 
/*
 * Per-CPU GIC setup: configure SGI/PPI grouping, the priority mask and
 * enable the CPU interface. Must run on each CPU after gic_init() has
 * initialized the driver state (asserted via the base addresses).
 */
void gic_cpu_init(void)
{
	struct gic_data *gd = &gic_data;

#if defined(CFG_ARM_GICV3)
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif

	/* per-CPU interrupts config:
	 * ID0-ID7(SGI)   for Non-secure interrupts
	 * ID8-ID15(SGI)  for Secure interrupts.
	 * All PPI config as Non-secure interrupts.
	 */
	io_write32(gd->gicd_base + GICD_IGROUPR(0), 0xffff00ff);

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR,
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		   GICC_CTLR_FIQEN);
#endif
}
193 
194 static int gic_dt_get_irq(const uint32_t *properties, int count, uint32_t *type,
195 			  uint32_t *prio)
196 {
197 	int it_num = DT_INFO_INVALID_INTERRUPT;
198 
199 	if (type)
200 		*type = IRQ_TYPE_NONE;
201 
202 	if (prio)
203 		*prio = 0;
204 
205 	if (!properties || count < 2)
206 		return DT_INFO_INVALID_INTERRUPT;
207 
208 	it_num = fdt32_to_cpu(properties[1]);
209 
210 	switch (fdt32_to_cpu(properties[0])) {
211 	case 1:
212 		it_num += 16;
213 		break;
214 	case 0:
215 		it_num += 32;
216 		break;
217 	default:
218 		it_num = DT_INFO_INVALID_INTERRUPT;
219 	}
220 
221 	return it_num;
222 }
223 
/*
 * Map the GIC register banks and initialize the driver state.
 *
 * With GICv3 the CPU interface is accessed through system registers, so
 * only the distributor is mapped and gicc_base stays 0. Panics if a
 * required bank cannot be mapped. Requires the MMU to be enabled.
 */
static void gic_init_base_addr(paddr_t gicc_base_pa, paddr_t gicd_base_pa)
{
	struct gic_data *gd = &gic_data;
	vaddr_t gicc_base = 0;
	vaddr_t gicd_base = 0;

	assert(cpu_mmu_enabled());

	gicd_base = core_mmu_get_va(gicd_base_pa, MEM_AREA_IO_SEC,
				    GIC_DIST_REG_SIZE);
	if (!gicd_base)
		panic();

	if (!IS_ENABLED(CFG_ARM_GICV3)) {
		gicc_base = core_mmu_get_va(gicc_base_pa, MEM_AREA_IO_SEC,
					    GIC_CPU_REG_SIZE);
		if (!gicc_base)
			panic();
	}

	gd->gicc_base = gicc_base;
	gd->gicd_base = gicd_base;
	gd->max_it = probe_max_it(gicc_base, gicd_base);
	gd->chip.ops = &gic_ops;

	/* Let the DT framework translate interrupt specifiers for us */
	if (IS_ENABLED(CFG_DT))
		gd->chip.dt_get_irq = gic_dt_get_irq;
}
252 
/*
 * One-time GIC initialization: map the register banks, optionally apply
 * the full distributor/CPU-interface configuration, and register the
 * chip as the main interrupt controller.
 *
 * When OP-TEE runs on top of TF-A (CFG_WITH_ARM_TRUSTED_FW) the GIC has
 * already been configured by the boot firmware, so only the mapping and
 * registration are done here.
 */
void gic_init(paddr_t gicc_base_pa, paddr_t gicd_base_pa)
{
	struct gic_data __maybe_unused *gd = &gic_data;
	size_t __maybe_unused n = 0;

	gic_init_base_addr(gicc_base_pa, gicd_base_pa);

	/* GIC configuration is initialized from TF-A when embedded */
#ifndef CFG_WITH_ARM_TRUSTED_FW
	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
		/* Disable interrupts */
		io_write32(gd->gicd_base + GICD_ICENABLER(n), 0xffffffff);

		/* Make interrupts non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(n), 0xffffffff);

		/* Mark interrupts non-secure */
		if (n == 0) {
			/* per-CPU inerrupts config:
			 * ID0-ID7(SGI)	  for Non-secure interrupts
			 * ID8-ID15(SGI)  for Secure interrupts.
			 * All PPI config as Non-secure interrupts.
			 */
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffff00ff);
		} else {
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffffffff);
		}
	}

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
	io_setbits32(gd->gicd_base + GICD_CTLR, GICD_CTLR_ENABLEGRP1S);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR, GICC_CTLR_FIQEN |
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1);
	io_setbits32(gd->gicd_base + GICD_CTLR,
		     GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1);
#endif
#endif /*CFG_WITH_ARM_TRUSTED_FW*/

	interrupt_main_init(&gic_data.chip);
}
302 
303 static void gic_it_add(struct gic_data *gd, size_t it)
304 {
305 	size_t idx = it / NUM_INTS_PER_REG;
306 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
307 
308 	assert(gd == &gic_data);
309 
310 	/* Disable the interrupt */
311 	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
312 	/* Make it non-pending */
313 	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
314 	/* Assign it to group0 */
315 	io_clrbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
316 #if defined(CFG_ARM_GICV3)
317 	/* Assign it to group1S */
318 	io_setbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
319 #endif
320 }
321 
322 static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
323 				uint8_t cpu_mask)
324 {
325 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
326 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
327 	uint32_t target, target_shift;
328 	vaddr_t itargetsr = gd->gicd_base +
329 			    GICD_ITARGETSR(it / NUM_TARGETS_PER_REG);
330 
331 	assert(gd == &gic_data);
332 
333 	/* Assigned to group0 */
334 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
335 
336 	/* Route it to selected CPUs */
337 	target = io_read32(itargetsr);
338 	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
339 	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
340 	target |= cpu_mask << target_shift;
341 	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA, target, itargetsr);
342 	io_write32(itargetsr, target);
343 	DMSG("cpu_mask: 0x%x", io_read32(itargetsr));
344 }
345 
346 static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
347 {
348 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
349 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
350 
351 	assert(gd == &gic_data);
352 
353 	/* Assigned to group0 */
354 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
355 
356 	/* Set prio it to selected CPUs */
357 	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
358 		prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
359 	io_write8(gd->gicd_base + GICD_IPRIORITYR(0) + it, prio);
360 }
361 
362 static void gic_it_enable(struct gic_data *gd, size_t it)
363 {
364 	size_t idx = it / NUM_INTS_PER_REG;
365 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
366 	vaddr_t base = gd->gicd_base;
367 
368 	assert(gd == &gic_data);
369 
370 	/* Assigned to group0 */
371 	assert(!(io_read32(base + GICD_IGROUPR(idx)) & mask));
372 
373 	/* Enable the interrupt */
374 	io_write32(base + GICD_ISENABLER(idx), mask);
375 }
376 
377 static void gic_it_disable(struct gic_data *gd, size_t it)
378 {
379 	size_t idx = it / NUM_INTS_PER_REG;
380 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
381 
382 	assert(gd == &gic_data);
383 
384 	/* Assigned to group0 */
385 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
386 
387 	/* Disable the interrupt */
388 	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
389 }
390 
391 static void gic_it_set_pending(struct gic_data *gd, size_t it)
392 {
393 	size_t idx = it / NUM_INTS_PER_REG;
394 	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);
395 
396 	assert(gd == &gic_data);
397 
398 	/* Should be Peripheral Interrupt */
399 	assert(it >= NUM_SGI);
400 
401 	/* Raise the interrupt */
402 	io_write32(gd->gicd_base + GICD_ISPENDR(idx), mask);
403 }
404 
405 static void assert_cpu_mask_is_valid(uint32_t cpu_mask)
406 {
407 	bool __maybe_unused to_others = cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS;
408 	bool __maybe_unused to_current = cpu_mask & ITR_CPU_MASK_TO_THIS_CPU;
409 	bool __maybe_unused to_list = cpu_mask & 0xff;
410 
411 	/* One and only one of the bit fields shall be non-zero */
412 	assert(to_others + to_current + to_list == 1);
413 }
414 
/*
 * Generate a Software Generated Interrupt @it (0..15) on the CPUs
 * selected by @cpu_mask (see assert_cpu_mask_is_valid()).
 *
 * GICv3: builds an ICC_SGI1R_EL1 value; either broadcasts to all other
 * CPUs (IRM bit) or targets CPUs sharing this core's Aff1..Aff3 with an
 * Aff0 target list. Only same-cluster target lists are supported.
 * GICv2: builds a GICD_SGIR value with a target list filter.
 *
 * @group selects the interrupt group the SGI is raised for; on GICv3 a
 * non-zero @group uses ICC_ASGI1R (NOTE(review): presumably the group 1
 * alias for the other security state — confirm against the GICv3 spec).
 */
static void gic_it_raise_sgi(struct gic_data *gd __maybe_unused, size_t it,
			     uint32_t cpu_mask, uint8_t group)
{
#if defined(CFG_ARM_GICV3)
	uint32_t mask_id = it & 0xf;
	uint64_t mask = SHIFT_U64(mask_id, 24);

	assert_cpu_mask_is_valid(cpu_mask);

	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
		/* IRM = 1: route to all PEs except this one */
		mask |= BIT64(GICC_SGI_IRM_BIT);
	} else {
		/* Target CPUs sharing this core's Aff1..Aff3 */
		uint64_t mpidr = read_mpidr();
		uint64_t mask_aff1 = (mpidr & MPIDR_AFF1_MASK) >>
				     MPIDR_AFF1_SHIFT;
		uint64_t mask_aff2 = (mpidr & MPIDR_AFF2_MASK) >>
				     MPIDR_AFF2_SHIFT;
		uint64_t mask_aff3 = (mpidr & MPIDR_AFF3_MASK) >>
				     MPIDR_AFF3_SHIFT;

		mask |= SHIFT_U64(mask_aff1, GICC_SGI_AFF1_SHIFT);
		mask |= SHIFT_U64(mask_aff2, GICC_SGI_AFF2_SHIFT);
		mask |= SHIFT_U64(mask_aff3, GICC_SGI_AFF3_SHIFT);

		if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
			/* Target list contains only this core's Aff0 */
			mask |= BIT32(mpidr & 0xf);
		} else {
			/*
			 * Only support sending SGI to the cores in the
			 * same cluster now.
			 */
			mask |= cpu_mask & 0xff;
		}
	}

	/* Raise the interrupt */
	if (group)
		write_icc_asgi1r(mask);
	else
		write_icc_sgi1r(mask);
#else
	uint32_t mask_id = it & GICD_SGIR_SIGINTID_MASK;
	uint32_t mask_group = group & 0x1;
	uint32_t mask = mask_id;

	assert_cpu_mask_is_valid(cpu_mask);

	/* NSATT selects the group the SGI is forwarded for */
	mask |= SHIFT_U32(mask_group, GICD_SGIR_NSATT_SHIFT);
	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
		mask |= SHIFT_U32(GICD_SGIR_TO_OTHER_CPUS,
				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
	} else if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
		mask |= SHIFT_U32(GICD_SGIR_TO_THIS_CPU,
				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
	} else {
		mask |= SHIFT_U32(cpu_mask & 0xff,
				  GICD_SGIR_CPU_TARGET_LIST_SHIFT);
	}

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_SGIR, mask);
#endif
}
478 
/*
 * Acknowledge the highest-priority pending interrupt and return the raw
 * IAR value (interrupt ID, plus source CPU ID on GICv2 SGIs).
 */
static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	return read_icc_iar1();
#else
	return io_read32(gd->gicc_base + GICC_IAR);
#endif
}
489 
/*
 * Signal end-of-interrupt for a previously acknowledged interrupt.
 * @eoir must be the unmodified value returned by gic_read_iar().
 */
static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	write_icc_eoir1(eoir);
#else
	io_write32(gd->gicc_base + GICC_EOIR, eoir);
#endif
}
500 
501 static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
502 {
503 	size_t idx = it / NUM_INTS_PER_REG;
504 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
505 
506 	assert(gd == &gic_data);
507 	return !!(io_read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
508 }
509 
510 static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
511 {
512 	size_t idx = it / NUM_INTS_PER_REG;
513 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
514 
515 	assert(gd == &gic_data);
516 	return !!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
517 }
518 
519 static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
520 {
521 	size_t reg_idx = it / NUM_TARGETS_PER_REG;
522 	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
523 				ITARGETSR_FIELD_BITS;
524 	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
525 	uint32_t target = io_read32(gd->gicd_base + GICD_ITARGETSR(reg_idx));
526 
527 	assert(gd == &gic_data);
528 	return (target & target_mask) >> target_shift;
529 }
530 
/*
 * Debug helper: dump the CPU interface and distributor control
 * registers, then list every enabled interrupt with its group and CPU
 * target field.
 */
void gic_dump_state(void)
{
	struct gic_data *gd = &gic_data;
	int i = 0;

#if defined(CFG_ARM_GICV3)
	DMSG("GICC_CTLR: 0x%x", read_icc_ctlr());
#else
	DMSG("GICC_CTLR: 0x%x", io_read32(gd->gicc_base + GICC_CTLR));
#endif
	DMSG("GICD_CTLR: 0x%x", io_read32(gd->gicd_base + GICD_CTLR));

	for (i = 0; i <= (int)gd->max_it; i++) {
		if (gic_it_is_enabled(gd, i)) {
			DMSG("irq%d: enabled, group:%d, target:%x", i,
			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
		}
	}
}
550 
/*
 * Top-level interrupt handler: acknowledge the pending interrupt,
 * dispatch to the registered handlers if the ID is in range (IDs above
 * max_it include the 1020-1023 special values), then signal EOI.
 */
static void __maybe_unused gic_native_itr_handler(void)
{
	struct gic_data *gd = &gic_data;
	uint32_t iar = 0;
	uint32_t id = 0;

	iar = gic_read_iar(gd);
	id = iar & GICC_IAR_IT_ID_MASK;

	if (id <= gd->max_it)
		interrupt_call_handlers(&gd->chip, id);
	else
		DMSG("ignoring interrupt %" PRIu32, id);

	/* EOI must be written with the full IAR value */
	gic_write_eoir(gd, iar);
}
567 
#ifndef CFG_CORE_WORKAROUND_ARM_NMFI
/* Override interrupt_main_handler() with driver implementation */
void interrupt_main_handler(void)
{
	gic_native_itr_handler();
}
#endif /*CFG_CORE_WORKAROUND_ARM_NMFI*/
575 
576 static void gic_op_add(struct itr_chip *chip, size_t it,
577 		       uint32_t type __unused,
578 		       uint32_t prio __unused)
579 {
580 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
581 
582 	assert(gd == &gic_data);
583 
584 	if (it > gd->max_it)
585 		panic();
586 
587 	gic_it_add(gd, it);
588 	/* Set the CPU mask to deliver interrupts to any online core */
589 	gic_it_set_cpu_mask(gd, it, 0xff);
590 	gic_it_set_prio(gd, it, 0x1);
591 }
592 
593 static void gic_op_enable(struct itr_chip *chip, size_t it)
594 {
595 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
596 
597 	assert(gd == &gic_data);
598 
599 	if (it > gd->max_it)
600 		panic();
601 
602 	gic_it_enable(gd, it);
603 }
604 
605 static void gic_op_disable(struct itr_chip *chip, size_t it)
606 {
607 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
608 
609 	assert(gd == &gic_data);
610 
611 	if (it > gd->max_it)
612 		panic();
613 
614 	gic_it_disable(gd, it);
615 }
616 
617 static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
618 {
619 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
620 
621 	assert(gd == &gic_data);
622 
623 	if (it > gd->max_it)
624 		panic();
625 
626 	gic_it_set_pending(gd, it);
627 }
628 
629 static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
630 			     uint32_t cpu_mask)
631 {
632 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
633 
634 	assert(gd == &gic_data);
635 
636 	/* Should be Software Generated Interrupt */
637 	assert(it < NUM_SGI);
638 
639 	if (it > gd->max_it)
640 		panic();
641 
642 	if (it < NUM_NS_SGI)
643 		gic_it_raise_sgi(gd, it, cpu_mask, 1);
644 	else
645 		gic_it_raise_sgi(gd, it, cpu_mask, 0);
646 }
647 
648 static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
649 			uint8_t cpu_mask)
650 {
651 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
652 
653 	assert(gd == &gic_data);
654 
655 	if (it > gd->max_it)
656 		panic();
657 
658 	gic_it_set_cpu_mask(gd, it, cpu_mask);
659 }
660 
661 #ifdef CFG_DT
662 /* Callback for "interrupts" and "interrupts-extended" DT node properties */
/*
 * Interrupt provider callback: translate a parsed DT interrupt
 * specifier into a registered GIC interrupt and fill @itr_desc with the
 * chip/interrupt-number pair.
 */
static TEE_Result dt_get_gic_chip_cb(struct dt_pargs *arg, void *priv_data,
				     struct itr_desc *itr_desc)
{
	int itr_num = DT_INFO_INVALID_INTERRUPT;
	struct itr_chip *chip = priv_data;
	uint32_t phandle_args[2] = { };
	uint32_t type = 0;
	uint32_t prio = 0;

	assert(arg && itr_desc);

	/*
	 * gic_dt_get_irq() expects phandle arguments passed are still in DT
	 * format (big-endian) whereas struct dt_pargs carries converted
	 * formats. Therefore swap again phandle arguments. gic_dt_get_irq()
	 * consumes only the 2 first arguments.
	 */
	if (arg->args_count < 2)
		return TEE_ERROR_GENERIC;
	phandle_args[0] = cpu_to_fdt32(arg->args[0]);
	phandle_args[1] = cpu_to_fdt32(arg->args[1]);

	itr_num = gic_dt_get_irq((const void *)phandle_args, 2, &type, &prio);
	if (itr_num == DT_INFO_INVALID_INTERRUPT)
		return TEE_ERROR_GENERIC;

	/* Configure the interrupt before handing it to the consumer */
	gic_op_add(chip, itr_num, type, prio);

	itr_desc->chip = chip;
	itr_desc->itr_num = itr_num;

	return TEE_SUCCESS;
}
696 
/*
 * DT probe: register this driver as the interrupt provider for the GIC
 * node at @offs. Panics on registration failure.
 */
static TEE_Result gic_probe(const void *fdt, int offs, const void *cd __unused)
{
	if (interrupt_register_provider(fdt, offs, dt_get_gic_chip_cb,
					&gic_data.chip))
		panic();

	return TEE_SUCCESS;
}
705 
/* GICv2 compatible strings matched by this driver */
static const struct dt_device_match gic_match_table[] = {
	{ .compatible = "arm,cortex-a15-gic" },
	{ .compatible = "arm,cortex-a7-gic" },
	{ .compatible = "arm,cortex-a5-gic" },
	{ .compatible = "arm,cortex-a9-gic" },
	{ .compatible = "arm,gic-400" },
	{ }
};

DEFINE_DT_DRIVER(gic_dt_driver) = {
	.name = "gic",
	.match_table = gic_match_table,
	.probe = gic_probe,
};
720 #endif /*CFG_DT*/
721