xref: /optee_os/core/drivers/gic.c (revision 5da157f55e4ae09227454c134be945bc73929dbc)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2017, 2023 Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <compiler.h>
#include <config.h>
#include <drivers/gic.h>
#include <io.h>
#include <keep.h>
#include <kernel/dt.h>
#include <kernel/dt_driver.h>
#include <kernel/interrupt.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <libfdt.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <trace.h>
#include <util.h>

/* Offsets from gic.gicc_base */
#define GICC_CTLR		(0x000)
#define GICC_PMR		(0x004)
#define GICC_IAR		(0x00C)
#define GICC_EOIR		(0x010)

#define GICC_CTLR_ENABLEGRP0	(1 << 0)
#define GICC_CTLR_ENABLEGRP1	(1 << 1)
#define GICC_CTLR_FIQEN		(1 << 3)

/* Offsets from gic.gicd_base */
#define GICD_CTLR		(0x000)
#define GICD_TYPER		(0x004)
#define GICD_IGROUPR(n)		(0x080 + (n) * 4)
#define GICD_ISENABLER(n)	(0x100 + (n) * 4)
#define GICD_ICENABLER(n)	(0x180 + (n) * 4)
#define GICD_ISPENDR(n)		(0x200 + (n) * 4)
#define GICD_ICPENDR(n)		(0x280 + (n) * 4)
#define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
#define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
#define GICD_IGROUPMODR(n)	(0xd00 + (n) * 4)
#define GICD_SGIR		(0xF00)

#ifdef CFG_ARM_GICV3
#define GICD_PIDR2		(0xFFE8)
#else
/* Called ICPIDR2 in GICv2 specification */
#define GICD_PIDR2		(0xFE8)
#endif

#define GICD_CTLR_ENABLEGRP0	BIT32(0)
#define GICD_CTLR_ENABLEGRP1NS	BIT32(1)
#define GICD_CTLR_ENABLEGRP1S	BIT32(2)
#define GICD_CTLR_ARE_S		BIT32(4)
#define GICD_CTLR_ARE_NS	BIT32(5)

/* Offsets from gic.gicr_base[core_pos] */
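/*
 * Each GICv3 redistributor owns two contiguous 64kB frames, the RD frame
 * followed by the SGI frame, hence 128kB per CPU core
 */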
#define GICR_V3_PCPUBASE_SIZE	(2 * 64 * 1024)
#define GICR_SGI_BASE_OFFSET	(64 * 1024)
#define GICR_CTLR		(0x00)
#define GICR_TYPER		(0x08)

#define GICR_IGROUPR0		(GICR_SGI_BASE_OFFSET + 0x080)
#define GICR_IGRPMODR0		(GICR_SGI_BASE_OFFSET + 0xD00)

#define GICR_TYPER_LAST		BIT64(4)
#define GICR_TYPER_AFF3_SHIFT	56
#define GICR_TYPER_AFF2_SHIFT	48
#define GICR_TYPER_AFF1_SHIFT	40
#define GICR_TYPER_AFF0_SHIFT	32

/* GICD IDR2 name differs on GICv3 and GICv2 but uses the same bit map */
#define GICD_PIDR2_ARCHREV_SHIFT	4
#define GICD_PIDR2_ARCHREV_MASK		0xF

/* Number of Private Peripheral Interrupts */
#define NUM_PPI	32

/* Number of Software Generated Interrupts */
#define NUM_SGI			16

/* Number of Non-secure Software Generated Interrupts */
#define NUM_NS_SGI		8

/* Number of interrupts in one register */
#define NUM_INTS_PER_REG	32

/* Number of targets in one register */
#define NUM_TARGETS_PER_REG	4

/* Helpers to access the per-interrupt fields of ITARGETSRn */
#define ITARGETSR_FIELD_BITS	8
#define ITARGETSR_FIELD_MASK	0xff

#define GICD_TYPER_IT_LINES_NUM_MASK	0x1f
#define GICC_IAR_IT_ID_MASK	0x3ff
#define GICC_IAR_CPU_ID_MASK	0x7
#define GICC_IAR_CPU_ID_SHIFT	10

#define GICC_SGI_IRM_BIT	40
#define GICC_SGI_AFF1_SHIFT	16
#define GICC_SGI_AFF2_SHIFT	32
#define GICC_SGI_AFF3_SHIFT	48

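/*
 * GICv2 GICD_SGIR layout: SGIINTID in bits [3:0], NSATT in bit 15,
 * CPUTargetList in bits [23:16] and TargetListFilter in bits [25:24]
 */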
#define GICD_SGIR_SIGINTID_MASK			0xf
#define GICD_SGIR_TO_OTHER_CPUS		0x1
#define GICD_SGIR_TO_THIS_CPU			0x2
#define GICD_SGIR_TARGET_LIST_FILTER_SHIFT	24
#define GICD_SGIR_NSATT_SHIFT			15
#define GICD_SGIR_CPU_TARGET_LIST_SHIFT		16

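/*
 * struct gic_data - GIC driver private data
 * @gicc_base: CPU interface base address (unused with GICv3 system registers)
 * @gicd_base: Distributor base address
 * @gicr_base: Redistributor base address of each CPU core (GICv3 only)
 * @max_it: Largest implemented interrupt ID
 * @per_cpu_group_status: Per-CPU interrupt group (IGROUPR0) configuration
 * @per_cpu_group_modifier: Per-CPU group modifier (IGRPMODR0) configuration
 * @chip: Interrupt controller reference exposed to the interrupt framework
 */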
struct gic_data {
	vaddr_t gicc_base;
	vaddr_t gicd_base;
#if defined(CFG_ARM_GICV3)
	vaddr_t gicr_base[CFG_TEE_CORE_NB_CORE];
#endif
	size_t max_it;
	uint32_t per_cpu_group_status;
	uint32_t per_cpu_group_modifier;
	struct itr_chip chip;
};

static struct gic_data gic_data __nex_bss;

static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t type,
		       uint32_t prio);
static void gic_op_enable(struct itr_chip *chip, size_t it);
static void gic_op_disable(struct itr_chip *chip, size_t it);
static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			     uint32_t cpu_mask);
static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);

static const struct itr_ops gic_ops = {
	.add = gic_op_add,
	.mask = gic_op_disable,
	.unmask = gic_op_enable,
	.enable = gic_op_enable,
	.disable = gic_op_disable,
	.raise_pi = gic_op_raise_pi,
	.raise_sgi = gic_op_raise_sgi,
	.set_affinity = gic_op_set_affinity,
};
DECLARE_KEEP_PAGER(gic_ops);

static vaddr_t __maybe_unused get_gicr_base(struct gic_data *gd __maybe_unused)
{
#if defined(CFG_ARM_GICV3)
	return gd->gicr_base[get_core_pos()];
#else
	return 0;
#endif
}

static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
{
	int i;
	uint32_t old_ctlr;
	size_t ret = 0;
	size_t max_regs = io_read32(gicd_base + GICD_TYPER) &
			  GICD_TYPER_IT_LINES_NUM_MASK;

	/*
	 * Probe which interrupt number is the largest.
	 */
#if defined(CFG_ARM_GICV3)
	old_ctlr = read_icc_ctlr();
	write_icc_ctlr(0);
#else
	old_ctlr = io_read32(gicc_base + GICC_CTLR);
	io_write32(gicc_base + GICC_CTLR, 0);
#endif
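	/*
	 * GICD_ISENABLER bits for unimplemented interrupt lines are RAZ/WI,
	 * so writing all-ones and reading back which bits stick reveals the
	 * largest implemented interrupt ID. The CPU interface was disabled
	 * above so the transiently enabled interrupts cannot be delivered.
	 * Writing ~old_reg to GICD_ICENABLER restores the enable state of
	 * the lines that were not enabled before.
	 */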
	for (i = max_regs; i >= 0; i--) {
		uint32_t old_reg;
		uint32_t reg;
		int b;

		old_reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ISENABLER(i), 0xffffffff);
		reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ICENABLER(i), ~old_reg);
		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
			if (BIT32(b) & reg) {
				ret = i * NUM_INTS_PER_REG + b;
				goto out;
			}
		}
	}
out:
#if defined(CFG_ARM_GICV3)
	write_icc_ctlr(old_ctlr);
#else
	io_write32(gicc_base + GICC_CTLR, old_ctlr);
#endif
	return ret;
}

static void init_gic_per_cpu(struct gic_data *gd)
{
	io_write32(gd->gicd_base + GICD_IGROUPR(0), gd->per_cpu_group_status);

	/*
	 * Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR,
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		   GICC_CTLR_FIQEN);
#endif
}

void gic_init_per_cpu(void)
{
	struct gic_data *gd = &gic_data;

#if defined(CFG_ARM_GICV3)
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif

	/* The GIC is already configured when booting with TF-A */
	if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW))
		init_gic_per_cpu(gd);
}

void gic_cpu_init(void)
{
	struct gic_data *gd = &gic_data;

#if defined(CFG_ARM_GICV3)
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif
	IMSG("%s is deprecated, please use gic_init_per_cpu()", __func__);

	init_gic_per_cpu(gd);
}

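/*
 * Convert a DT interrupt specifier to a GIC interrupt ID. Per the Arm GIC
 * device tree binding, cell 0 holds the interrupt type (GIC_SPI or GIC_PPI)
 * and cell 1 the type-relative number, hence the offsets of 16 for PPIs
 * and 32 for SPIs below.
 */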
static int gic_dt_get_irq(const uint32_t *properties, int count, uint32_t *type,
			  uint32_t *prio)
{
	int it_num = DT_INFO_INVALID_INTERRUPT;

	if (type)
		*type = IRQ_TYPE_NONE;

	if (prio)
		*prio = 0;

	if (!properties || count < 2)
		return DT_INFO_INVALID_INTERRUPT;

	it_num = fdt32_to_cpu(properties[1]);

	switch (fdt32_to_cpu(properties[0])) {
	case GIC_PPI:
		it_num += 16;
		break;
	case GIC_SPI:
		it_num += 32;
		break;
	default:
		it_num = DT_INFO_INVALID_INTERRUPT;
	}

	return it_num;
}

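/*
 * Walk the contiguous per-CPU redistributor frames until GICR_TYPER.Last
 * flags the frame of the last redistributor, recording each frame's
 * virtual base address at the core position derived from the affinity
 * fields of GICR_TYPER.
 */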
static void __maybe_unused probe_redist_base_addrs(vaddr_t *gicr_base_addrs,
						   paddr_t gicr_base_pa)
{
	size_t sz = GICR_V3_PCPUBASE_SIZE;
	paddr_t pa = gicr_base_pa;
	size_t core_pos = 0;
	uint64_t mt_bit = 0;
	uint64_t mpidr = 0;
	uint64_t tv = 0;
	vaddr_t va = 0;

#ifdef ARM64
	mt_bit = read_mpidr_el1() & MPIDR_MT_MASK;
#endif
	do {
		va = core_mmu_get_va(pa, MEM_AREA_IO_SEC, sz);
		if (!va)
			panic();
		tv = io_read64(va + GICR_TYPER);

		/*
		 * Extract an MPIDR value from the Type register to calculate
		 * the core position of this redistributor instance.
		 */
		mpidr = mt_bit;
		mpidr |= SHIFT_U64((tv >> GICR_TYPER_AFF3_SHIFT) &
				   MPIDR_AFFLVL_MASK, MPIDR_AFF3_SHIFT);
		mpidr |= (tv >> GICR_TYPER_AFF0_SHIFT) &
			 (MPIDR_AFF0_MASK | MPIDR_AFF1_MASK | MPIDR_AFF2_MASK);
		core_pos = get_core_pos_mpidr(mpidr);
		if (core_pos < CFG_TEE_CORE_NB_CORE) {
			DMSG("GICR_BASE[%zu] at %#"PRIxVA, core_pos, va);
			gicr_base_addrs[core_pos] = va;
		} else {
			EMSG("Skipping too large core_pos %zu from GICR_TYPER",
			     core_pos);
		}
		pa += sz;
	} while (!(tv & GICR_TYPER_LAST));
}

static void gic_init_base_addr(paddr_t gicc_base_pa, paddr_t gicd_base_pa,
			       paddr_t gicr_base_pa __maybe_unused)
{
	struct gic_data *gd = &gic_data;
	vaddr_t gicc_base = 0;
	vaddr_t gicd_base = 0;
	uint32_t vers __maybe_unused = 0;

	assert(cpu_mmu_enabled());

	gicd_base = core_mmu_get_va(gicd_base_pa, MEM_AREA_IO_SEC,
				    GIC_DIST_REG_SIZE);
	if (!gicd_base)
		panic();

	vers = io_read32(gicd_base + GICD_PIDR2);
	vers >>= GICD_PIDR2_ARCHREV_SHIFT;
	vers &= GICD_PIDR2_ARCHREV_MASK;

	if (IS_ENABLED(CFG_ARM_GICV3)) {
		assert(vers == 3);
	} else {
		assert(vers == 2);
		gicc_base = core_mmu_get_va(gicc_base_pa, MEM_AREA_IO_SEC,
					    GIC_CPU_REG_SIZE);
		if (!gicc_base)
			panic();
	}

	gd->gicc_base = gicc_base;
	gd->gicd_base = gicd_base;
	gd->max_it = probe_max_it(gicc_base, gicd_base);
#if defined(CFG_ARM_GICV3)
	probe_redist_base_addrs(gd->gicr_base, gicr_base_pa);
#endif
	gd->chip.ops = &gic_ops;

	if (IS_ENABLED(CFG_DT))
		gd->chip.dt_get_irq = gic_dt_get_irq;
}

void gic_init_v3(paddr_t gicc_base_pa, paddr_t gicd_base_pa,
		 paddr_t gicr_base_pa)
{
	struct gic_data __maybe_unused *gd = &gic_data;
	size_t __maybe_unused n = 0;

	gic_init_base_addr(gicc_base_pa, gicd_base_pa, gicr_base_pa);

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	/* The GIC configuration is inherited from TF-A in this configuration */
	if (io_read32(gd->gicd_base + GICD_CTLR) & GICD_CTLR_ARE_S) {
		vaddr_t gicr_base = get_gicr_base(gd);

		if (!gicr_base)
			panic("GICR_BASE missing for affinity routing");
		/* Secure affinity routing enabled */
		gd->per_cpu_group_status = io_read32(gicr_base + GICR_IGROUPR0);
		gd->per_cpu_group_modifier = io_read32(gicr_base +
						       GICR_IGRPMODR0);
	} else {
		/* Legacy operation with secure affinity routing disabled */
		gd->per_cpu_group_status = io_read32(gd->gicd_base +
						     GICD_IGROUPR(0));
		gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
	}
#else /*!CFG_WITH_ARM_TRUSTED_FW*/
	/*
	 * Without TF-A, the GIC is always configured for legacy operation
	 * with secure affinity routing disabled.
	 */
	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
		/* Disable interrupts */
		io_write32(gd->gicd_base + GICD_ICENABLER(n), 0xffffffff);

		/* Make interrupts non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(n), 0xffffffff);

		/* Mark interrupts non-secure */
		if (n == 0) {
			/*
			 * Per-CPU interrupt configuration:
			 * ID0-ID7 (SGI)  for Non-secure interrupts
			 * ID8-ID15 (SGI) for Secure interrupts
			 * All PPIs configured as Non-secure interrupts
			 */
			gd->per_cpu_group_status = 0xffff00ff;
			gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
			io_write32(gd->gicd_base + GICD_IGROUPR(n),
				   gd->per_cpu_group_status);
		} else {
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffffffff);
		}
	}

	/*
	 * Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
	io_setbits32(gd->gicd_base + GICD_CTLR, GICD_CTLR_ENABLEGRP1S);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR, GICC_CTLR_FIQEN |
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1);
	io_setbits32(gd->gicd_base + GICD_CTLR,
		     GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1NS);
#endif
#endif /*!CFG_WITH_ARM_TRUSTED_FW*/

	interrupt_main_init(&gic_data.chip);
}

static void gic_it_add(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Disable the interrupt */
	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
	/* Make it non-pending */
	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
	/* Assign it to group0 */
	io_clrbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
#if defined(CFG_ARM_GICV3)
	/* Assign it to group1S */
	io_setbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
#endif
}

static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
				uint8_t cpu_mask)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
	uint32_t target, target_shift;
	vaddr_t itargetsr = gd->gicd_base +
			    GICD_ITARGETSR(it / NUM_TARGETS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Route it to selected CPUs */
	target = io_read32(itargetsr);
	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
	target |= cpu_mask << target_shift;
	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA, target, itargetsr);
	io_write32(itargetsr, target);
	DMSG("cpu_mask: 0x%x", io_read32(itargetsr));
}

static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Set the priority of the interrupt */
	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
	     prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
	io_write8(gd->gicd_base + GICD_IPRIORITYR(0) + it, prio);
}

static void gic_it_enable(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
	vaddr_t base = gd->gicd_base;

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(base + GICD_IGROUPR(idx)) & mask));

	/* Enable the interrupt */
	io_write32(base + GICD_ISENABLER(idx), mask);
}

static void gic_it_disable(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Disable the interrupt */
	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
}

static void gic_it_set_pending(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Should be a Peripheral Interrupt */
	assert(it >= NUM_SGI);

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_ISPENDR(idx), mask);
}

static void assert_cpu_mask_is_valid(uint32_t cpu_mask)
{
	bool __maybe_unused to_others = cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS;
	bool __maybe_unused to_current = cpu_mask & ITR_CPU_MASK_TO_THIS_CPU;
	bool __maybe_unused to_list = cpu_mask & 0xff;

	/* One and only one of the bit fields shall be non-zero */
	assert(to_others + to_current + to_list == 1);
}

static void gic_it_raise_sgi(struct gic_data *gd __maybe_unused, size_t it,
			     uint32_t cpu_mask, uint8_t group)
{
#if defined(CFG_ARM_GICV3)
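	/*
	 * Compose an ICC_SGI1R_EL1 value: the SGI ID goes in bits [27:24]
	 * and the target list in bits [15:0], qualified by the Aff1/Aff2/
	 * Aff3 fields of the target cluster. Setting IRM (bit 40) instead
	 * broadcasts the SGI to all cores but the requesting one.
	 */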
	uint32_t mask_id = it & 0xf;
	uint64_t mask = SHIFT_U64(mask_id, 24);

	assert_cpu_mask_is_valid(cpu_mask);

	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
		mask |= BIT64(GICC_SGI_IRM_BIT);
	} else {
		uint64_t mpidr = read_mpidr();
		uint64_t mask_aff1 = (mpidr & MPIDR_AFF1_MASK) >>
				     MPIDR_AFF1_SHIFT;
		uint64_t mask_aff2 = (mpidr & MPIDR_AFF2_MASK) >>
				     MPIDR_AFF2_SHIFT;
		uint64_t mask_aff3 = (mpidr & MPIDR_AFF3_MASK) >>
				     MPIDR_AFF3_SHIFT;

		mask |= SHIFT_U64(mask_aff1, GICC_SGI_AFF1_SHIFT);
		mask |= SHIFT_U64(mask_aff2, GICC_SGI_AFF2_SHIFT);
		mask |= SHIFT_U64(mask_aff3, GICC_SGI_AFF3_SHIFT);

		if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
			mask |= BIT32(mpidr & 0xf);
		} else {
			/*
			 * Only support sending SGI to the cores in the
			 * same cluster now.
			 */
			mask |= cpu_mask & 0xff;
		}
	}

	/* Raise the interrupt */
	if (group)
		write_icc_asgi1r(mask);
	else
		write_icc_sgi1r(mask);
#else
	uint32_t mask_id = it & GICD_SGIR_SIGINTID_MASK;
	uint32_t mask_group = group & 0x1;
	uint32_t mask = mask_id;

	assert_cpu_mask_is_valid(cpu_mask);

	mask |= SHIFT_U32(mask_group, GICD_SGIR_NSATT_SHIFT);
	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
		mask |= SHIFT_U32(GICD_SGIR_TO_OTHER_CPUS,
				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
	} else if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
		mask |= SHIFT_U32(GICD_SGIR_TO_THIS_CPU,
				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
	} else {
		mask |= SHIFT_U32(cpu_mask & 0xff,
				  GICD_SGIR_CPU_TARGET_LIST_SHIFT);
	}

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_SGIR, mask);
#endif
}

static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	return read_icc_iar1();
#else
	return io_read32(gd->gicc_base + GICC_IAR);
#endif
}

static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	write_icc_eoir1(eoir);
#else
	io_write32(gd->gicc_base + GICC_EOIR, eoir);
#endif
}

static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);
	return !!(io_read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
}

static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);
	return !!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
}

static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
{
	size_t reg_idx = it / NUM_TARGETS_PER_REG;
	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
				ITARGETSR_FIELD_BITS;
	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
	uint32_t target = io_read32(gd->gicd_base + GICD_ITARGETSR(reg_idx));

	assert(gd == &gic_data);
	return (target & target_mask) >> target_shift;
}

void gic_dump_state(void)
{
	struct gic_data *gd = &gic_data;
	int i = 0;

#if defined(CFG_ARM_GICV3)
	DMSG("GICC_CTLR: 0x%x", read_icc_ctlr());
#else
	DMSG("GICC_CTLR: 0x%x", io_read32(gd->gicc_base + GICC_CTLR));
#endif
	DMSG("GICD_CTLR: 0x%x", io_read32(gd->gicd_base + GICD_CTLR));

	for (i = 0; i <= (int)gd->max_it; i++) {
		if (gic_it_is_enabled(gd, i)) {
			DMSG("irq%d: enabled, group:%d, target:%x", i,
			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
		}
	}
}

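/*
 * Top-level native interrupt handler: acknowledge the interrupt to get its
 * ID, dispatch the handlers registered for it, then signal end-of-interrupt
 * so the GIC can deliver further interrupts of that priority.
 */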
static void __maybe_unused gic_native_itr_handler(void)
{
	struct gic_data *gd = &gic_data;
	uint32_t iar = 0;
	uint32_t id = 0;

	iar = gic_read_iar(gd);
	id = iar & GICC_IAR_IT_ID_MASK;

	if (id <= gd->max_it)
		interrupt_call_handlers(&gd->chip, id);
	else
		DMSG("ignoring interrupt %" PRIu32, id);

	gic_write_eoir(gd, iar);
}

#ifndef CFG_CORE_WORKAROUND_ARM_NMFI
/* Override interrupt_main_handler() with driver implementation */
void interrupt_main_handler(void)
{
	gic_native_itr_handler();
}
#endif /*CFG_CORE_WORKAROUND_ARM_NMFI*/

static void gic_op_add(struct itr_chip *chip, size_t it,
		       uint32_t type __unused,
		       uint32_t prio __unused)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_add(gd, it);
	/* Set the CPU mask to deliver interrupts to any online core */
	gic_it_set_cpu_mask(gd, it, 0xff);
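	/* GIC priorities: lower values mean higher priority, so 0x1 is high */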
	gic_it_set_prio(gd, it, 0x1);
}

static void gic_op_enable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_enable(gd, it);
}

static void gic_op_disable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_disable(gd, it);
}

static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_set_pending(gd, it);
}

static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			     uint32_t cpu_mask)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	/* Should be a Software Generated Interrupt */
	assert(it < NUM_SGI);

	if (it < NUM_NS_SGI)
		gic_it_raise_sgi(gd, it, cpu_mask, 1);
	else
		gic_it_raise_sgi(gd, it, cpu_mask, 0);
}

static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_set_cpu_mask(gd, it, cpu_mask);
}

#ifdef CFG_DT
/* Callback for "interrupts" and "interrupts-extended" DT node properties */
static TEE_Result dt_get_gic_chip_cb(struct dt_pargs *arg, void *priv_data,
				     struct itr_desc *itr_desc)
{
	int itr_num = DT_INFO_INVALID_INTERRUPT;
	struct itr_chip *chip = priv_data;
	uint32_t phandle_args[2] = { };
	uint32_t type = 0;
	uint32_t prio = 0;

	assert(arg && itr_desc);

	/*
	 * gic_dt_get_irq() expects phandle arguments still in DT format
	 * (big-endian) whereas struct dt_pargs carries them converted to
	 * CPU endianness, so swap the phandle arguments back.
	 * gic_dt_get_irq() consumes only the first 2 arguments.
	 */
	if (arg->args_count < 2)
		return TEE_ERROR_GENERIC;
	phandle_args[0] = cpu_to_fdt32(arg->args[0]);
	phandle_args[1] = cpu_to_fdt32(arg->args[1]);

	itr_num = gic_dt_get_irq((const void *)phandle_args, 2, &type, &prio);
	if (itr_num == DT_INFO_INVALID_INTERRUPT)
		return TEE_ERROR_GENERIC;

	gic_op_add(chip, itr_num, type, prio);

	itr_desc->chip = chip;
	itr_desc->itr_num = itr_num;

	return TEE_SUCCESS;
}

static TEE_Result gic_probe(const void *fdt, int offs, const void *cd __unused)
{
	if (interrupt_register_provider(fdt, offs, dt_get_gic_chip_cb,
					&gic_data.chip))
		panic();

	return TEE_SUCCESS;
}

static const struct dt_device_match gic_match_table[] = {
	{ .compatible = "arm,cortex-a15-gic" },
	{ .compatible = "arm,cortex-a7-gic" },
	{ .compatible = "arm,cortex-a5-gic" },
	{ .compatible = "arm,cortex-a9-gic" },
	{ .compatible = "arm,gic-400" },
	{ }
};

DEFINE_DT_DRIVER(gic_dt_driver) = {
	.name = "gic",
	.match_table = gic_match_table,
	.probe = gic_probe,
};
#endif /*CFG_DT*/
856