xref: /optee_os/core/drivers/gic.c (revision 213ecb84c3d41b9d682a932b58ceedf8cf095140)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2017, 2023-2024 Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  * Copyright (c) 2026 Arm Limited
6  */
7 
8 #include <arm.h>
9 #include <assert.h>
10 #include <compiler.h>
11 #include <config.h>
12 #include <drivers/gic.h>
13 #include <dt-bindings/interrupt-controller/arm-gic.h>
14 #include <initcall.h>
15 #include <io.h>
16 #include <keep.h>
17 #include <kernel/dt.h>
18 #include <kernel/dt_driver.h>
19 #include <kernel/interrupt.h>
20 #include <kernel/misc.h>
21 #include <kernel/mutex.h>
22 #include <kernel/panic.h>
23 #include <libfdt.h>
24 #include <mm/core_memprot.h>
25 #include <mm/core_mmu.h>
26 #include <trace.h>
27 #include <util.h>
28 
29 /* Offsets from gic.gicc_base */
30 #define GICC_CTLR		(0x000)
31 #define GICC_PMR		(0x004)
32 #define GICC_IAR		(0x00C)
33 #define GICC_EOIR		(0x010)
34 
35 #define GICC_CTLR_ENABLEGRP0	(1 << 0)
36 #define GICC_CTLR_ENABLEGRP1	(1 << 1)
37 #define GICC_CTLR_FIQEN		(1 << 3)
38 
39 /* Offsets from gic.gicd_base */
40 #define GICD_CTLR		(0x000)
41 #define GICD_TYPER		(0x004)
42 #define GICD_IGROUPR(n)		(0x080 + (n) * 4)
43 #define GICD_ISENABLER(n)	(0x100 + (n) * 4)
44 #define GICD_ICENABLER(n)	(0x180 + (n) * 4)
45 #define GICD_ISPENDR(n)		(0x200 + (n) * 4)
46 #define GICD_ICPENDR(n)		(0x280 + (n) * 4)
47 #define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
48 #define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
49 #define GICD_ICFGR(n)		(0xc00 + (n) * 4)
50 #define GICD_IGROUPMODR(n)	(0xd00 + (n) * 4)
51 #define GICD_SGIR		(0xF00)
52 
53 #ifdef _CFG_ARM_V3_OR_V4
54 #define GICD_PIDR2		(0xFFE8)
55 #else
56 /* Called ICPIDR2 in the GICv2 specification */
57 #define GICD_PIDR2		(0xFE8)
58 #endif
59 
60 #define GICD_CTLR_ENABLEGRP0	BIT32(0)
61 #define GICD_CTLR_ENABLEGRP1NS	BIT32(1)
62 #define GICD_CTLR_ENABLEGRP1S	BIT32(2)
63 #define GICD_CTLR_ARE_S		BIT32(4)
64 #define GICD_CTLR_ARE_NS	BIT32(5)
65 
66 /* Offsets from gic.gicr_base[core_pos] */
67 #define GICR_RD_BASE_OFFSET         (0x0000)
68 #define GICR_SGI_BASE_OFFSET        (GICR_RD_BASE_OFFSET + GICR_FRAME_SIZE)
69 #define GICR_VLPI_BASE_OFFSET       (GICR_SGI_BASE_OFFSET + GICR_FRAME_SIZE)
70 #define GICR_RESERVED_BASE_OFFSET   (GICR_VLPI_BASE_OFFSET + GICR_FRAME_SIZE)
71 
72 /* GIC physical LPI Redistributor register map */
73 #define GICR_CTLR		(GICR_RD_BASE_OFFSET + 0x0000)
74 #define GICR_TYPER		(GICR_RD_BASE_OFFSET + 0x0008)
75 
76 /* GIC SGI and PPI Redistributor register map */
77 #define GICR_IGROUPR0		(GICR_SGI_BASE_OFFSET + 0x080)
78 #define GICR_ISENABLER0		(GICR_SGI_BASE_OFFSET + 0x100)
79 #define GICR_ICENABLER0		(GICR_SGI_BASE_OFFSET + 0x180)
80 #define GICR_ICPENDR0		(GICR_SGI_BASE_OFFSET + 0x280)
81 #define GICR_IPRIORITYR(n)	(GICR_SGI_BASE_OFFSET + 0x400 + (n) * 4)
82 #define GICR_ICFGR0		(GICR_SGI_BASE_OFFSET + 0xC00)
83 #define GICR_ICFGR1		(GICR_SGI_BASE_OFFSET + 0xC04)
84 #define GICR_IGRPMODR0		(GICR_SGI_BASE_OFFSET + 0xD00)
85 
86 /* GICR_CTLR, Redistributor Control Register bits */
87 #define GICR_CTLR_RWP		BIT32(3)
88 
89 /* GICR_TYPER, Redistributor Type Register bits */
90 #define GICR_TYPER_LAST		BIT64(4)
91 #define GICR_TYPER_AFF3_SHIFT	56
92 #define GICR_TYPER_AFF2_SHIFT	48
93 #define GICR_TYPER_AFF1_SHIFT	40
94 #define GICR_TYPER_AFF0_SHIFT	32
95 
96 /* GICD PIDR2 is named ICPIDR2 on GICv2 but uses the same bit map */
97 #define GICD_PIDR2_ARCHREV_SHIFT	4
98 #define GICD_PIDR2_ARCHREV_MASK		0xF
99 
100 /* Number of Private Peripheral Interrupts */
101 #define NUM_PPI	32
102 
103 /* Number of Software Generated Interrupts */
104 #define NUM_SGI			16
105 
106 /* Number of Non-secure Software Generated Interrupts */
107 #define NUM_NS_SGI		8
108 
109 /* Number of interrupts in one register */
110 #define NUM_INTS_PER_REG	32
111 
112 /* Number of targets in one register */
113 #define NUM_TARGETS_PER_REG	4
114 
115 /* Field accessors for ITARGETSRn */
116 #define ITARGETSR_FIELD_BITS	8
117 #define ITARGETSR_FIELD_MASK	0xff
118 
119 #define GICD_TYPER_IT_LINES_NUM_MASK	0x1f
120 #define GICC_IAR_IT_ID_MASK	0x3ff
121 #define GICC_IAR_CPU_ID_MASK	0x7
122 #define GICC_IAR_CPU_ID_SHIFT	10
123 
124 #define GICC_SGI_IRM_BIT	40
125 #define GICC_SGI_AFF1_SHIFT	16
126 #define GICC_SGI_AFF2_SHIFT	32
127 #define GICC_SGI_AFF3_SHIFT	48
128 
129 #define GICD_SGIR_SIGINTID_MASK			0xf
130 #define GICD_SGIR_TO_OTHER_CPUS			0x1
131 #define GICD_SGIR_TO_THIS_CPU			0x2
132 #define GICD_SGIR_TARGET_LIST_FILTER_SHIFT	24
133 #define GICD_SGIR_NSATT_SHIFT			15
134 #define GICD_SGIR_CPU_TARGET_LIST_SHIFT		16
135 
136 /* GICD ICFGR bit fields */
137 #define GICD_ICFGR_TYPE_EDGE		2
138 #define GICD_ICFGR_TYPE_LEVEL		0
139 #define GICD_ICFGR_FIELD_BITS		2
140 #define GICD_ICFGR_FIELD_MASK		0x3
141 #define GICD_ICFGR_NUM_INTS_PER_REG	(NUM_INTS_PER_REG / \
142 					 GICD_ICFGR_FIELD_BITS)
143 
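/*
 * Editorial sketch (not part of the upstream driver): how the 2-bit
 * ICFGR configuration field of an interrupt is located with the macros
 * above. For example, interrupt ID 42 lives in GICD_ICFGR(42 / 16) =
 * GICD_ICFGR(2), at bit offset (42 % 16) * GICD_ICFGR_FIELD_BITS = 20.
 */
static inline uint32_t __maybe_unused
gicd_icfgr_field_sketch(vaddr_t gicd_base, size_t it)
{
	size_t index = it / GICD_ICFGR_NUM_INTS_PER_REG;
	uint32_t shift = (it % GICD_ICFGR_NUM_INTS_PER_REG) *
			 GICD_ICFGR_FIELD_BITS;

	/* Isolate the field: GICD_ICFGR_TYPE_EDGE or GICD_ICFGR_TYPE_LEVEL */
	return (io_read32(gicd_base + GICD_ICFGR(index)) >> shift) &
	       GICD_ICFGR_FIELD_MASK;
}
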
144 struct gic_data {
145 	vaddr_t gicc_base;
146 	vaddr_t gicd_base;
147 #ifdef _CFG_ARM_V3_OR_V4
148 	vaddr_t gicr_base[CFG_TEE_CORE_NB_CORE];
149 #endif
150 	size_t max_it;
151 	uint32_t per_cpu_group_status;
152 	uint32_t per_cpu_group_modifier;
153 	uint32_t per_cpu_enable;
154 	struct itr_chip chip;
155 };
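
/*
 * Editorial note: on GICv3 the bit pair (GICD_IGROUPRn, GICD_IGRPMODRn)
 * selects the interrupt group: (0, 0) is Secure Group 0, (0, 1) is
 * Secure Group 1 and (1, 0) is Non-secure Group 1. per_cpu_group_status
 * and per_cpu_group_modifier cache these bits for the banked SGI/PPI
 * range so that secondary CPUs can be brought in sync with the primary.
 */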
156 
157 static bool gic_primary_done __nex_bss;
158 static struct gic_data gic_data __nex_bss;
159 static struct mutex gic_mutex = MUTEX_INITIALIZER;
160 
161 static void gic_op_configure(struct itr_chip *chip, size_t it, uint32_t type,
162 			     uint32_t prio);
163 static void gic_op_enable(struct itr_chip *chip, size_t it);
164 static void gic_op_disable(struct itr_chip *chip, size_t it);
165 static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
166 static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
167 			     uint32_t cpu_mask);
168 static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
169 			uint8_t cpu_mask);
170 
171 static const struct itr_ops gic_ops = {
172 	.configure = gic_op_configure,
173 	.mask = gic_op_disable,
174 	.unmask = gic_op_enable,
175 	.enable = gic_op_enable,
176 	.disable = gic_op_disable,
177 	.raise_pi = gic_op_raise_pi,
178 	.raise_sgi = gic_op_raise_sgi,
179 	.set_affinity = gic_op_set_affinity,
180 };
181 DECLARE_KEEP_PAGER(gic_ops);
182 
183 static vaddr_t __maybe_unused get_gicr_base(struct gic_data *gd __maybe_unused)
184 {
185 #ifdef _CFG_ARM_V3_OR_V4
186 	return gd->gicr_base[get_core_pos()];
187 #else
188 	return 0;
189 #endif
190 }
191 
192 static bool affinity_routing_is_enabled(struct gic_data *gd)
193 {
194 	return IS_ENABLED2(_CFG_ARM_V3_OR_V4) &&
195 	       io_read32(gd->gicd_base + GICD_CTLR) & GICD_CTLR_ARE_S;
196 }
197 
198 static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
199 {
200 	int i = 0;
201 	uint32_t old_ctlr = 0;
202 	size_t ret = 0;
203 	size_t max_regs = io_read32(gicd_base + GICD_TYPER) &
204 			  GICD_TYPER_IT_LINES_NUM_MASK;
205 
206 	/*
207 	 * Probe which interrupt number is the largest.
208 	 */
209 #ifdef _CFG_ARM_V3_OR_V4
210 	old_ctlr = read_icc_ctlr();
211 	write_icc_ctlr(0);
212 #else
213 	old_ctlr = io_read32(gicc_base + GICC_CTLR);
214 	io_write32(gicc_base + GICC_CTLR, 0);
215 #endif
216 	for (i = max_regs; i >= 0; i--) {
217 		uint32_t old_reg = 0;
218 		uint32_t reg = 0;
219 		int b = 0;
220 
221 		old_reg = io_read32(gicd_base + GICD_ISENABLER(i));
222 		io_write32(gicd_base + GICD_ISENABLER(i), 0xffffffff);
223 		reg = io_read32(gicd_base + GICD_ISENABLER(i));
224 		io_write32(gicd_base + GICD_ICENABLER(i), ~old_reg);
225 		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
226 			if (BIT32(b) & reg) {
227 				ret = i * NUM_INTS_PER_REG + b;
228 				goto out;
229 			}
230 		}
231 	}
232 out:
233 #ifdef _CFG_ARM_V3_OR_V4
234 	write_icc_ctlr(old_ctlr);
235 #else
236 	io_write32(gicc_base + GICC_CTLR, old_ctlr);
237 #endif
238 	return ret;
239 }
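
/*
 * Worked example (editorial): with GICD_TYPER.ITLinesNumber = 1 the GIC
 * implements at most 32 * (1 + 1) = 64 interrupt lines, so the probe
 * above scans GICD_ISENABLER1 down to GICD_ISENABLER0 and returns the
 * highest interrupt ID whose enable bit sticks when all-ones is written.
 */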
240 
241 static void gicr_wait_for_pending_write(vaddr_t gicr_base)
242 {
243 	/*
244 	 * Wait for changes to
245 	 * - GICR_ICENABLER0
246 	 * - GICR_CTLR.DPG1S
247 	 * - GICR_CTLR.DPG1NS
248 	 * - GICR_CTLR.DPG0
249 	 * to be visible to all agents in the system.
250 	 */
251 	while (io_read32(gicr_base + GICR_CTLR) & GICR_CTLR_RWP)
252 		;
253 }
254 
255 static void gicv3_sync_redist_config(struct gic_data *gd)
256 {
257 	vaddr_t gicr_base = get_gicr_base(gd);
258 	bool need_sync = false;
259 	uint32_t gmod0 = 0;
260 	uint32_t grp0 = 0;
261 	size_t n = 0;
262 
263 	/*
264 	 * If gicr_base isn't available there's no need to synchronize SGI
265 	 * configuration since gic_init_donate_sgi_to_ns() would panic.
266 	 */
267 	if (!gicr_base)
268 		return;
269 
270 	grp0 = io_read32(gicr_base + GICR_IGROUPR0);
271 	gmod0 = io_read32(gicr_base + GICR_IGRPMODR0);
272 	for (n = GIC_SGI_SEC_BASE; n < GIC_SPI_BASE; n++) {
273 		/* Ignore matching bits */
274 		if (!(BIT32(n) & (grp0 ^ gd->per_cpu_group_status)) &&
275 		    !(BIT32(n) & (gmod0 ^ gd->per_cpu_group_modifier)))
276 			continue;
277 		/*
278 		 * SGI/PPI-n differs from primary CPU configuration,
279 		 * let's sync up.
280 		 */
281 		need_sync = true;
282 
283 		/* Disable interrupt */
284 		io_write32(gicr_base + GICR_ICENABLER0, BIT32(n));
285 
286 		/* Wait for the write to GICR_ICENABLER0 to propagate */
287 		gicr_wait_for_pending_write(gicr_base);
288 
289 		/* Make interrupt non-pending */
290 		io_write32(gicr_base + GICR_ICPENDR0, BIT32(n));
291 
292 		if (BIT32(n) & gd->per_cpu_group_status)
293 			grp0 |= BIT32(n);
294 		else
295 			grp0 &= ~BIT32(n);
296 		if (BIT32(n) & gd->per_cpu_group_modifier)
297 			gmod0 |= BIT32(n);
298 		else
299 			gmod0 &= ~BIT32(n);
300 	}
301 
302 	if (need_sync) {
303 		io_write32(gicr_base + GICR_IGROUPR0, grp0);
304 		io_write32(gicr_base + GICR_IGRPMODR0, gmod0);
305 		io_write32(gicr_base + GICR_ISENABLER0, gd->per_cpu_enable);
306 	}
307 }
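
/*
 * Example (editorial): if the primary CPU donated SGI 10 to the normal
 * world, bit 10 is set in per_cpu_group_status while a freshly booted
 * secondary may still read 0 in GICR_IGROUPR0 bit 10. The XOR test
 * above flags the mismatch and the banked registers are rewritten.
 */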
308 
309 static void gic_legacy_sync_dist_config(struct gic_data *gd)
310 {
311 	bool need_sync = false;
312 	uint32_t grp0 = 0;
313 	size_t n = 0;
314 
315 	grp0 = io_read32(gd->gicd_base + GICD_IGROUPR(0));
316 	for (n = GIC_SGI_SEC_BASE; n < GIC_SPI_BASE; n++) {
317 		/* Ignore matching bits */
318 		if (!(BIT32(n) & (grp0 ^ gd->per_cpu_group_status)))
319 			continue;
320 		/*
321 		 * SGI/PPI-n differs from primary CPU configuration,
322 		 * let's sync up.
323 		 */
324 		need_sync = true;
325 
326 		/* Disable interrupt */
327 		io_write32(gd->gicd_base + GICD_ICENABLER(0), BIT(n));
328 
329 		/* Make interrupt non-pending */
330 		io_write32(gd->gicd_base + GICD_ICPENDR(0), BIT(n));
331 
332 		if (BIT32(n) & gd->per_cpu_group_status)
333 			grp0 |= BIT32(n);
334 		else
335 			grp0 &= ~BIT32(n);
336 	}
337 
338 	if (need_sync) {
339 		io_write32(gd->gicd_base + GICD_IGROUPR(0), grp0);
340 		io_write32(gd->gicd_base + GICD_ISENABLER(0),
341 			   gd->per_cpu_enable);
342 	}
343 }
344 
345 static void init_gic_per_cpu(struct gic_data *gd)
346 {
347 	io_write32(gd->gicd_base + GICD_IGROUPR(0), gd->per_cpu_group_status);
348 
349 	/*
350 	 * Set the priority mask to permit Non-secure interrupts, and to
351 	 * allow the Non-secure world to adjust the priority mask itself
352 	 */
353 #ifdef _CFG_ARM_V3_OR_V4
354 	write_icc_pmr(0x80);
355 	write_icc_igrpen1(1);
356 #else
357 	io_write32(gd->gicc_base + GICC_PMR, 0x80);
358 
359 	/* Enable GIC */
360 	io_write32(gd->gicc_base + GICC_CTLR,
361 		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
362 		   GICC_CTLR_FIQEN);
363 #endif
364 }
365 
366 void gic_init_per_cpu(void)
367 {
368 	struct gic_data *gd = &gic_data;
369 
370 #ifdef _CFG_ARM_V3_OR_V4
371 	assert(gd->gicd_base);
372 #else
373 	assert(gd->gicd_base && gd->gicc_base);
374 #endif
375 
376 	if (IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW)) {
377 		/*
378 		 * GIC is already initialized by TF-A, we only need to
379 		 * handle any SGI or PPI configuration changes.
380 		 */
381 		if (affinity_routing_is_enabled(gd))
382 			gicv3_sync_redist_config(gd);
383 		else
384 			gic_legacy_sync_dist_config(gd);
385 	} else {
386 		/*
387 		 * Non-TF-A case where all CPU specific configuration
388 		 * of GIC must be done here.
389 		 */
390 		init_gic_per_cpu(gd);
391 	}
392 }
393 
394 void gic_init_donate_sgi_to_ns(size_t it)
395 {
396 	struct gic_data *gd = &gic_data;
397 
398 	assert(it >= GIC_SGI_SEC_BASE && it <= GIC_SGI_SEC_MAX);
399 
400 	/* Assert it's secure to start with. */
401 	assert(!(gd->per_cpu_group_status & BIT32(it)) &&
402 	       (gd->per_cpu_group_modifier & BIT32(it)));
403 
404 	gd->per_cpu_group_modifier &= ~BIT32(it);
405 	gd->per_cpu_group_status |= BIT32(it);
406 
407 	if (affinity_routing_is_enabled(gd)) {
408 		vaddr_t gicr_base = get_gicr_base(gd);
409 
410 		if (!gicr_base)
411 			panic("GICR_BASE missing");
412 
413 		/* Disable interrupt */
414 		io_write32(gicr_base + GICR_ICENABLER0, BIT32(it));
415 
416 		/* Wait for the write to GICR_ICENABLER0 to propagate */
417 		gicr_wait_for_pending_write(gicr_base);
418 
419 		/* Make interrupt non-pending */
420 		io_write32(gicr_base + GICR_ICPENDR0, BIT32(it));
421 
422 		/* Make the interrupt Non-secure */
423 		io_write32(gicr_base + GICR_IGROUPR0, gd->per_cpu_group_status);
424 		io_write32(gicr_base + GICR_IGRPMODR0,
425 			   gd->per_cpu_group_modifier);
426 	} else {
427 		/* Disable interrupt */
428 		io_write32(gd->gicd_base + GICD_ICENABLER(0), BIT(it));
429 
430 		/* Make interrupt non-pending */
431 		io_write32(gd->gicd_base + GICD_ICPENDR(0), BIT(it));
432 
433 		/* Make the interrupt Non-secure */
434 		io_write32(gd->gicd_base + GICD_IGROUPR(0),
435 			   gd->per_cpu_group_status);
436 	}
437 }
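
/*
 * Illustrative usage (hypothetical platform boot code): donate the
 * highest secure SGI to the normal world before it is configured:
 *
 *	gic_init_donate_sgi_to_ns(GIC_SGI_SEC_MAX);
 */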
438 
439 static int gic_dt_get_irq(const uint32_t *properties, int count, uint32_t *type,
440 			  uint32_t *prio)
441 {
442 	int it_num = DT_INFO_INVALID_INTERRUPT;
443 	uint32_t detection_type = IRQ_TYPE_NONE;
444 	uint32_t interrupt_type = GIC_PPI;
445 
446 	if (!properties || count < 2 || count > 3)
447 		return DT_INFO_INVALID_INTERRUPT;
448 
449 	interrupt_type = fdt32_to_cpu(properties[0]);
450 	it_num = (int)fdt32_to_cpu(properties[1]);
451 
452 	if (count == 3) {
453 		detection_type = fdt32_to_cpu(properties[2]) & GENMASK_32(3, 0);
454 		if (interrupt_type == GIC_PPI &&
455 		    detection_type != IRQ_TYPE_EDGE_RISING) {
456 			EMSG("PPI must be edge rising");
457 			return DT_INFO_INVALID_INTERRUPT;
458 		}
459 
460 		if (interrupt_type == GIC_SPI &&
461 		    (detection_type != IRQ_TYPE_EDGE_RISING &&
462 		     detection_type != IRQ_TYPE_LEVEL_HIGH)) {
463 			EMSG("SPI must be edge rising or high level");
464 			return DT_INFO_INVALID_INTERRUPT;
465 		}
466 	}
467 
468 	switch (interrupt_type) {
469 	case GIC_PPI:
470 		it_num += 16;
471 		detection_type = IRQ_TYPE_EDGE_RISING;
472 		break;
473 	case GIC_SPI:
474 		it_num += 32;
475 		break;
476 	default:
477 		return DT_INFO_INVALID_INTERRUPT;
478 	}
479 
480 	if (type)
481 		*type = detection_type;
482 
483 	if (prio)
484 		*prio = 0;
485 
486 	return it_num;
487 }
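
/*
 * Example (editorial) of device tree cells parsed above, following the
 * standard <type number flags> triplet of the arm,gic binding:
 *
 *	interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;	ID 23 + 32 = 55
 *	interrupts = <GIC_PPI 9 IRQ_TYPE_EDGE_RISING>;	ID 9 + 16 = 25
 */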
488 
489 static void __maybe_unused probe_redist_base_addrs(vaddr_t *gicr_base_addrs,
490 						   paddr_t gicr_base_pa)
491 {
492 	size_t sz = GIC_REDIST_REG_SIZE;
493 	paddr_t pa = gicr_base_pa;
494 	size_t core_pos = 0;
495 	uint64_t mt_bit = 0;
496 	uint64_t mpidr = 0;
497 	uint64_t tv = 0;
498 	vaddr_t va = 0;
499 
500 #ifdef ARM64
501 	mt_bit = read_mpidr_el1() & MPIDR_MT_MASK;
502 #endif
503 	do {
504 		va = core_mmu_get_va(pa, MEM_AREA_IO_SEC, sz);
505 		if (!va)
506 			panic();
507 		tv = io_read64(va + GICR_TYPER);
508 
509 		/*
510 		 * Extract an mpidr from the Type register to calculate the
511 		 * core position of this redistributor instance.
512 		 */
513 		mpidr = mt_bit;
514 		mpidr |= SHIFT_U64((tv >> GICR_TYPER_AFF3_SHIFT) &
515 				   MPIDR_AFFLVL_MASK, MPIDR_AFF3_SHIFT);
516 		mpidr |= (tv >> GICR_TYPER_AFF0_SHIFT) &
517 			 (MPIDR_AFF0_MASK | MPIDR_AFF1_MASK | MPIDR_AFF2_MASK);
518 		core_pos = get_core_pos_mpidr(mpidr);
519 		if (core_pos < CFG_TEE_CORE_NB_CORE) {
520 			DMSG("GICR_BASE[%zu] at %#"PRIxVA, core_pos, va);
521 			gicr_base_addrs[core_pos] = va;
522 		} else {
523 			EMSG("Skipping too large core_pos %zu from GICR_TYPER",
524 			     core_pos);
525 		}
526 		pa += sz;
527 	} while (!(tv & GICR_TYPER_LAST));
528 }
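
/*
 * Worked example (editorial): a GICR_TYPER value with affinity
 * Aff3.Aff2.Aff1.Aff0 = 0.0.1.2 carries Aff1 = 1 in bits [47:40] and
 * Aff0 = 2 in bits [39:32]; the loop above reassembles that into
 * MPIDR 0x102 (plus the MT bit when set) to find the owning core.
 */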
529 
530 static void gic_init_base_addr(paddr_t gicc_base_pa, paddr_t gicd_base_pa,
531 			       paddr_t gicr_base_pa __maybe_unused)
532 {
533 	struct gic_data *gd = &gic_data;
534 	vaddr_t gicc_base = 0;
535 	vaddr_t gicd_base = 0;
536 	uint32_t vers __maybe_unused = 0;
537 
538 	assert(cpu_mmu_enabled());
539 
540 	gicd_base = core_mmu_get_va(gicd_base_pa, MEM_AREA_IO_SEC,
541 				    GIC_DIST_REG_SIZE);
542 	if (!gicd_base)
543 		panic();
544 
545 	vers = io_read32(gicd_base + GICD_PIDR2);
546 	vers >>= GICD_PIDR2_ARCHREV_SHIFT;
547 	vers &= GICD_PIDR2_ARCHREV_MASK;
548 
549 	if (IS_ENABLED2(_CFG_ARM_V3_OR_V4)) {
550 		assert(vers == 4 || vers == 3);
551 	} else {
552 		assert(vers == 2 || vers == 1);
553 		gicc_base = core_mmu_get_va(gicc_base_pa, MEM_AREA_IO_SEC,
554 					    GIC_CPU_REG_SIZE);
555 		if (!gicc_base)
556 			panic();
557 	}
558 
559 	gd->gicc_base = gicc_base;
560 	gd->gicd_base = gicd_base;
561 	gd->max_it = probe_max_it(gicc_base, gicd_base);
562 #ifdef _CFG_ARM_V3_OR_V4
563 	if (affinity_routing_is_enabled(gd) && gicr_base_pa)
564 		probe_redist_base_addrs(gd->gicr_base, gicr_base_pa);
565 #endif
566 	gd->chip.ops = &gic_ops;
567 
568 	if (IS_ENABLED(CFG_DT))
569 		gd->chip.dt_get_irq = gic_dt_get_irq;
570 }
571 
572 void gic_init_v3(paddr_t gicc_base_pa, paddr_t gicd_base_pa,
573 		 paddr_t gicr_base_pa)
574 {
575 	struct gic_data __maybe_unused *gd = &gic_data;
576 	size_t __maybe_unused n = 0;
577 
578 	gic_init_base_addr(gicc_base_pa, gicd_base_pa, gicr_base_pa);
579 
580 #if defined(CFG_WITH_ARM_TRUSTED_FW)
581 	/* GIC configuration is initialized from TF-A when embedded */
582 	if (affinity_routing_is_enabled(gd)) {
583 		/* Secure affinity routing enabled */
584 		vaddr_t gicr_base = get_gicr_base(gd);
585 
586 		if (gicr_base) {
587 			gd->per_cpu_group_status = io_read32(gicr_base +
588 							     GICR_IGROUPR0);
589 			gd->per_cpu_group_modifier = io_read32(gicr_base +
590 							       GICR_IGRPMODR0);
591 		} else {
592 			IMSG("GIC redistributor base address not provided");
593 			IMSG("Assuming default GIC group status and modifier");
594 			gd->per_cpu_group_status = 0xffff00ff;
595 			gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
596 		}
597 	} else {
598 		/* Legacy operation with secure affinity routing disabled */
599 		gd->per_cpu_group_status = io_read32(gd->gicd_base +
600 						     GICD_IGROUPR(0));
601 		gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
602 	}
603 #else /*!CFG_WITH_ARM_TRUSTED_FW*/
604 	/*
605 	 * Without TF-A, the GIC is always configured for legacy operation
606 	 * with secure affinity routing disabled.
607 	 */
608 	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
609 		/* Disable interrupts */
610 		io_write32(gd->gicd_base + GICD_ICENABLER(n), 0xffffffff);
611 
612 		/* Make interrupts non-pending */
613 		io_write32(gd->gicd_base + GICD_ICPENDR(n), 0xffffffff);
614 
615 		/* Mark interrupts non-secure */
616 		if (n == 0) {
617 			/* Per-CPU interrupt configuration:
618 			 * ID0-ID7 (SGI)   Non-secure interrupts
619 			 * ID8-ID15 (SGI)  Secure interrupts
620 			 * All PPIs are configured as Non-secure.
621 			 */
622 			gd->per_cpu_group_status = 0xffff00ff;
623 			gd->per_cpu_group_modifier = ~gd->per_cpu_group_status;
624 			io_write32(gd->gicd_base + GICD_IGROUPR(n),
625 				   gd->per_cpu_group_status);
626 		} else {
627 			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffffffff);
628 		}
629 	}
630 
631 	/* Set the priority mask to permit Non-secure interrupts, and to
632 	 * allow the Non-secure world to adjust the priority mask itself
633 	 */
634 #ifdef _CFG_ARM_V3_OR_V4
635 	write_icc_pmr(0x80);
636 	write_icc_igrpen1(1);
637 	io_setbits32(gd->gicd_base + GICD_CTLR, GICD_CTLR_ENABLEGRP1S);
638 #else
639 	io_write32(gd->gicc_base + GICC_PMR, 0x80);
640 
641 	/* Enable GIC */
642 	io_write32(gd->gicc_base + GICC_CTLR, GICC_CTLR_FIQEN |
643 		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1);
644 	io_setbits32(gd->gicd_base + GICD_CTLR,
645 		     GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1NS);
646 #endif
647 #endif /*!CFG_WITH_ARM_TRUSTED_FW*/
648 
649 	interrupt_main_init(&gic_data.chip);
650 }
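
/*
 * Illustrative call (hypothetical platform code, the base address
 * macros are assumptions): a GICv3 platform typically passes 0 as the
 * GICC base, which is unused, and provides the redistributor base:
 *
 *	gic_init_v3(0, GIC_DIST_BASE_PA, GIC_REDIST_BASE_PA);
 */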
651 
652 static void gic_it_configure(struct gic_data *gd, size_t it)
653 {
654 	size_t idx = it / NUM_INTS_PER_REG;
655 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
656 
657 	assert(gd == &gic_data);
658 
659 	/* Disable the interrupt */
660 	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
661 	/* Make it non-pending */
662 	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
663 	/* Assign it to group0 */
664 	io_clrbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
665 #ifdef _CFG_ARM_V3_OR_V4
666 	/* Assign it to group1S */
667 	io_setbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
668 #endif
669 }
670 
671 static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
672 				uint8_t cpu_mask)
673 {
674 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
675 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
676 	uint32_t target = 0;
677 	uint32_t target_shift = 0;
678 	vaddr_t itargetsr = gd->gicd_base +
679 			    GICD_ITARGETSR(it / NUM_TARGETS_PER_REG);
680 
681 	assert(gd == &gic_data);
682 
683 	/* Assigned to group0 */
684 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
685 
686 	/* Route it to selected CPUs */
687 	target = io_read32(itargetsr);
688 	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
689 	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
690 	target |= cpu_mask << target_shift;
691 	DMSG("cpu_mask: writing %#"PRIx32" to %#" PRIxVA, target, itargetsr);
692 	io_write32(itargetsr, target);
693 	DMSG("cpu_mask: %#"PRIx32, io_read32(itargetsr));
694 }
695 
696 static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
697 {
698 	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
699 	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
700 
701 	assert(gd == &gic_data);
702 
703 	/* Assigned to group0 */
704 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
705 
706 	/* Set the priority of the interrupt */
707 	DMSG("prio: writing %#"PRIx8" to %#" PRIxVA,
708 	     prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
709 	io_write8(gd->gicd_base + GICD_IPRIORITYR(0) + it, prio);
710 }
711 
712 static void gic_it_set_type(struct gic_data *gd, size_t it, uint32_t type)
713 {
714 	size_t index = it / GICD_ICFGR_NUM_INTS_PER_REG;
715 	uint32_t shift = (it % GICD_ICFGR_NUM_INTS_PER_REG) *
716 			 GICD_ICFGR_FIELD_BITS;
717 	uint32_t icfg = 0;
718 
719 	assert(type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH);
720 
721 	if (type == IRQ_TYPE_EDGE_RISING)
722 		icfg = GICD_ICFGR_TYPE_EDGE;
723 	else
724 		icfg = GICD_ICFGR_TYPE_LEVEL;
725 
726 	io_mask32(gd->gicd_base + GICD_ICFGR(index),
727 		  SHIFT_U32(icfg, shift),
728 		  SHIFT_U32(GICD_ICFGR_FIELD_MASK, shift));
729 }
730 
731 static void gic_it_enable(struct gic_data *gd, size_t it)
732 {
733 	size_t idx = it / NUM_INTS_PER_REG;
734 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
735 	vaddr_t base = gd->gicd_base;
736 
737 	assert(gd == &gic_data);
738 
739 	/* Assigned to group0 */
740 	assert(!(io_read32(base + GICD_IGROUPR(idx)) & mask));
741 
742 	/* Enable the interrupt */
743 	io_write32(base + GICD_ISENABLER(idx), mask);
744 }
745 
746 static void gic_it_disable(struct gic_data *gd, size_t it)
747 {
748 	size_t idx = it / NUM_INTS_PER_REG;
749 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
750 
751 	assert(gd == &gic_data);
752 
753 	/* Assigned to group0 */
754 	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));
755 
756 	/* Disable the interrupt */
757 	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
758 }
759 
760 static void gic_it_set_pending(struct gic_data *gd, size_t it)
761 {
762 	size_t idx = it / NUM_INTS_PER_REG;
763 	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);
764 
765 	assert(gd == &gic_data);
766 
767 	/* Should be Peripheral Interrupt */
768 	assert(it >= NUM_SGI);
769 
770 	/* Raise the interrupt */
771 	io_write32(gd->gicd_base + GICD_ISPENDR(idx), mask);
772 }
773 
774 static void assert_cpu_mask_is_valid(uint32_t cpu_mask)
775 {
776 	bool __maybe_unused to_others = cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS;
777 	bool __maybe_unused to_current = cpu_mask & ITR_CPU_MASK_TO_THIS_CPU;
778 	bool __maybe_unused to_list = cpu_mask & 0xff;
779 
780 	/* One and only one of the bit fields shall be non-zero */
781 	assert(to_others + to_current + to_list == 1);
782 }
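
/*
 * For example (editorial): ITR_CPU_MASK_TO_OTHER_CPUS alone is valid, a
 * plain target list such as 0x3 (CPU0 and CPU1) is valid, but combining
 * ITR_CPU_MASK_TO_THIS_CPU with a target list trips the assert above.
 */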
783 
784 static void gic_it_raise_sgi(struct gic_data *gd __maybe_unused, size_t it,
785 			     uint32_t cpu_mask, bool ns)
786 {
787 #ifdef _CFG_ARM_V3_OR_V4
788 	uint32_t mask_id = it & 0xf;
789 	uint64_t mask = SHIFT_U64(mask_id, 24);
790 
791 	assert_cpu_mask_is_valid(cpu_mask);
792 
793 	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
794 		mask |= BIT64(GICC_SGI_IRM_BIT);
795 	} else {
796 		uint64_t mpidr = read_mpidr();
797 		uint64_t mask_aff1 = (mpidr & MPIDR_AFF1_MASK) >>
798 				     MPIDR_AFF1_SHIFT;
799 		uint64_t mask_aff2 = (mpidr & MPIDR_AFF2_MASK) >>
800 				     MPIDR_AFF2_SHIFT;
801 		uint64_t mask_aff3 = (mpidr & MPIDR_AFF3_MASK) >>
802 				     MPIDR_AFF3_SHIFT;
803 
804 		mask |= SHIFT_U64(mask_aff1, GICC_SGI_AFF1_SHIFT);
805 		mask |= SHIFT_U64(mask_aff2, GICC_SGI_AFF2_SHIFT);
806 		mask |= SHIFT_U64(mask_aff3, GICC_SGI_AFF3_SHIFT);
807 
808 		if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
809 			mask |= BIT32(mpidr & 0xf);
810 		} else {
811 			 * Only sending SGIs to cores within the same
812 			 * cluster is supported for now.
813 			 * same cluster now.
814 			 */
815 			mask |= cpu_mask & 0xff;
816 		}
817 	}
818 
819 	/* Raise the interrupt */
820 	if (ns)
821 		write_icc_asgi1r(mask);
822 	else
823 		write_icc_sgi1r(mask);
824 #else
825 	uint32_t mask_id = it & GICD_SGIR_SIGINTID_MASK;
826 	uint32_t mask_group = ns;
827 	uint32_t mask = mask_id;
828 
829 	assert_cpu_mask_is_valid(cpu_mask);
830 
831 	mask |= SHIFT_U32(mask_group, GICD_SGIR_NSATT_SHIFT);
832 	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
833 		mask |= SHIFT_U32(GICD_SGIR_TO_OTHER_CPUS,
834 				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
835 	} else if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
836 		mask |= SHIFT_U32(GICD_SGIR_TO_THIS_CPU,
837 				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
838 	} else {
839 		mask |= SHIFT_U32(cpu_mask & 0xff,
840 				  GICD_SGIR_CPU_TARGET_LIST_SHIFT);
841 	}
842 
843 	/* Raise the interrupt */
844 	io_write32(gd->gicd_base + GICD_SGIR, mask);
845 #endif
846 }
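
/*
 * Worked example (editorial), GICv3 path: raising SGI 8 on all other
 * CPUs encodes INTID = 8 in bits [27:24] and sets IRM (bit 40), i.e.
 * mask = SHIFT_U64(8, 24) | BIT64(GICC_SGI_IRM_BIT).
 */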
847 
848 static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
849 {
850 	assert(gd == &gic_data);
851 
852 #ifdef _CFG_ARM_V3_OR_V4
853 	return read_icc_iar1();
854 #else
855 	return io_read32(gd->gicc_base + GICC_IAR);
856 #endif
857 }
858 
859 static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
860 {
861 	assert(gd == &gic_data);
862 
863 #ifdef _CFG_ARM_V3_OR_V4
864 	write_icc_eoir1(eoir);
865 #else
866 	io_write32(gd->gicc_base + GICC_EOIR, eoir);
867 #endif
868 }
869 
870 static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
871 {
872 	size_t idx = it / NUM_INTS_PER_REG;
873 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
874 
875 	assert(gd == &gic_data);
876 	return !!(io_read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
877 }
878 
879 static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
880 {
881 	size_t idx = it / NUM_INTS_PER_REG;
882 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
883 
884 	assert(gd == &gic_data);
885 	return !!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
886 }
887 
888 static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
889 {
890 	size_t reg_idx = it / NUM_TARGETS_PER_REG;
891 	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
892 				ITARGETSR_FIELD_BITS;
893 	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
894 	uint32_t target = io_read32(gd->gicd_base + GICD_ITARGETSR(reg_idx));
895 
896 	assert(gd == &gic_data);
897 	return (target & target_mask) >> target_shift;
898 }
899 
900 void gic_dump_state(void)
901 {
902 	struct gic_data *gd = &gic_data;
903 	int i = 0;
904 
905 #ifdef _CFG_ARM_V3_OR_V4
906 	DMSG("GICC_CTLR: %#"PRIx32, read_icc_ctlr());
907 #else
908 	DMSG("GICC_CTLR: %#"PRIx32, io_read32(gd->gicc_base + GICC_CTLR));
909 #endif
910 	DMSG("GICD_CTLR: %#"PRIx32, io_read32(gd->gicd_base + GICD_CTLR));
911 
912 	for (i = 0; i <= (int)gd->max_it; i++) {
913 		if (gic_it_is_enabled(gd, i)) {
914 			DMSG("irq%d: enabled, group:%d, target:%#"PRIx32, i,
915 			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
916 		}
917 	}
918 }
919 
920 TEE_Result gic_spi_release_to_ns(size_t it)
921 {
922 	struct gic_data *gd = &gic_data;
923 	size_t idx = it / NUM_INTS_PER_REG;
924 	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);
925 
926 	if (it > gd->max_it || it < GIC_SPI_BASE)
927 		return TEE_ERROR_BAD_PARAMETERS;
928 	/* Make sure it's already disabled */
929 	if (gic_it_is_enabled(gd, it))
930 		return TEE_ERROR_BAD_STATE;
931 	/* Assert it's secure to start with */
932 	if (gic_it_get_group(gd, it))
933 		return TEE_ERROR_BAD_STATE;
934 
935 	mutex_lock(&gic_mutex);
936 	gic_it_set_cpu_mask(gd, it, 0);
937 	gic_it_set_prio(gd, it, GIC_SPI_PRI_NS_EL1);
938 
939 	/* Clear pending status */
940 	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
941 	/* Assign it to NS Group1 */
942 	io_setbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
943 #ifdef _CFG_ARM_V3_OR_V4
944 	io_clrbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
945 #endif
946 	mutex_unlock(&gic_mutex);
947 	return TEE_SUCCESS;
948 }
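
/*
 * Illustrative usage (hypothetical caller): release a secure SPI, e.g.
 * ID 72, to the normal world once the secure driver is done with it.
 * The SPI must be disabled and still in the secure group, otherwise
 * TEE_ERROR_BAD_STATE is returned:
 *
 *	if (gic_spi_release_to_ns(72))
 *		panic();
 */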
949 
950 static void __maybe_unused gic_native_itr_handler(void)
951 {
952 	struct gic_data *gd = &gic_data;
953 	uint32_t iar = 0;
954 	uint32_t id = 0;
955 
956 	iar = gic_read_iar(gd);
957 	id = iar & GICC_IAR_IT_ID_MASK;
958 
959 	if (id >= 1020 && id <= 1023) {
960 		/*
961 		 * Special INTIDs
962 		 * 1020: Interrupt expected to be handled at SEL1 or SEL2.
963 		 *       PE (Processing Element) is either executing at EL3
964 		 *       in AArch64 state or in monitor mode in AArch32 state.
965 		 *       Reserved on GICv1 and GICv2.
966 		 * 1021: Interrupt expected to be handled at NSEL1 or NSEL2
967 		 *       PE (Processing Element) is either executing at EL3
968 		 *       in AArch64 state or in monitor mode in AArch32 state.
969 		 *       Reserved on GICv1 and GICv2.
970 		 * 1022: -(GICv3.3): Interrupt is an NMI
971 		 *       -(Legacy): Group 1 interrupt to be signaled to the
972 		 *        PE and acknowledged using alias registers. Reserved if
973 		 *        interrupt grouping is not supported.
974 		 * 1023: No pending interrupt with sufficient priority
975 		 *       (spurious) or the highest priority pending interrupt is
976 		 *       not appropriate for the current security state or
977 		 *       interrupt group.
978 		 */
979 		DMSG("Special interrupt %"PRIu32, id);
980 
981 		return;
982 	}
983 
984 	if (id <= gd->max_it)
985 		interrupt_call_handlers(&gd->chip, id);
986 	else
987 		EMSG("Unhandled interrupt %"PRIu32, id);
988 
989 	gic_write_eoir(gd, iar);
990 }
991 
992 #ifndef CFG_CORE_WORKAROUND_ARM_NMFI
993 /* Override interrupt_main_handler() with driver implementation */
994 void interrupt_main_handler(void)
995 {
996 	gic_native_itr_handler();
997 }
998 #endif /*CFG_CORE_WORKAROUND_ARM_NMFI*/
999 
1000 static void gic_op_configure(struct itr_chip *chip, size_t it,
1001 			     uint32_t type, uint32_t prio __unused)
1002 {
1003 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
1004 
1005 	assert(gd == &gic_data);
1006 
1007 	if (it > gd->max_it)
1008 		panic();
1009 
1010 	if (it < GIC_SPI_BASE) {
1011 		if (gic_primary_done)
1012 			panic("Cannot add SGI or PPI after boot");
1013 
1014 		/* Assign it to Secure Group 1, G1S */
1015 		gd->per_cpu_group_modifier |= BIT32(it);
1016 		gd->per_cpu_group_status &= ~BIT32(it);
1017 	}
1018 
1019 	if (it < GIC_SPI_BASE && affinity_routing_is_enabled(gd)) {
1020 		vaddr_t gicr_base = get_gicr_base(gd);
1021 
1022 		if (!gicr_base)
1023 			panic("GICR_BASE missing");
1024 
1025 		/* Disable interrupt */
1026 		io_write32(gicr_base + GICR_ICENABLER0, BIT32(it));
1027 
1028 		/* Wait for the write to GICR_ICENABLER0 to propagate */
1029 		gicr_wait_for_pending_write(gicr_base);
1030 
1031 		/* Make interrupt non-pending */
1032 		io_write32(gicr_base + GICR_ICPENDR0, BIT32(it));
1033 
1034 		/* Make the interrupt Secure */
1035 		io_write32(gicr_base + GICR_IGROUPR0, gd->per_cpu_group_status);
1036 		io_write32(gicr_base + GICR_IGRPMODR0,
1037 			   gd->per_cpu_group_modifier);
1038 	} else {
1039 		gic_it_configure(gd, it);
1040 		/* Set the CPU mask to deliver interrupts to any online core */
1041 		gic_it_set_cpu_mask(gd, it, 0xff);
1042 		gic_it_set_prio(gd, it, 0x1);
1043 		if (type != IRQ_TYPE_NONE)
1044 			gic_it_set_type(gd, it, type);
1045 	}
1046 }
1047 
1048 static void gic_op_enable(struct itr_chip *chip, size_t it)
1049 {
1050 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
1051 
1052 	assert(gd == &gic_data);
1053 
1054 	if (it > gd->max_it)
1055 		panic();
1056 
1057 	if (it < GIC_SPI_BASE)
1058 		gd->per_cpu_enable |= BIT(it);
1059 
1060 	if (it < GIC_SPI_BASE && affinity_routing_is_enabled(gd)) {
1061 		vaddr_t gicr_base = get_gicr_base(gd);
1062 
1063 		if (!gicr_base)
1064 			panic("GICR_BASE missing");
1065 
1066 		/* Assigned to G1S */
1067 		assert(gd->per_cpu_group_modifier & BIT(it) &&
1068 		       !(gd->per_cpu_group_status & BIT(it)));
1069 		io_write32(gicr_base + GICR_ISENABLER0, gd->per_cpu_enable);
1070 	} else {
1071 		gic_it_enable(gd, it);
1072 	}
1073 }
1074 
1075 static void gic_op_disable(struct itr_chip *chip, size_t it)
1076 {
1077 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
1078 
1079 	assert(gd == &gic_data);
1080 
1081 	if (it > gd->max_it)
1082 		panic();
1083 
1084 	gic_it_disable(gd, it);
1085 }
1086 
1087 static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
1088 {
1089 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
1090 
1091 	assert(gd == &gic_data);
1092 
1093 	if (it > gd->max_it)
1094 		panic();
1095 
1096 	gic_it_set_pending(gd, it);
1097 }
1098 
1099 static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
1100 			     uint32_t cpu_mask)
1101 {
1102 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
1103 	bool ns = false;
1104 
1105 	assert(gd == &gic_data);
1106 
1107 	/* Should be Software Generated Interrupt */
1108 	assert(it < NUM_SGI);
1109 
1110 	ns = BIT32(it) & gd->per_cpu_group_status;
1111 	gic_it_raise_sgi(gd, it, cpu_mask, ns);
1112 }
1113 
1114 static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
1115 			uint8_t cpu_mask)
1116 {
1117 	struct gic_data *gd = container_of(chip, struct gic_data, chip);
1118 
1119 	assert(gd == &gic_data);
1120 
1121 	if (it > gd->max_it)
1122 		panic();
1123 
1124 	gic_it_set_cpu_mask(gd, it, cpu_mask);
1125 }
1126 
1127 #ifdef CFG_DT
1128 /* Callback for "interrupts" and "interrupts-extended" DT node properties */
1129 static TEE_Result dt_get_gic_chip_cb(struct dt_pargs *arg, void *priv_data,
1130 				     struct itr_desc *itr_desc)
1131 {
1132 	int itr_num = DT_INFO_INVALID_INTERRUPT;
1133 	struct itr_chip *chip = priv_data;
1134 	uint32_t phandle_args[3] = { };
1135 	uint32_t type = 0;
1136 	uint32_t prio = 0;
1137 
1138 	assert(arg && itr_desc);
1139 
1140 	/*
1141 	 * gic_dt_get_irq() expects the phandle arguments to still be in DT
1142 	 * format (big-endian) whereas struct dt_pargs carries CPU-endian
1143 	 * values, so swap the phandle arguments back. gic_dt_get_irq()
1144 	 * requires at least the first 2 arguments.
1145 	 */
1146 	if (arg->args_count < 2)
1147 		return TEE_ERROR_GENERIC;
1148 
1149 	phandle_args[0] = cpu_to_fdt32(arg->args[0]);
1150 	phandle_args[1] = cpu_to_fdt32(arg->args[1]);
1151 	if (arg->args_count >= 3)
1152 		phandle_args[2] = cpu_to_fdt32(arg->args[2]);
1153 
1154 	itr_num = gic_dt_get_irq((const void *)phandle_args, arg->args_count,
1155 				 &type, &prio);
1156 	if (itr_num == DT_INFO_INVALID_INTERRUPT)
1157 		return TEE_ERROR_GENERIC;
1158 
1159 	gic_op_configure(chip, itr_num, type, prio);
1160 
1161 	itr_desc->chip = chip;
1162 	itr_desc->itr_num = itr_num;
1163 
1164 	return TEE_SUCCESS;
1165 }
1166 
1167 static TEE_Result gic_probe(const void *fdt, int offs, const void *cd __unused)
1168 {
1169 	if (interrupt_register_provider(fdt, offs, dt_get_gic_chip_cb,
1170 					&gic_data.chip))
1171 		panic();
1172 
1173 	return TEE_SUCCESS;
1174 }
1175 
1176 static const struct dt_device_match gic_match_table[] = {
1177 	{ .compatible = "arm,cortex-a15-gic" },
1178 	{ .compatible = "arm,cortex-a7-gic" },
1179 	{ .compatible = "arm,cortex-a5-gic" },
1180 	{ .compatible = "arm,cortex-a9-gic" },
1181 	{ .compatible = "arm,gic-400" },
1182 	{ }
1183 };
1184 
1185 DEFINE_DT_DRIVER(gic_dt_driver) = {
1186 	.name = "gic",
1187 	.match_table = gic_match_table,
1188 	.probe = gic_probe,
1189 };
1190 #endif /*CFG_DT*/
1191 
1192 static TEE_Result gic_set_primary_done(void)
1193 {
1194 	gic_primary_done = true;
1195 	return TEE_SUCCESS;
1196 }
1197 
1198 nex_release_init_resource(gic_set_primary_done);
1199