xref: /optee_os/core/drivers/gic.c (revision bce2f88ab347b28f4149dacef2ad48ac67a500b6)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <compiler.h>
#include <config.h>
#include <drivers/gic.h>
#include <io.h>
#include <keep.h>
#include <kernel/dt.h>
#include <kernel/interrupt.h>
#include <kernel/panic.h>
#include <libfdt.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <trace.h>
#include <util.h>

/* Offsets from gic.gicc_base */
#define GICC_CTLR		(0x000)
#define GICC_PMR		(0x004)
#define GICC_IAR		(0x00C)
#define GICC_EOIR		(0x010)

#define GICC_CTLR_ENABLEGRP0	(1 << 0)
#define GICC_CTLR_ENABLEGRP1	(1 << 1)
#define GICC_CTLR_FIQEN		(1 << 3)

/* Offsets from gic.gicd_base */
#define GICD_CTLR		(0x000)
#define GICD_TYPER		(0x004)
#define GICD_IGROUPR(n)		(0x080 + (n) * 4)
#define GICD_ISENABLER(n)	(0x100 + (n) * 4)
#define GICD_ICENABLER(n)	(0x180 + (n) * 4)
#define GICD_ISPENDR(n)		(0x200 + (n) * 4)
#define GICD_ICPENDR(n)		(0x280 + (n) * 4)
#define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
#define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
#define GICD_IGROUPMODR(n)	(0xd00 + (n) * 4)
#define GICD_SGIR		(0xF00)

#define GICD_CTLR_ENABLEGRP0	(1 << 0)
#define GICD_CTLR_ENABLEGRP1	(1 << 1)
/* GICv3: enable Secure Group 1 interrupts in the distributor */
#define GICD_CTLR_ENABLEGRP1S	(1 << 2)

/* Number of Private Peripheral Interrupts */
#define NUM_PPI			32

/* Number of Software Generated Interrupts */
#define NUM_SGI			16

/* Number of Non-secure Software Generated Interrupts */
#define NUM_NS_SGI		8

/* Number of interrupts in one register */
#define NUM_INTS_PER_REG	32

/* Number of targets in one register */
#define NUM_TARGETS_PER_REG	4

/* Accessors for the per-interrupt fields in ITARGETSRn */
#define ITARGETSR_FIELD_BITS	8
#define ITARGETSR_FIELD_MASK	0xff

#define GICD_TYPER_IT_LINES_NUM_MASK	0x1f
#define GICC_IAR_IT_ID_MASK	0x3ff
#define GICC_IAR_CPU_ID_MASK	0x7
#define GICC_IAR_CPU_ID_SHIFT	10

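/*
 * Fields of ICC_SGI1R_EL1 (GICv3): Aff1/Aff2/Aff3 select the target
 * cluster, and IRM, when set, routes the SGI to all PEs except the
 * requesting one.
 */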
#define GICC_SGI_IRM_BIT	40
#define GICC_SGI_AFF1_SHIFT	16
#define GICC_SGI_AFF2_SHIFT	32
#define GICC_SGI_AFF3_SHIFT	48

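/*
 * Fields of GICD_SGIR (GICv2): the target list filter selects an
 * explicit CPU target list, all CPUs but the requester, or the
 * requester alone; NSATT selects the group the SGI is raised for.
 */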
#define GICD_SGIR_SIGINTID_MASK			0xf
#define GICD_SGIR_TO_OTHER_CPUS		0x1
#define GICD_SGIR_TO_THIS_CPU			0x2
#define GICD_SGIR_TARGET_LIST_FILTER_SHIFT	24
#define GICD_SGIR_NSATT_SHIFT			15
#define GICD_SGIR_CPU_TARGET_LIST_SHIFT		16

struct gic_data {
	vaddr_t gicc_base;
	vaddr_t gicd_base;
	size_t max_it;
	struct itr_chip chip;
};

static struct gic_data gic_data __nex_bss;

static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t type,
		       uint32_t prio);
static void gic_op_enable(struct itr_chip *chip, size_t it);
static void gic_op_disable(struct itr_chip *chip, size_t it);
static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			     uint32_t cpu_mask);
static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
				uint8_t cpu_mask);

static const struct itr_ops gic_ops = {
	.add = gic_op_add,
	.mask = gic_op_disable,
	.unmask = gic_op_enable,
	.enable = gic_op_enable,
	.disable = gic_op_disable,
	.raise_pi = gic_op_raise_pi,
	.raise_sgi = gic_op_raise_sgi,
	.set_affinity = gic_op_set_affinity,
};
DECLARE_KEEP_PAGER(gic_ops);

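/*
 * Probe the highest implemented interrupt line. GICD_TYPER.ITLinesNumber
 * gives the number of implemented 32-bit enable registers; writing
 * all-ones to GICD_ISENABLERn and reading the value back reveals which
 * enable bits actually exist (unimplemented bits read as zero). The
 * previous enable state is restored through GICD_ICENABLERn, and the
 * CPU interface control register is cleared during the probe and
 * restored afterwards.
 */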
static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
{
	int i;
	uint32_t old_ctlr;
	size_t ret = 0;
	size_t max_regs = io_read32(gicd_base + GICD_TYPER) &
			  GICD_TYPER_IT_LINES_NUM_MASK;

	/*
	 * Probe which interrupt number is the largest.
	 */
#if defined(CFG_ARM_GICV3)
	old_ctlr = read_icc_ctlr();
	write_icc_ctlr(0);
#else
	old_ctlr = io_read32(gicc_base + GICC_CTLR);
	io_write32(gicc_base + GICC_CTLR, 0);
#endif
	for (i = max_regs; i >= 0; i--) {
		uint32_t old_reg;
		uint32_t reg;
		int b;

		old_reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ISENABLER(i), 0xffffffff);
		reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ICENABLER(i), ~old_reg);
		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
			if (BIT32(b) & reg) {
				ret = i * NUM_INTS_PER_REG + b;
				goto out;
			}
		}
	}
out:
#if defined(CFG_ARM_GICV3)
	write_icc_ctlr(old_ctlr);
#else
	io_write32(gicc_base + GICC_CTLR, old_ctlr);
#endif
	return ret;
}

void gic_cpu_init(void)
{
	struct gic_data *gd = &gic_data;

#if defined(CFG_ARM_GICV3)
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif

	/*
	 * Per-CPU interrupt configuration:
	 * ID0-ID7 (SGI) for Non-secure interrupts
	 * ID8-ID15 (SGI) for Secure interrupts
	 * All PPIs are configured as Non-secure interrupts.
	 */
	io_write32(gd->gicd_base + GICD_IGROUPR(0), 0xffff00ff);

	/*
	 * Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself.
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR,
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		   GICC_CTLR_FIQEN);
#endif
}

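/*
 * Translate a GIC interrupt specifier from the device tree: cell 0 holds
 * the interrupt type (0 = SPI, 1 = PPI) and cell 1 the type-relative
 * number, so SPIs are offset by 32 and PPIs by 16 to form the GIC
 * interrupt ID.
 */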
static int gic_dt_get_irq(const uint32_t *properties, int count, uint32_t *type,
			  uint32_t *prio)
{
	int it_num = DT_INFO_INVALID_INTERRUPT;

	if (type)
		*type = IRQ_TYPE_NONE;

	if (prio)
		*prio = 0;

	if (!properties || count < 2)
		return DT_INFO_INVALID_INTERRUPT;

	it_num = fdt32_to_cpu(properties[1]);

	switch (fdt32_to_cpu(properties[0])) {
	case 1:
		it_num += 16;
		break;
	case 0:
		it_num += 32;
		break;
	default:
		it_num = DT_INFO_INVALID_INTERRUPT;
	}

	return it_num;
}

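/*
 * With GICv3 the CPU interface is accessed through system registers
 * (ICC_*) rather than memory-mapped GICC registers, so only the
 * distributor needs a virtual mapping in that configuration.
 */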
static void gic_init_base_addr(paddr_t gicc_base_pa, paddr_t gicd_base_pa)
{
	struct gic_data *gd = &gic_data;
	vaddr_t gicc_base = 0;
	vaddr_t gicd_base = 0;

	assert(cpu_mmu_enabled());

	gicd_base = core_mmu_get_va(gicd_base_pa, MEM_AREA_IO_SEC,
				    GIC_DIST_REG_SIZE);
	if (!gicd_base)
		panic();

	if (!IS_ENABLED(CFG_ARM_GICV3)) {
		gicc_base = core_mmu_get_va(gicc_base_pa, MEM_AREA_IO_SEC,
					    GIC_CPU_REG_SIZE);
		if (!gicc_base)
			panic();
	}

	gd->gicc_base = gicc_base;
	gd->gicd_base = gicd_base;
	gd->max_it = probe_max_it(gicc_base, gicd_base);
	gd->chip.ops = &gic_ops;

	if (IS_ENABLED(CFG_DT))
		gd->chip.dt_get_irq = gic_dt_get_irq;
}

void gic_init(paddr_t gicc_base_pa, paddr_t gicd_base_pa)
{
	struct gic_data __maybe_unused *gd = &gic_data;
	size_t __maybe_unused n = 0;

	gic_init_base_addr(gicc_base_pa, gicd_base_pa);

	/* When OP-TEE runs on top of TF-A, TF-A owns the GIC configuration */
#ifndef CFG_WITH_ARM_TRUSTED_FW
	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
		/* Disable interrupts */
		io_write32(gd->gicd_base + GICD_ICENABLER(n), 0xffffffff);

		/* Make interrupts non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(n), 0xffffffff);

		/* Mark interrupts non-secure */
		if (n == 0) {
			/*
			 * Per-CPU interrupt configuration:
			 * ID0-ID7 (SGI) for Non-secure interrupts
			 * ID8-ID15 (SGI) for Secure interrupts
			 * All PPIs are configured as Non-secure interrupts.
			 */
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffff00ff);
		} else {
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffffffff);
		}
	}

	/*
	 * Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself.
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
	io_setbits32(gd->gicd_base + GICD_CTLR, GICD_CTLR_ENABLEGRP1S);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR, GICC_CTLR_FIQEN |
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1);
	io_setbits32(gd->gicd_base + GICD_CTLR,
		     GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1);
#endif
#endif /*CFG_WITH_ARM_TRUSTED_FW*/

	interrupt_main_init(&gic_data.chip);
}

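/*
 * Interrupts handled by OP-TEE live in group 0: on GICv2 they are
 * delivered as FIQ (GICC_CTLR_FIQEN), and on GICv3 the extra
 * GICD_IGROUPMODR bit turns the group 0 assignment into Secure Group 1.
 */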
static void gic_it_add(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Disable the interrupt */
	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
	/* Make it non-pending */
	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
	/* Assign it to group0 */
	io_clrbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
#if defined(CFG_ARM_GICV3)
	/* Assign it to group1S */
	io_setbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
#endif
}

static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
				uint8_t cpu_mask)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
	uint32_t target, target_shift;
	vaddr_t itargetsr = gd->gicd_base +
			    GICD_ITARGETSR(it / NUM_TARGETS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Route it to selected CPUs */
	target = io_read32(itargetsr);
	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
	target |= cpu_mask << target_shift;
	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA, target, itargetsr);
	io_write32(itargetsr, target);
	DMSG("cpu_mask: 0x%x", io_read32(itargetsr));
}

static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Set the priority of the interrupt */
	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
	     prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
	io_write8(gd->gicd_base + GICD_IPRIORITYR(0) + it, prio);
}

static void gic_it_enable(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
	vaddr_t base = gd->gicd_base;

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(base + GICD_IGROUPR(idx)) & mask));

	/* Enable the interrupt */
	io_write32(base + GICD_ISENABLER(idx), mask);
}

static void gic_it_disable(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Disable the interrupt */
	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
}

static void gic_it_set_pending(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = BIT32(it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);

	/* Should be a Peripheral Interrupt */
	assert(it >= NUM_SGI);

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_ISPENDR(idx), mask);
}

static void assert_cpu_mask_is_valid(uint32_t cpu_mask)
{
	bool __maybe_unused to_others = cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS;
	bool __maybe_unused to_current = cpu_mask & ITR_CPU_MASK_TO_THIS_CPU;
	bool __maybe_unused to_list = cpu_mask & 0xff;

	/* One and only one of the bit fields shall be non-zero */
	assert(to_others + to_current + to_list == 1);
}

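/*
 * Raise an SGI, through ICC_SGI1R_EL1/ICC_ASGI1R_EL1 on GICv3 or
 * GICD_SGIR on GICv2. A non-zero @group selects the Non-secure
 * interrupt group: the ICC_ASGI1R_EL1 alias (Group 1 of the other
 * security state) on GICv3, or NSATT=1 on GICv2.
 */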
static void gic_it_raise_sgi(struct gic_data *gd __maybe_unused, size_t it,
			     uint32_t cpu_mask, uint8_t group)
{
#if defined(CFG_ARM_GICV3)
	uint32_t mask_id = it & 0xf;
	uint64_t mask = SHIFT_U64(mask_id, 24);

	assert_cpu_mask_is_valid(cpu_mask);

	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
		mask |= BIT64(GICC_SGI_IRM_BIT);
	} else {
		uint64_t mpidr = read_mpidr();
		uint64_t mask_aff1 = (mpidr & MPIDR_AFF1_MASK) >>
				     MPIDR_AFF1_SHIFT;
		uint64_t mask_aff2 = (mpidr & MPIDR_AFF2_MASK) >>
				     MPIDR_AFF2_SHIFT;
		uint64_t mask_aff3 = (mpidr & MPIDR_AFF3_MASK) >>
				     MPIDR_AFF3_SHIFT;

		mask |= SHIFT_U64(mask_aff1, GICC_SGI_AFF1_SHIFT);
		mask |= SHIFT_U64(mask_aff2, GICC_SGI_AFF2_SHIFT);
		mask |= SHIFT_U64(mask_aff3, GICC_SGI_AFF3_SHIFT);

		if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
			mask |= BIT32(mpidr & 0xf);
		} else {
			/*
			 * Only sending SGIs to cores within the same
			 * cluster is supported for now.
			 */
			mask |= cpu_mask & 0xff;
		}
	}

	/* Raise the interrupt */
	if (group)
		write_icc_asgi1r(mask);
	else
		write_icc_sgi1r(mask);
#else
	uint32_t mask_id = it & GICD_SGIR_SIGINTID_MASK;
	uint32_t mask_group = group & 0x1;
	uint32_t mask = mask_id;

	assert_cpu_mask_is_valid(cpu_mask);

	mask |= SHIFT_U32(mask_group, GICD_SGIR_NSATT_SHIFT);
	if (cpu_mask & ITR_CPU_MASK_TO_OTHER_CPUS) {
		mask |= SHIFT_U32(GICD_SGIR_TO_OTHER_CPUS,
				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
	} else if (cpu_mask & ITR_CPU_MASK_TO_THIS_CPU) {
		mask |= SHIFT_U32(GICD_SGIR_TO_THIS_CPU,
				  GICD_SGIR_TARGET_LIST_FILTER_SHIFT);
	} else {
		mask |= SHIFT_U32(cpu_mask & 0xff,
				  GICD_SGIR_CPU_TARGET_LIST_SHIFT);
	}

	/* Raise the interrupt */
	io_write32(gd->gicd_base + GICD_SGIR, mask);
#endif
}

static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	return read_icc_iar1();
#else
	return io_read32(gd->gicc_base + GICC_IAR);
#endif
}

static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
{
	assert(gd == &gic_data);

#if defined(CFG_ARM_GICV3)
	write_icc_eoir1(eoir);
#else
	io_write32(gd->gicc_base + GICC_EOIR, eoir);
#endif
}

static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);
	return !!(io_read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
}

static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	assert(gd == &gic_data);
	return !!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
}

static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
{
	size_t reg_idx = it / NUM_TARGETS_PER_REG;
	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
				ITARGETSR_FIELD_BITS;
	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
	uint32_t target = io_read32(gd->gicd_base + GICD_ITARGETSR(reg_idx));

	assert(gd == &gic_data);
	return (target & target_mask) >> target_shift;
}

void gic_dump_state(void)
{
	struct gic_data *gd = &gic_data;
	int i = 0;

#if defined(CFG_ARM_GICV3)
	DMSG("GICC_CTLR: 0x%x", read_icc_ctlr());
#else
	DMSG("GICC_CTLR: 0x%x", io_read32(gd->gicc_base + GICC_CTLR));
#endif
	DMSG("GICD_CTLR: 0x%x", io_read32(gd->gicd_base + GICD_CTLR));

	for (i = 0; i <= (int)gd->max_it; i++) {
		if (gic_it_is_enabled(gd, i)) {
			DMSG("irq%d: enabled, group:%d, target:%x", i,
			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
		}
	}
}

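/*
 * Native interrupt handler: reading IAR acknowledges the highest
 * priority pending interrupt; after the registered handlers have run,
 * writing the same value to EOIR signals end of interrupt (on GICv2
 * the value also carries the source CPU ID for SGIs).
 */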
static void __maybe_unused gic_native_itr_handler(void)
{
	struct gic_data *gd = &gic_data;
	uint32_t iar = 0;
	uint32_t id = 0;

	iar = gic_read_iar(gd);
	id = iar & GICC_IAR_IT_ID_MASK;

	if (id <= gd->max_it)
		interrupt_call_handlers(&gd->chip, id);
	else
		DMSG("ignoring interrupt %" PRIu32, id);

	gic_write_eoir(gd, iar);
}

#ifndef CFG_CORE_WORKAROUND_ARM_NMFI
/* Override interrupt_main_handler() with the driver implementation */
void interrupt_main_handler(void)
{
	gic_native_itr_handler();
}
#endif /*CFG_CORE_WORKAROUND_ARM_NMFI*/

static void gic_op_add(struct itr_chip *chip, size_t it,
		       uint32_t type __unused,
		       uint32_t prio __unused)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_add(gd, it);
	/* Set the CPU mask to deliver interrupts to any online core */
	gic_it_set_cpu_mask(gd, it, 0xff);
	gic_it_set_prio(gd, it, 0x1);
}

static void gic_op_enable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_enable(gd, it);
}

static void gic_op_disable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_disable(gd, it);
}

static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_set_pending(gd, it);
}

static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			     uint32_t cpu_mask)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	/* Should be a Software Generated Interrupt */
	assert(it < NUM_SGI);

	if (it > gd->max_it)
		panic();

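	/*
	 * SGIs 0-7 are reserved for the Non-secure world and raised in
	 * the Non-secure group; SGIs 8-15 stay in the Secure group,
	 * matching the GICD_IGROUPR(0) split done at initialization.
	 */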
	if (it < NUM_NS_SGI)
		gic_it_raise_sgi(gd, it, cpu_mask, 1);
	else
		gic_it_raise_sgi(gd, it, cpu_mask, 0);
}

static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
				uint8_t cpu_mask)
{
	struct gic_data *gd = container_of(chip, struct gic_data, chip);

	assert(gd == &gic_data);

	if (it > gd->max_it)
		panic();

	gic_it_set_cpu_mask(gd, it, cpu_mask);
}
659