Lines Matching refs:gd (uses of the struct gic_data pointer gd in the GIC driver)

175 static vaddr_t __maybe_unused get_gicr_base(struct gic_data *gd __maybe_unused)  in get_gicr_base()
178 return gd->gicr_base[get_core_pos()]; in get_gicr_base()
184 static bool affinity_routing_is_enabled(struct gic_data *gd) in affinity_routing_is_enabled() argument
187 io_read32(gd->gicd_base + GICD_CTLR) & GICD_CTLR_ARE_S; in affinity_routing_is_enabled()
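
The check at line 187 steers most of the driver: with GIC security states, the Secure view of GICD_CTLR carries an ARE_S bit, and when it is set the SGIs/PPIs of each core are programmed through that core's redistributor (GICR_*) rather than through the distributor's banked registers. A minimal standalone sketch of the pattern follows; the register offset and bit position come from the GICv3 architecture, and mock_io_read32() and use_redistributor() are illustrative stand-ins, not part of the driver above.

#include <stdbool.h>
#include <stdint.h>

#define GICD_CTLR               0x000
#define GICD_CTLR_ARE_S         (UINT32_C(1) << 4) /* bit 4 in the GICv3 spec */
#define GIC_SPI_BASE            32      /* INTIDs below this are SGIs/PPIs */

/* Stand-in for io_read32(); the real driver performs an MMIO read. */
static uint32_t mock_io_read32(uintptr_t va)
{
        (void)va;
        return GICD_CTLR_ARE_S;         /* pretend ARE_S is set */
}

static bool affinity_routing_is_enabled(uintptr_t gicd_base)
{
        return mock_io_read32(gicd_base + GICD_CTLR) & GICD_CTLR_ARE_S;
}

/*
 * With affinity routing enabled, SGIs/PPIs are handled via this core's
 * redistributor instead of the distributor, which is the branch taken in
 * several of the functions listed below.
 */
static bool use_redistributor(uintptr_t gicd_base, unsigned int it)
{
        return it < GIC_SPI_BASE && affinity_routing_is_enabled(gicd_base);
}

int main(void)
{
        uintptr_t gicd_base = 0;        /* placeholder, no real MMIO here */

        return !use_redistributor(gicd_base, 8);        /* SGI 8 */
}
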
247 static void gicv3_sync_redist_config(struct gic_data *gd) in gicv3_sync_redist_config() argument
249 vaddr_t gicr_base = get_gicr_base(gd); in gicv3_sync_redist_config()
266 if (!(BIT32(n) & (grp0 ^ gd->per_cpu_group_status)) && in gicv3_sync_redist_config()
267 !(BIT32(n) & (gmod0 ^ gd->per_cpu_group_modifier))) in gicv3_sync_redist_config()
284 if (BIT32(n) & gd->per_cpu_group_status) in gicv3_sync_redist_config()
288 if (BIT32(n) & gd->per_cpu_group_modifier) in gicv3_sync_redist_config()
297 io_write32(gicr_base + GICR_ISENABLER0, gd->per_cpu_enable); in gicv3_sync_redist_config()
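
per_cpu_group_status and per_cpu_group_modifier cache, one bit per SGI/PPI, what the driver last wrote to GICR_IGROUPR0 and the matching group-modifier register; the XOR tests at lines 266-267 single out only the IDs whose hardware grouping no longer matches that cache before they are reprogrammed. With two Security states, the (group, modifier) bit pair selects the GICv3 interrupt group as in this standalone sketch (the enum and decode_group() helper are illustrative only):

#include <stdint.h>
#include <stdio.h>

/*
 * GICv3 group selection with two Security states:
 * IGROUPR bit = "group status", IGRPMODR bit = "group modifier".
 */
enum gic_group {
        GIC_GROUP0,             /* (0, 0): Secure Group 0          */
        GIC_GROUP1_SECURE,      /* (0, 1): Secure Group 1          */
        GIC_GROUP1_NS,          /* (1, 0): Non-secure Group 1      */
        GIC_GROUP_RESERVED      /* (1, 1): reserved encoding       */
};

static enum gic_group decode_group(uint32_t group_status,
                                   uint32_t group_modifier, unsigned int id)
{
        unsigned int g = (group_status >> id) & 1;
        unsigned int m = (group_modifier >> id) & 1;

        if (!g)
                return m ? GIC_GROUP1_SECURE : GIC_GROUP0;
        return m ? GIC_GROUP_RESERVED : GIC_GROUP1_NS;
}

int main(void)
{
        uint32_t status = 0x000000ff;   /* bits set: Non-secure Group 1   */
        uint32_t modifier = 0x0000ff00; /* bits set (status 0): Secure G1 */

        printf("SGI0 -> %d, SGI8 -> %d\n",
               decode_group(status, modifier, 0),
               decode_group(status, modifier, 8));
        return 0;
}

Donating an interrupt to the normal world, as done further down, therefore amounts to flipping exactly this bit pair.
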
301 static void gic_legacy_sync_dist_config(struct gic_data *gd) in gic_legacy_sync_dist_config() argument
307 grp0 = io_read32(gd->gicd_base + GICD_IGROUPR(0)); in gic_legacy_sync_dist_config()
310 if (!(BIT32(n) & (grp0 ^ gd->per_cpu_group_status))) in gic_legacy_sync_dist_config()
319 io_write32(gd->gicd_base + GICD_ICENABLER(0), BIT(n)); in gic_legacy_sync_dist_config()
322 io_write32(gd->gicd_base + GICD_ICPENDR(0), BIT(n)); in gic_legacy_sync_dist_config()
324 if (BIT32(n) & gd->per_cpu_group_status) in gic_legacy_sync_dist_config()
331 io_write32(gd->gicd_base + GICD_IGROUPR(0), grp0); in gic_legacy_sync_dist_config()
332 io_write32(gd->gicd_base + GICD_ISENABLER(0), in gic_legacy_sync_dist_config()
333 gd->per_cpu_enable); in gic_legacy_sync_dist_config()
337 static void init_gic_per_cpu(struct gic_data *gd) in init_gic_per_cpu() argument
339 io_write32(gd->gicd_base + GICD_IGROUPR(0), gd->per_cpu_group_status); in init_gic_per_cpu()
349 io_write32(gd->gicc_base + GICC_PMR, 0x80); in init_gic_per_cpu()
352 io_write32(gd->gicc_base + GICC_CTLR, in init_gic_per_cpu()
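
The 0x80 written to GICC_PMR at line 349 is the priority mask for this CPU interface: only interrupts whose priority value is numerically lower than 0x80 are signalled (GIC priorities use "lower value = higher priority"). Secure interrupts are given priority 0x1 further down (line 1034), while priorities written by non-secure software show up in the 0x80-0xFF range when viewed from the secure side, so this single value splits the two worlds. A trivial standalone sketch of the rule, with the names invented for the example:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * GICC_PMR masks every interrupt whose priority is not strictly
 * higher (numerically lower) than the programmed value.
 */
static bool passes_priority_mask(uint8_t prio, uint8_t pmr)
{
        return prio < pmr;
}

int main(void)
{
        uint8_t pmr = 0x80;     /* value written at line 349 */

        assert(passes_priority_mask(0x01, pmr));  /* secure prio used here */
        assert(!passes_priority_mask(0x80, pmr)); /* masked                */
        assert(!passes_priority_mask(0xf0, pmr)); /* low prio, masked      */
        return 0;
}
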
360 struct gic_data *gd = &gic_data; in gic_init_per_cpu() local
363 assert(gd->gicd_base); in gic_init_per_cpu()
365 assert(gd->gicd_base && gd->gicc_base); in gic_init_per_cpu()
373 if (affinity_routing_is_enabled(gd)) in gic_init_per_cpu()
374 gicv3_sync_redist_config(gd); in gic_init_per_cpu()
376 gic_legacy_sync_dist_config(gd); in gic_init_per_cpu()
382 init_gic_per_cpu(gd); in gic_init_per_cpu()
388 struct gic_data *gd = &gic_data; in gic_init_donate_sgi_to_ns() local
393 assert(!(gd->per_cpu_group_status & BIT32(it)) && in gic_init_donate_sgi_to_ns()
394 (gd->per_cpu_group_modifier & BIT32(it))); in gic_init_donate_sgi_to_ns()
396 gd->per_cpu_group_modifier &= ~BIT32(it); in gic_init_donate_sgi_to_ns()
397 gd->per_cpu_group_status |= BIT32(it); in gic_init_donate_sgi_to_ns()
399 if (affinity_routing_is_enabled(gd)) { in gic_init_donate_sgi_to_ns()
400 vaddr_t gicr_base = get_gicr_base(gd); in gic_init_donate_sgi_to_ns()
415 io_write32(gicr_base + GICR_IGROUPR0, gd->per_cpu_group_status); in gic_init_donate_sgi_to_ns()
417 gd->per_cpu_group_modifier); in gic_init_donate_sgi_to_ns()
420 io_write32(gd->gicd_base + GICD_ICENABLER(0), BIT(it)); in gic_init_donate_sgi_to_ns()
423 io_write32(gd->gicd_base + GICD_ICPENDR(0), BIT(it)); in gic_init_donate_sgi_to_ns()
426 io_write32(gd->gicd_base + GICD_IGROUPR(0), in gic_init_donate_sgi_to_ns()
427 gd->per_cpu_group_status); in gic_init_donate_sgi_to_ns()
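
gic_init_donate_sgi_to_ns() moves a single SGI from Secure Group 1 (status 0 / modifier 1) to Non-secure Group 1 (status 1 / modifier 0): the assert at lines 393-394 checks the starting state, lines 396-397 flip the cached bits, and the result is then written out through the redistributor (affinity routing) or GICD_IGROUPR(0) (legacy). A standalone sketch of just that bit transition, with BIT32() and the struct re-declared locally for the example:

#include <assert.h>
#include <stdint.h>

#define BIT32(n)        (UINT32_C(1) << (n))

struct sgi_group_cache {
        uint32_t status;        /* mirrors per_cpu_group_status   */
        uint32_t modifier;      /* mirrors per_cpu_group_modifier */
};

/* Move one SGI from Secure Group 1 (0/1) to Non-secure Group 1 (1/0). */
static void donate_sgi_to_ns(struct sgi_group_cache *c, unsigned int it)
{
        /* Must currently be Secure Group 1 */
        assert(!(c->status & BIT32(it)) && (c->modifier & BIT32(it)));

        c->modifier &= ~BIT32(it);
        c->status |= BIT32(it);
}

int main(void)
{
        struct sgi_group_cache c = {
                .status = 0xffff00ff,   /* default from gic_init_v3() */
                .modifier = ~0xffff00ffU,
        };

        donate_sgi_to_ns(&c, 8);        /* SGI 8 chosen only for the example */
        assert(c.status & BIT32(8));
        assert(!(c.modifier & BIT32(8)));
        return 0;
}

The SGI number used here is only an example; the listing does not say which SGI the driver actually donates.
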
525 struct gic_data *gd = &gic_data; in gic_init_base_addr() local
551 gd->gicc_base = gicc_base; in gic_init_base_addr()
552 gd->gicd_base = gicd_base; in gic_init_base_addr()
553 gd->max_it = probe_max_it(gicc_base, gicd_base); in gic_init_base_addr()
555 if (affinity_routing_is_enabled(gd) && gicr_base_pa) in gic_init_base_addr()
556 probe_redist_base_addrs(gd->gicr_base, gicr_base_pa); in gic_init_base_addr()
558 gd->chip.ops = &gic_ops; in gic_init_base_addr()
561 gd->chip.dt_get_irq = gic_dt_get_irq; in gic_init_base_addr()
567 struct gic_data __maybe_unused *gd = &gic_data; in gic_init_v3() local
574 if (affinity_routing_is_enabled(gd)) { in gic_init_v3()
576 vaddr_t gicr_base = get_gicr_base(gd); in gic_init_v3()
579 gd->per_cpu_group_status = io_read32(gicr_base + in gic_init_v3()
581 gd->per_cpu_group_modifier = io_read32(gicr_base + in gic_init_v3()
586 gd->per_cpu_group_status = 0xffff00ff; in gic_init_v3()
587 gd->per_cpu_group_modifier = ~gd->per_cpu_group_status; in gic_init_v3()
591 gd->per_cpu_group_status = io_read32(gd->gicd_base + in gic_init_v3()
593 gd->per_cpu_group_modifier = ~gd->per_cpu_group_status; in gic_init_v3()
600 for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) { in gic_init_v3()
602 io_write32(gd->gicd_base + GICD_ICENABLER(n), 0xffffffff); in gic_init_v3()
605 io_write32(gd->gicd_base + GICD_ICPENDR(n), 0xffffffff); in gic_init_v3()
614 gd->per_cpu_group_status = 0xffff00ff; in gic_init_v3()
615 gd->per_cpu_group_modifier = ~gd->per_cpu_group_status; in gic_init_v3()
616 io_write32(gd->gicd_base + GICD_IGROUPR(n), in gic_init_v3()
617 gd->per_cpu_group_status); in gic_init_v3()
619 io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffffffff); in gic_init_v3()
629 io_setbits32(gd->gicd_base + GICD_CTLR, GICD_CTLR_ENABLEGRP1S); in gic_init_v3()
631 io_write32(gd->gicc_base + GICC_PMR, 0x80); in gic_init_v3()
634 io_write32(gd->gicc_base + GICC_CTLR, GICC_CTLR_FIQEN | in gic_init_v3()
636 io_setbits32(gd->gicd_base + GICD_CTLR, in gic_init_v3()
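
The default 0xffff00ff loaded into per_cpu_group_status (lines 586 and 614) reads bit by bit as: SGIs 0-7 and PPIs 16-31 set (Non-secure Group 1), SGIs 8-15 clear and, with the complemented modifier, left in Secure Group 1. A short standalone check of that reading:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Default SGI/PPI grouping written in gic_init_v3() */
        uint32_t group_status = 0xffff00ff;
        unsigned int id;

        /* A clear bit means the INTID stays in a secure group by default */
        for (id = 0; id < 32; id++)
                if (!(group_status & (UINT32_C(1) << id)))
                        printf("INTID %u kept in a secure group\n", id);
        return 0;
}
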
644 static void gic_it_configure(struct gic_data *gd, size_t it) in gic_it_configure() argument
649 assert(gd == &gic_data); in gic_it_configure()
652 io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask); in gic_it_configure()
654 io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask); in gic_it_configure()
656 io_clrbits32(gd->gicd_base + GICD_IGROUPR(idx), mask); in gic_it_configure()
659 io_setbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask); in gic_it_configure()
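
From gic_it_configure() onwards every distributor helper derives the same (register index, bit mask) pair: GICD_ISENABLER/ICENABLER, ISPENDR/ICPENDR, IGROUPR and IGROUPMODR all pack one bit per interrupt, 32 per register, and the enable/pend banks are write-one-to-act, so a plain mask write is enough and no read-modify-write is needed. A standalone sketch of that derivation (NUM_INTS_PER_REG and BIT32() re-declared here; it_to_reg_bit() is illustrative):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define NUM_INTS_PER_REG        32
#define BIT32(n)                (UINT32_C(1) << (n))

struct reg_bit {
        size_t idx;     /* which GICD_xxx(idx) register   */
        uint32_t mask;  /* which bit inside that register */
};

static struct reg_bit it_to_reg_bit(size_t it)
{
        struct reg_bit rb = {
                .idx = it / NUM_INTS_PER_REG,
                .mask = BIT32(it % NUM_INTS_PER_REG),
        };

        return rb;
}

int main(void)
{
        struct reg_bit rb = it_to_reg_bit(42);  /* an SPI, for example */

        assert(rb.idx == 1 && rb.mask == BIT32(10));
        return 0;
}
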
663 static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it, in gic_it_set_cpu_mask() argument
670 vaddr_t itargetsr = gd->gicd_base + in gic_it_set_cpu_mask()
673 assert(gd == &gic_data); in gic_it_set_cpu_mask()
676 assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask)); in gic_it_set_cpu_mask()
688 static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio) in gic_it_set_prio() argument
693 assert(gd == &gic_data); in gic_it_set_prio()
696 assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask)); in gic_it_set_prio()
700 prio, gd->gicd_base + GICD_IPRIORITYR(0) + it); in gic_it_set_prio()
701 io_write8(gd->gicd_base + GICD_IPRIORITYR(0) + it, prio); in gic_it_set_prio()
704 static void gic_it_set_type(struct gic_data *gd, size_t it, uint32_t type) in gic_it_set_type() argument
718 io_mask32(gd->gicd_base + GICD_ICFGR(index), in gic_it_set_type()
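
GICD_ICFGR is laid out differently: two configuration bits per interrupt, sixteen interrupts per register, with bit 1 of each field selecting edge (1) versus level (0) triggering. That is the arithmetic behind the io_mask32() update at line 718. A standalone sketch of the field computation (the GIC_CFG_* values are the architectural encodings, the names are made up for the example):

#include <assert.h>
#include <stdint.h>

#define GIC_CFG_LEVEL   0x0     /* field value: level-sensitive */
#define GIC_CFG_EDGE    0x2     /* field value: edge-triggered  */

/* GICD_ICFGR: 16 interrupts per register, 2 configuration bits each. */
static void icfgr_field(unsigned int it, unsigned int *index,
                        unsigned int *shift)
{
        *index = it / 16;
        *shift = (it % 16) * 2;
}

int main(void)
{
        unsigned int index = 0;
        unsigned int shift = 0;

        icfgr_field(35, &index, &shift);        /* SPI 35, for example */
        assert(index == 2 && shift == 6);

        /* Value and mask one would feed an io_mask32()-style update */
        uint32_t mask = UINT32_C(0x3) << shift;
        uint32_t val = (uint32_t)GIC_CFG_EDGE << shift;

        assert((val & ~mask) == 0);
        return 0;
}
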
723 static void gic_it_enable(struct gic_data *gd, size_t it) in gic_it_enable() argument
727 vaddr_t base = gd->gicd_base; in gic_it_enable()
729 assert(gd == &gic_data); in gic_it_enable()
738 static void gic_it_disable(struct gic_data *gd, size_t it) in gic_it_disable() argument
743 assert(gd == &gic_data); in gic_it_disable()
746 assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask)); in gic_it_disable()
749 io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask); in gic_it_disable()
752 static void gic_it_set_pending(struct gic_data *gd, size_t it) in gic_it_set_pending() argument
757 assert(gd == &gic_data); in gic_it_set_pending()
763 io_write32(gd->gicd_base + GICD_ISPENDR(idx), mask); in gic_it_set_pending()
776 static void gic_it_raise_sgi(struct gic_data *gd __maybe_unused, size_t it, in gic_it_raise_sgi()
836 io_write32(gd->gicd_base + GICD_SGIR, mask); in gic_it_raise_sgi()
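
On the legacy path at line 836 an SGI is raised with a single GICD_SGIR write. Per the GICv2 architecture the word packs SGIINTID in bits [3:0], NSATT in bit 15 (set to forward the SGI as a Group 1, i.e. non-secure, interrupt, matching the ns flag taken from per_cpu_group_status at line 1102), CPUTargetList in bits [23:16] and TargetListFilter in bits [25:24], where 0 means "use the target list". A standalone sketch of building that value; the shift macros are named for the example and may not match the driver's own definitions:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* GICv2 GICD_SGIR field layout */
#define SGIR_INTID_SHIFT        0       /* [3:0]   SGI interrupt ID      */
#define SGIR_NSATT_SHIFT        15      /* [15]    forward as Group 1    */
#define SGIR_TARGET_SHIFT       16      /* [23:16] CPU target list       */
#define SGIR_FILTER_SHIFT       24      /* [25:24] 0 = use target list   */

static uint32_t make_sgir(unsigned int intid, uint8_t cpu_mask, bool ns)
{
        assert(intid < 16);     /* SGIs are INTID 0..15 */

        return ((uint32_t)intid << SGIR_INTID_SHIFT) |
               ((uint32_t)ns << SGIR_NSATT_SHIFT) |
               ((uint32_t)cpu_mask << SGIR_TARGET_SHIFT) |
               (UINT32_C(0) << SGIR_FILTER_SHIFT);
}

int main(void)
{
        /* SGI 8 to CPUs 0 and 1, raised as a non-secure interrupt */
        assert(make_sgir(8, 0x3, true) == 0x00038008);
        return 0;
}
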
840 static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused) in gic_read_iar()
842 assert(gd == &gic_data); in gic_read_iar()
847 return io_read32(gd->gicc_base + GICC_IAR); in gic_read_iar()
851 static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir) in gic_write_eoir()
853 assert(gd == &gic_data); in gic_write_eoir()
858 io_write32(gd->gicc_base + GICC_EOIR, eoir); in gic_write_eoir()
862 static bool gic_it_is_enabled(struct gic_data *gd, size_t it) in gic_it_is_enabled() argument
867 assert(gd == &gic_data); in gic_it_is_enabled()
868 return !!(io_read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask); in gic_it_is_enabled()
871 static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it) in gic_it_get_group() argument
876 assert(gd == &gic_data); in gic_it_get_group()
877 return !!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask); in gic_it_get_group()
880 static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it) in gic_it_get_target() argument
886 uint32_t target = io_read32(gd->gicd_base + GICD_ITARGETSR(reg_idx)); in gic_it_get_target()
888 assert(gd == &gic_data); in gic_it_get_target()
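
GICD_ITARGETSR holds one 8-bit CPU-target mask per interrupt, four interrupts per 32-bit register, so the target of interrupt it sits in byte it % 4 of ITARGETSR(it / 4); gic_it_set_cpu_mask() builds the same itargetsr address (line 670) and gic_it_get_target() reads the register back. A standalone sketch of the byte arithmetic (helper name invented for the example):

#include <assert.h>
#include <stdint.h>

/* GICD_ITARGETSR: 4 interrupts per register, one target byte each. */
static uint8_t itargetsr_extract(uint32_t reg_val, unsigned int it)
{
        unsigned int shift = (it % 4) * 8;

        return (reg_val >> shift) & 0xff;
}

int main(void)
{
        /* Register covering INTIDs 40..43, with INTID 42 targeting CPU 1 */
        uint32_t reg = 0x00020000;
        unsigned int it = 42;
        unsigned int reg_idx = it / 4;

        assert(reg_idx == 10);
        assert(itargetsr_extract(reg, it) == 0x02);
        return 0;
}
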
894 struct gic_data *gd = &gic_data; in gic_dump_state() local
900 DMSG("GICC_CTLR: %#"PRIx32, io_read32(gd->gicc_base + GICC_CTLR)); in gic_dump_state()
902 DMSG("GICD_CTLR: %#"PRIx32, io_read32(gd->gicd_base + GICD_CTLR)); in gic_dump_state()
904 for (i = 0; i <= (int)gd->max_it; i++) { in gic_dump_state()
905 if (gic_it_is_enabled(gd, i)) { in gic_dump_state()
907 gic_it_get_group(gd, i), gic_it_get_target(gd, i)); in gic_dump_state()
914 struct gic_data *gd = &gic_data; in gic_spi_release_to_ns() local
918 if (it >= gd->max_it || it < GIC_SPI_BASE) in gic_spi_release_to_ns()
921 if (!gic_it_is_enabled(gd, it)) in gic_spi_release_to_ns()
924 if (!gic_it_get_group(gd, it)) in gic_spi_release_to_ns()
928 gic_it_set_cpu_mask(gd, it, 0); in gic_spi_release_to_ns()
929 gic_it_set_prio(gd, it, GIC_SPI_PRI_NS_EL1); in gic_spi_release_to_ns()
932 io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask); in gic_spi_release_to_ns()
934 io_setbits32(gd->gicd_base + GICD_IGROUPR(idx), mask); in gic_spi_release_to_ns()
936 io_clrbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask); in gic_spi_release_to_ns()
944 struct gic_data *gd = &gic_data; in gic_native_itr_handler() local
948 iar = gic_read_iar(gd); in gic_native_itr_handler()
976 if (id <= gd->max_it) in gic_native_itr_handler()
977 interrupt_call_handlers(&gd->chip, id); in gic_native_itr_handler()
981 gic_write_eoir(gd, iar); in gic_native_itr_handler()
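
gic_native_itr_handler() follows the standard acknowledge/EOI protocol: read GICC_IAR once, take the INTID from its low bits, call the registered handlers if it is an ordinary interrupt (1020-1023 are special INTIDs, 1023 meaning spurious), and finally write the unmodified IAR value to GICC_EOIR. The __maybe_unused on gd in gic_read_iar()/gic_write_eoir() hints that on the GICv3 system-register path the CPU interface is not reached through gicc_base at all. A standalone sketch of the flow with the MMIO accesses mocked (read_iar()/write_eoir() are stand-ins, not driver functions):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define GICC_IAR_INTID_MASK     0x3ffU  /* INTID lives in IAR[9:0] */

/* Mocked CPU interface accesses; the driver uses io_read32()/io_write32() */
static uint32_t read_iar(void)
{
        return 42;      /* pretend SPI 42 was acknowledged */
}

static void write_eoir(uint32_t eoir)
{
        printf("EOI with IAR value %#"PRIx32"\n", eoir);
}

static void handle_one_interrupt(void)
{
        uint32_t iar = read_iar();
        uint32_t id = iar & GICC_IAR_INTID_MASK;

        /* 1020-1023 are special INTIDs (1023 = spurious); just return here */
        if (id >= 1020)
                return;

        printf("dispatch handlers for INTID %"PRIu32"\n", id);

        write_eoir(iar);        /* complete with the unmodified IAR value */
}

int main(void)
{
        handle_one_interrupt();
        return 0;
}
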
995 struct gic_data *gd = container_of(chip, struct gic_data, chip); in gic_op_configure() local
997 assert(gd == &gic_data); in gic_op_configure()
999 if (it > gd->max_it) in gic_op_configure()
1007 gd->per_cpu_group_modifier |= BIT32(it); in gic_op_configure()
1008 gd->per_cpu_group_status &= ~BIT32(it); in gic_op_configure()
1011 if (it < GIC_SPI_BASE && affinity_routing_is_enabled(gd)) { in gic_op_configure()
1012 vaddr_t gicr_base = get_gicr_base(gd); in gic_op_configure()
1027 io_write32(gicr_base + GICR_IGROUPR0, gd->per_cpu_group_status); in gic_op_configure()
1029 gd->per_cpu_group_modifier); in gic_op_configure()
1031 gic_it_configure(gd, it); in gic_op_configure()
1033 gic_it_set_cpu_mask(gd, it, 0xff); in gic_op_configure()
1034 gic_it_set_prio(gd, it, 0x1); in gic_op_configure()
1036 gic_it_set_type(gd, it, type); in gic_op_configure()
1042 struct gic_data *gd = container_of(chip, struct gic_data, chip); in gic_op_enable() local
1044 assert(gd == &gic_data); in gic_op_enable()
1046 if (it > gd->max_it) in gic_op_enable()
1050 gd->per_cpu_enable |= BIT(it); in gic_op_enable()
1052 if (it < GIC_SPI_BASE && affinity_routing_is_enabled(gd)) { in gic_op_enable()
1053 vaddr_t gicr_base = get_gicr_base(gd); in gic_op_enable()
1059 assert(gd->per_cpu_group_modifier & BIT(it) && in gic_op_enable()
1060 !(gd->per_cpu_group_status & BIT(it))); in gic_op_enable()
1061 io_write32(gicr_base + GICR_ISENABLER0, gd->per_cpu_enable); in gic_op_enable()
1063 gic_it_enable(gd, it); in gic_op_enable()
1069 struct gic_data *gd = container_of(chip, struct gic_data, chip); in gic_op_disable() local
1071 assert(gd == &gic_data); in gic_op_disable()
1073 if (it > gd->max_it) in gic_op_disable()
1076 gic_it_disable(gd, it); in gic_op_disable()
1081 struct gic_data *gd = container_of(chip, struct gic_data, chip); in gic_op_raise_pi() local
1083 assert(gd == &gic_data); in gic_op_raise_pi()
1085 if (it > gd->max_it) in gic_op_raise_pi()
1088 gic_it_set_pending(gd, it); in gic_op_raise_pi()
1094 struct gic_data *gd = container_of(chip, struct gic_data, chip); in gic_op_raise_sgi() local
1097 assert(gd == &gic_data); in gic_op_raise_sgi()
1102 ns = BIT32(it) & gd->per_cpu_group_status; in gic_op_raise_sgi()
1103 gic_it_raise_sgi(gd, it, cpu_mask, ns); in gic_op_raise_sgi()
1109 struct gic_data *gd = container_of(chip, struct gic_data, chip); in gic_op_set_affinity() local
1111 assert(gd == &gic_data); in gic_op_set_affinity()
1113 if (it > gd->max_it) in gic_op_set_affinity()
1116 gic_it_set_cpu_mask(gd, it, cpu_mask); in gic_op_set_affinity()