Lines Matching refs:cci_pmu

41 #define CCI_PMU_CNTR_LAST(cci_pmu)	(cci_pmu->num_cntrs - 1)  argument
77 struct cci_pmu;
92 int (*validate_hw_event)(struct cci_pmu *, unsigned long);
93 int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long);
94 void (*write_counters)(struct cci_pmu *, unsigned long *);
99 struct cci_pmu { struct
115 #define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu)) argument
117 static struct cci_pmu *g_cci_pmu;
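The listing shows only the opening of struct cci_pmu (line 99), but its fields can be recovered from the references that follow. A hedged reconstruction, inferred purely from usage in this listing (field order and exact types are assumptions):

	struct cci_pmu {
		void __iomem *base;		/* counter window: pmu_read/write_register(), lines 719/726 */
		void __iomem *ctrl_base;	/* control registers: CCI_PMCR, CCI_PID2 */
		struct pmu pmu;			/* embedded core object; see to_cci_pmu(), line 115 */
		int nr_irqs;			/* counted up in cci_pmu_probe(), line 1669 */
		int *irqs;			/* devm-allocated in cci_pmu_alloc(), line 1623 */
		unsigned long active_irqs;	/* bitmap walked by pmu_free_irq(), line 868 */
		const struct cci_pmu_model *model;
		struct cci_pmu_hw_events hw_events;
		struct platform_device *plat_device;
		int num_cntrs;			/* programmable + fixed, set in cci_pmu_init(), line 1439 */
		atomic_t active_events;		/* event_init refcount, line 1087 */
		struct mutex reserve_mutex;	/* guards IRQ request/free, line 1088 */
		int cpu;			/* event-handling CPU, line 1686 */
	};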
131 static void pmu_write_counters(struct cci_pmu *cci_pmu,
312 static int cci400_get_event_idx(struct cci_pmu *cci_pmu, in cci400_get_event_idx() argument
326 for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx) in cci400_get_event_idx()
334 static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event) in cci400_validate_hw_event() argument
365 if (ev_code >= cci_pmu->model->event_ranges[if_type].min && in cci400_validate_hw_event()
366 ev_code <= cci_pmu->model->event_ranges[if_type].max) in cci400_validate_hw_event()
372 static int probe_cci400_revision(struct cci_pmu *cci_pmu) in probe_cci400_revision() argument
375 rev = readl_relaxed(cci_pmu->ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK; in probe_cci400_revision()
384 static const struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu) in probe_cci_model() argument
387 return &cci_pmu_models[probe_cci400_revision(cci_pmu)]; in probe_cci_model()
391 static inline struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu) in probe_cci_model() argument
541 static int cci500_validate_hw_event(struct cci_pmu *cci_pmu, in cci500_validate_hw_event() argument
576 if (ev_code >= cci_pmu->model->event_ranges[if_type].min && in cci500_validate_hw_event()
577 ev_code <= cci_pmu->model->event_ranges[if_type].max) in cci500_validate_hw_event()
592 static int cci550_validate_hw_event(struct cci_pmu *cci_pmu, in cci550_validate_hw_event() argument
628 if (ev_code >= cci_pmu->model->event_ranges[if_type].min && in cci550_validate_hw_event()
629 ev_code <= cci_pmu->model->event_ranges[if_type].max) in cci550_validate_hw_event()
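The three validate_hw_event callbacks (cci400 at line 334, cci500 at line 541, cci550 at line 592) all end in the same range test against the model's per-interface event table. A sketch of that shared tail as a hypothetical helper (the helper itself and the -ENOENT failure value are assumptions; the real callbacks inline the check after decoding if_type and ev_code from hw_event):

	/* Hypothetical helper: accept the event code only if it falls in
	 * the model's [min, max] range for the decoded interface type. */
	static int validate_event_range(struct cci_pmu *cci_pmu, int if_type,
					unsigned long ev_code,
					unsigned long hw_event)
	{
		if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
		    ev_code <= cci_pmu->model->event_ranges[if_type].max)
			return hw_event;	/* valid: echo the raw event back */

		return -ENOENT;			/* assumed failure value */
	}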
642 static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu) in cci_pmu_sync_counters() argument
645 struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events; in cci_pmu_sync_counters()
648 bitmap_zero(mask, cci_pmu->num_cntrs); in cci_pmu_sync_counters()
649 for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) { in cci_pmu_sync_counters()
664 pmu_write_counters(cci_pmu, mask); in cci_pmu_sync_counters()
668 static void __cci_pmu_enable_nosync(struct cci_pmu *cci_pmu) in __cci_pmu_enable_nosync() argument
673 val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) | CCI_PMCR_CEN; in __cci_pmu_enable_nosync()
674 writel(val, cci_pmu->ctrl_base + CCI_PMCR); in __cci_pmu_enable_nosync()
678 static void __cci_pmu_enable_sync(struct cci_pmu *cci_pmu) in __cci_pmu_enable_sync() argument
680 cci_pmu_sync_counters(cci_pmu); in __cci_pmu_enable_sync()
681 __cci_pmu_enable_nosync(cci_pmu); in __cci_pmu_enable_sync()
685 static void __cci_pmu_disable(struct cci_pmu *cci_pmu) in __cci_pmu_disable() argument
690 val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN; in __cci_pmu_disable()
691 writel(val, cci_pmu->ctrl_base + CCI_PMCR); in __cci_pmu_disable()
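Lines 668-691 show that global enable/disable is a read-modify-write of one control register: __cci_pmu_enable_nosync() ORs CCI_PMCR_CEN into CCI_PMCR, __cci_pmu_disable() masks it out, and __cci_pmu_enable_sync() first pushes saved counter values back to hardware through cci_pmu_sync_counters(). Condensed into one hypothetical helper (the numeric value of CCI_PMCR_CEN is not shown in this listing):

	/* Condensed read-modify-write pair from lines 668-691. */
	static void pmcr_set_cen(struct cci_pmu *cci_pmu, bool enable)
	{
		u32 val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR);

		if (enable)
			val |= CCI_PMCR_CEN;	/* __cci_pmu_enable_nosync() */
		else
			val &= ~CCI_PMCR_CEN;	/* __cci_pmu_disable() */
		writel(val, cci_pmu->ctrl_base + CCI_PMCR);
	}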
712 static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx) in pmu_is_valid_counter() argument
714 return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu); in pmu_is_valid_counter()
717 static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset) in pmu_read_register() argument
719 return readl_relaxed(cci_pmu->base + in pmu_read_register()
720 CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset); in pmu_read_register()
723 static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value, in pmu_write_register() argument
726 writel_relaxed(value, cci_pmu->base + in pmu_write_register()
727 CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset); in pmu_write_register()
730 static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx) in pmu_disable_counter() argument
732 pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL); in pmu_disable_counter()
735 static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx) in pmu_enable_counter() argument
737 pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL); in pmu_enable_counter()
741 pmu_counter_is_enabled(struct cci_pmu *cci_pmu, int idx) in pmu_counter_is_enabled() argument
743 return (pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR_CTRL) & 0x1) != 0; in pmu_counter_is_enabled()
746 static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event) in pmu_set_event() argument
748 pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL); in pmu_set_event()
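Lines 712-748 imply a small per-counter register window: every per-counter operation funnels through pmu_read_register()/pmu_write_register(), which offset into cci_pmu->base at CCI_PMU_CNTR_BASE(model, idx), and enable/disable/event-select are one-line wrappers over the write path. The map those lines imply (numeric offsets are not shown in this listing):

	/*
	 * Per-counter window implied by lines 712-748:
	 *
	 *   cci_pmu->base + CCI_PMU_CNTR_BASE(model, idx)
	 *     + CCI_PMU_EVT_SEL     event select, written by pmu_set_event()
	 *     + CCI_PMU_CNTR        counter value, lines 886 and 893
	 *     + CCI_PMU_CNTR_CTRL   control; bit 0 = enable, line 743
	 *     + CCI_PMU_OVRFLW      overflow flag, lines 1050-1054
	 */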
764 pmu_save_counters(struct cci_pmu *cci_pmu, unsigned long *mask) in pmu_save_counters() argument
768 for (i = 0; i < cci_pmu->num_cntrs; i++) { in pmu_save_counters()
769 if (pmu_counter_is_enabled(cci_pmu, i)) { in pmu_save_counters()
771 pmu_disable_counter(cci_pmu, i); in pmu_save_counters()
781 pmu_restore_counters(struct cci_pmu *cci_pmu, unsigned long *mask) in pmu_restore_counters() argument
785 for_each_set_bit(i, mask, cci_pmu->num_cntrs) in pmu_restore_counters()
786 pmu_enable_counter(cci_pmu, i); in pmu_restore_counters()
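pmu_save_counters() (line 764) and pmu_restore_counters() (line 781) bracket sequences that must run with the counters parked: save disables every enabled counter and records which ones were running, restore re-enables exactly that set. The listing elides the line that records the bit (it does not mention cci_pmu); a reconstruction with that step filled in, marked as implied:

	static void pmu_save_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
	{
		int i;

		for (i = 0; i < cci_pmu->num_cntrs; i++) {
			if (pmu_counter_is_enabled(cci_pmu, i)) {
				set_bit(i, mask);	/* implied by the restore path below */
				pmu_disable_counter(cci_pmu, i);
			}
		}
	}

	static void pmu_restore_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
	{
		int i;

		for_each_set_bit(i, mask, cci_pmu->num_cntrs)
			pmu_enable_counter(cci_pmu, i);
	}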
793 static u32 pmu_get_max_counters(struct cci_pmu *cci_pmu) in pmu_get_max_counters() argument
795 return (readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) & in pmu_get_max_counters()
801 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in pmu_get_event_idx() local
805 if (cci_pmu->model->get_event_idx) in pmu_get_event_idx()
806 return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event); in pmu_get_event_idx()
809 for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) in pmu_get_event_idx()

819 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in pmu_map_event() local
822 !cci_pmu->model->validate_hw_event) in pmu_map_event()
825 return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config); in pmu_map_event()
828 static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler) in pmu_request_irq() argument
831 struct platform_device *pmu_device = cci_pmu->plat_device; in pmu_request_irq()
836 if (cci_pmu->nr_irqs < 1) { in pmu_request_irq()
848 for (i = 0; i < cci_pmu->nr_irqs; i++) { in pmu_request_irq()
849 int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED, in pmu_request_irq()
850 "arm-cci-pmu", cci_pmu); in pmu_request_irq()
853 cci_pmu->irqs[i]); in pmu_request_irq()
857 set_bit(i, &cci_pmu->active_irqs); in pmu_request_irq()
863 static void pmu_free_irq(struct cci_pmu *cci_pmu) in pmu_free_irq() argument
867 for (i = 0; i < cci_pmu->nr_irqs; i++) { in pmu_free_irq()
868 if (!test_and_clear_bit(i, &cci_pmu->active_irqs)) in pmu_free_irq()
871 free_irq(cci_pmu->irqs[i], cci_pmu); in pmu_free_irq()
877 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in pmu_read_counter() local
882 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { in pmu_read_counter()
883 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); in pmu_read_counter()
886 value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR); in pmu_read_counter()
891 static void pmu_write_counter(struct cci_pmu *cci_pmu, u32 value, int idx) in pmu_write_counter() argument
893 pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR); in pmu_write_counter()
896 static void __pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask) in __pmu_write_counters() argument
899 struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events; in __pmu_write_counters()
901 for_each_set_bit(i, mask, cci_pmu->num_cntrs) { in __pmu_write_counters()
906 pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i); in __pmu_write_counters()
910 static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask) in pmu_write_counters() argument
912 if (cci_pmu->model->write_counters) in pmu_write_counters()
913 cci_pmu->model->write_counters(cci_pmu, mask); in pmu_write_counters()
915 __pmu_write_counters(cci_pmu, mask); in pmu_write_counters()
949 static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask) in cci5xx_pmu_write_counters() argument
954 bitmap_zero(saved_mask, cci_pmu->num_cntrs); in cci5xx_pmu_write_counters()
955 pmu_save_counters(cci_pmu, saved_mask); in cci5xx_pmu_write_counters()
961 __cci_pmu_enable_nosync(cci_pmu); in cci5xx_pmu_write_counters()
963 for_each_set_bit(i, mask, cci_pmu->num_cntrs) { in cci5xx_pmu_write_counters()
964 struct perf_event *event = cci_pmu->hw_events.events[i]; in cci5xx_pmu_write_counters()
969 pmu_set_event(cci_pmu, i, CCI5xx_INVALID_EVENT); in cci5xx_pmu_write_counters()
970 pmu_enable_counter(cci_pmu, i); in cci5xx_pmu_write_counters()
971 pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i); in cci5xx_pmu_write_counters()
972 pmu_disable_counter(cci_pmu, i); in cci5xx_pmu_write_counters()
973 pmu_set_event(cci_pmu, i, event->hw.config_base); in cci5xx_pmu_write_counters()
976 __cci_pmu_disable(cci_pmu); in cci5xx_pmu_write_counters()
978 pmu_restore_counters(cci_pmu, saved_mask); in cci5xx_pmu_write_counters()
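cci5xx_pmu_write_counters() (lines 949-978) is the CCI-500/550 override for model->write_counters; the sequence implies these counters only latch writes while the PMU is globally enabled. A reconstruction with the elided lines paraphrased (the saved_mask sizing constant and the WARN_ON guard are assumptions based on the visible flow):

	static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu,
					      unsigned long *mask)
	{
		int i;
		DECLARE_BITMAP(saved_mask, HW_CNTRS_MAX);	/* size constant assumed */

		bitmap_zero(saved_mask, cci_pmu->num_cntrs);
		pmu_save_counters(cci_pmu, saved_mask);	/* park everything running */

		__cci_pmu_enable_nosync(cci_pmu);	/* writes need the PMU on */

		for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
			struct perf_event *event = cci_pmu->hw_events.events[i];

			if (WARN_ON(!event))		/* guard assumed, elided from listing */
				continue;

			pmu_set_event(cci_pmu, i, CCI5xx_INVALID_EVENT); /* count nothing meanwhile */
			pmu_enable_counter(cci_pmu, i);
			pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
			pmu_disable_counter(cci_pmu, i);
			pmu_set_event(cci_pmu, i, event->hw.config_base); /* restore selection */
		}

		__cci_pmu_disable(cci_pmu);
		pmu_restore_counters(cci_pmu, saved_mask);	/* resume the parked set */
	}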
1030 struct cci_pmu *cci_pmu = dev; in pmu_handle_irq() local
1031 struct cci_pmu_hw_events *events = &cci_pmu->hw_events; in pmu_handle_irq()
1037 __cci_pmu_disable(cci_pmu); in pmu_handle_irq()
1043 for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) { in pmu_handle_irq()
1050 if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) & in pmu_handle_irq()
1054 pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx, in pmu_handle_irq()
1063 __cci_pmu_enable_sync(cci_pmu); in pmu_handle_irq()
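pmu_handle_irq() (lines 1030-1063) brackets overflow handling with a full PMU disable/enable: counting stops, each counter is checked for a pending overflow flag, the flag is acknowledged by writing CCI_PMU_OVRFLW_FLAG back to the same register, and counting resumes with a sync. Skeleton of the loop, with the elided perf-event bookkeeping paraphrased in comments:

	__cci_pmu_disable(cci_pmu);	/* stop counting while we scan */

	for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
		/* ... skip idx if events->events[idx] is unused ... */

		if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) &
		      CCI_PMU_OVRFLW_FLAG))
			continue;	/* no overflow pending on this counter */

		pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx,
				   CCI_PMU_OVRFLW);	/* write-to-clear acknowledge */

		/* ... update the perf event, note that the IRQ was handled ... */
	}

	__cci_pmu_enable_sync(cci_pmu);	/* restore counter values, re-enable */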
1069 static int cci_pmu_get_hw(struct cci_pmu *cci_pmu) in cci_pmu_get_hw() argument
1071 int ret = pmu_request_irq(cci_pmu, pmu_handle_irq); in cci_pmu_get_hw()
1073 pmu_free_irq(cci_pmu); in cci_pmu_get_hw()
1079 static void cci_pmu_put_hw(struct cci_pmu *cci_pmu) in cci_pmu_put_hw() argument
1081 pmu_free_irq(cci_pmu); in cci_pmu_put_hw()
1086 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in hw_perf_event_destroy() local
1087 atomic_t *active_events = &cci_pmu->active_events; in hw_perf_event_destroy()
1088 struct mutex *reserve_mutex = &cci_pmu->reserve_mutex; in hw_perf_event_destroy()
1091 cci_pmu_put_hw(cci_pmu); in hw_perf_event_destroy()
1098 struct cci_pmu *cci_pmu = to_cci_pmu(pmu); in cci_pmu_enable() local
1099 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; in cci_pmu_enable()
1100 int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_cntrs); in cci_pmu_enable()
1107 __cci_pmu_enable_sync(cci_pmu); in cci_pmu_enable()
1114 struct cci_pmu *cci_pmu = to_cci_pmu(pmu); in cci_pmu_disable() local
1115 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; in cci_pmu_disable()
1119 __cci_pmu_disable(cci_pmu); in cci_pmu_disable()
1128 static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx) in pmu_fixed_hw_idx() argument
1130 return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs); in pmu_fixed_hw_idx()
1135 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_start() local
1136 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; in cci_pmu_start()
1150 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { in cci_pmu_start()
1151 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); in cci_pmu_start()
1158 if (!pmu_fixed_hw_idx(cci_pmu, idx)) in cci_pmu_start()
1159 pmu_set_event(cci_pmu, idx, hwc->config_base); in cci_pmu_start()
1162 pmu_enable_counter(cci_pmu, idx); in cci_pmu_start()
1169 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_stop() local
1176 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { in cci_pmu_stop()
1177 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); in cci_pmu_stop()
1185 pmu_disable_counter(cci_pmu, idx); in cci_pmu_stop()
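cci_pmu_start() (lines 1135-1162) and cci_pmu_stop() (lines 1169-1185) both validate the index with pmu_is_valid_counter() first; start then programs the event code, skipping the model's fixed-function counters (pmu_fixed_hw_idx(), line 1128), and enables the counter, while stop simply disables it. The core pairing, condensed:

	/* start: program the event (unless the counter is fixed-function),
	 * then enable */
	if (!pmu_fixed_hw_idx(cci_pmu, idx))
		pmu_set_event(cci_pmu, idx, hwc->config_base);
	pmu_enable_counter(cci_pmu, idx);

	/* stop: the mirror image; the final count is read out afterwards */
	pmu_disable_counter(cci_pmu, idx);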
1192 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_add() local
1193 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; in cci_pmu_add()
1217 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_del() local
1218 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; in cci_pmu_del()
1229 static int validate_event(struct pmu *cci_pmu, in validate_event() argument
1241 if (event->pmu != cci_pmu) in validate_event()
1256 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in validate_group() local
1265 memset(mask, 0, BITS_TO_LONGS(cci_pmu->num_cntrs) * sizeof(unsigned long)); in validate_group()
1319 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_event_init() local
1320 atomic_t *active_events = &cci_pmu->active_events; in cci_pmu_event_init()
1341 event->cpu = cci_pmu->cpu; in cci_pmu_event_init()
1345 mutex_lock(&cci_pmu->reserve_mutex); in cci_pmu_event_init()
1347 err = cci_pmu_get_hw(cci_pmu); in cci_pmu_event_init()
1350 mutex_unlock(&cci_pmu->reserve_mutex); in cci_pmu_event_init()
1366 struct cci_pmu *cci_pmu = to_cci_pmu(pmu); in pmu_cpumask_attr_show() local
1368 return cpumap_print_to_pagebuf(true, buf, cpumask_of(cci_pmu->cpu)); in pmu_cpumask_attr_show()
1400 static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev) in cci_pmu_init() argument
1402 const struct cci_pmu_model *model = cci_pmu->model; in cci_pmu_init()
1414 cci_pmu->pmu = (struct pmu) { in cci_pmu_init()
1416 .name = cci_pmu->model->name, in cci_pmu_init()
1430 cci_pmu->plat_device = pdev; in cci_pmu_init()
1431 num_cntrs = pmu_get_max_counters(cci_pmu); in cci_pmu_init()
1432 if (num_cntrs > cci_pmu->model->num_hw_cntrs) { in cci_pmu_init()
1436 num_cntrs, cci_pmu->model->num_hw_cntrs); in cci_pmu_init()
1437 num_cntrs = cci_pmu->model->num_hw_cntrs; in cci_pmu_init()
1439 cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs; in cci_pmu_init()
1441 return perf_pmu_register(&cci_pmu->pmu, name, -1); in cci_pmu_init()
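cci_pmu_init() (lines 1400-1441) sizes the PMU by cross-checking hardware against the model table: the counter count read from CCI_PMCR via pmu_get_max_counters() is clamped to model->num_hw_cntrs, and the model's fixed-function counters are added on top before perf_pmu_register(). The sizing rule, isolated:

	/* Sizing rule from lines 1431-1439. */
	num_cntrs = pmu_get_max_counters(cci_pmu);		/* from CCI_PMCR */
	if (num_cntrs > cci_pmu->model->num_hw_cntrs)
		num_cntrs = cci_pmu->model->num_hw_cntrs;	/* model table wins */
	cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs;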
1595 static struct cci_pmu *cci_pmu_alloc(struct device *dev) in cci_pmu_alloc()
1597 struct cci_pmu *cci_pmu; in cci_pmu_alloc() local
1605 cci_pmu = devm_kzalloc(dev, sizeof(*cci_pmu), GFP_KERNEL); in cci_pmu_alloc()
1606 if (!cci_pmu) in cci_pmu_alloc()
1609 cci_pmu->ctrl_base = *(void __iomem **)dev->platform_data; in cci_pmu_alloc()
1615 model = probe_cci_model(cci_pmu); in cci_pmu_alloc()
1622 cci_pmu->model = model; in cci_pmu_alloc()
1623 cci_pmu->irqs = devm_kcalloc(dev, CCI_PMU_MAX_HW_CNTRS(model), in cci_pmu_alloc()
1624 sizeof(*cci_pmu->irqs), GFP_KERNEL); in cci_pmu_alloc()
1625 if (!cci_pmu->irqs) in cci_pmu_alloc()
1627 cci_pmu->hw_events.events = devm_kcalloc(dev, in cci_pmu_alloc()
1629 sizeof(*cci_pmu->hw_events.events), in cci_pmu_alloc()
1631 if (!cci_pmu->hw_events.events) in cci_pmu_alloc()
1633 cci_pmu->hw_events.used_mask = devm_kcalloc(dev, in cci_pmu_alloc()
1635 sizeof(*cci_pmu->hw_events.used_mask), in cci_pmu_alloc()
1637 if (!cci_pmu->hw_events.used_mask) in cci_pmu_alloc()
1640 return cci_pmu; in cci_pmu_alloc()
1645 struct cci_pmu *cci_pmu; in cci_pmu_probe() local
1648 cci_pmu = cci_pmu_alloc(&pdev->dev); in cci_pmu_probe()
1649 if (IS_ERR(cci_pmu)) in cci_pmu_probe()
1650 return PTR_ERR(cci_pmu); in cci_pmu_probe()
1652 cci_pmu->base = devm_platform_ioremap_resource(pdev, 0); in cci_pmu_probe()
1653 if (IS_ERR(cci_pmu->base)) in cci_pmu_probe()
1660 cci_pmu->nr_irqs = 0; in cci_pmu_probe()
1661 for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) { in cci_pmu_probe()
1666 if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs)) in cci_pmu_probe()
1669 cci_pmu->irqs[cci_pmu->nr_irqs++] = irq; in cci_pmu_probe()
1676 if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) { in cci_pmu_probe()
1678 i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)); in cci_pmu_probe()
1682 raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock); in cci_pmu_probe()
1683 mutex_init(&cci_pmu->reserve_mutex); in cci_pmu_probe()
1684 atomic_set(&cci_pmu->active_events, 0); in cci_pmu_probe()
1686 cci_pmu->cpu = raw_smp_processor_id(); in cci_pmu_probe()
1687 g_cci_pmu = cci_pmu; in cci_pmu_probe()
1692 ret = cci_pmu_init(cci_pmu, pdev); in cci_pmu_probe()
1696 pr_info("ARM %s PMU driver probed", cci_pmu->model->name); in cci_pmu_probe()
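cci_pmu_probe() (lines 1645-1696) ties the pieces together. The order, as far as this listing shows it (non-matching lines elided by the cross-referencer):

	/*
	 * Probe order recoverable from lines 1645-1696:
	 *  1. cci_pmu_alloc(): take ctrl_base from platform_data, run
	 *     probe_cci_model(), devm-allocate irqs, hw_events.events and
	 *     hw_events.used_mask sized by CCI_PMU_MAX_HW_CNTRS(model)
	 *  2. devm_platform_ioremap_resource() -> cci_pmu->base
	 *  3. collect platform IRQs, dropping duplicates (is_duplicate_irq())
	 *  4. init hw_events.pmu_lock, reserve_mutex, active_events;
	 *     record the handling CPU (raw_smp_processor_id())
	 *  5. publish g_cci_pmu, then cci_pmu_init() -> perf_pmu_register()
	 */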