Lines Matching refs:cci_pmu

86 #define CCI_PMU_CNTR_LAST(cci_pmu)	(cci_pmu->num_cntrs - 1)  argument
112 struct cci_pmu;
129 int (*validate_hw_event)(struct cci_pmu *, unsigned long);
130 int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long);
135 struct cci_pmu { struct
151 #define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu)) argument
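The to_cci_pmu() macro at line 151 is the standard container_of() pattern: the driver embeds the generic struct pmu inside its own struct cci_pmu, and every perf callback converts the member pointer it is handed back into the driver object. A minimal sketch of how lines 86, 135 and 151 fit together, showing only members that appear elsewhere in this listing:

    #include <linux/kernel.h>		/* container_of() */
    #include <linux/perf_event.h>	/* struct pmu */
    #include <linux/platform_device.h>

    struct cci_pmu {
        void __iomem *base;			/* MMIO window, see pmu_read_register() */
        struct pmu pmu;				/* embedded generic perf object */
        int nr_irqs;
        int *irqs;
        unsigned long active_irqs;
        struct platform_device *plat_device;
        int num_cntrs;				/* programmable + fixed counters */
        /* ... model, hw_events, cpus, locks elided ... */
    };

    #define CCI_PMU_CNTR_LAST(cci_pmu)	((cci_pmu)->num_cntrs - 1)
    #define to_cci_pmu(c)	(container_of(c, struct cci_pmu, pmu))

Embedding struct pmu by value, rather than pointing at it, is what makes container_of() valid here: the offset of .pmu within struct cci_pmu is a compile-time constant.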
338 static int cci400_get_event_idx(struct cci_pmu *cci_pmu, in cci400_get_event_idx() argument
352 for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx) in cci400_get_event_idx()
360 static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event) in cci400_validate_hw_event() argument
391 if (ev_code >= cci_pmu->model->event_ranges[if_type].min && in cci400_validate_hw_event()
392 ev_code <= cci_pmu->model->event_ranges[if_type].max) in cci400_validate_hw_event()
561 static int cci500_validate_hw_event(struct cci_pmu *cci_pmu, in cci500_validate_hw_event() argument
596 if (ev_code >= cci_pmu->model->event_ranges[if_type].min && in cci500_validate_hw_event()
597 ev_code <= cci_pmu->model->event_ranges[if_type].max) in cci500_validate_hw_event()
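Lines 391-392 and 596-597 show that both models end event validation in the same range check: once the interface type has been decoded from the raw event, the event code is compared against the model's per-interface [min, max] table. A hedged sketch of the CCI-400 variant; the CCI400_PMU_EVENT_SOURCE/CODE extraction macros are assumed from context and do not appear in this listing:

    /* Sketch: returns hw_event itself if valid, -ENOENT otherwise. */
    static int cci400_validate_hw_event(struct cci_pmu *cci_pmu,
                                        unsigned long hw_event)
    {
        u8 ev_source = CCI400_PMU_EVENT_SOURCE(hw_event);	/* assumed macro */
        u8 ev_code = CCI400_PMU_EVENT_CODE(hw_event);	/* assumed macro */
        int if_type;

        /* ... map ev_source to if_type (slave vs. master interface),
         * rejecting unknown sources with -ENOENT ... */

        if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
            ev_code <= cci_pmu->model->event_ranges[if_type].max)
            return hw_event;

        return -ENOENT;
    }

cci500_validate_hw_event() (line 561) differs only in the source decoding; the final range check at 596-597 is identical.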
622 static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx) in pmu_is_valid_counter() argument
624 return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu); in pmu_is_valid_counter()
627 static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset) in pmu_read_register() argument
629 return readl_relaxed(cci_pmu->base + in pmu_read_register()
630 CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset); in pmu_read_register()
633 static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value, in pmu_write_register() argument
636 return writel_relaxed(value, cci_pmu->base + in pmu_write_register()
637 CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset); in pmu_write_register()
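pmu_read_register() and pmu_write_register() (lines 627-637) establish the driver's single MMIO convention: each counter owns a model-sized register window at base + CCI_PMU_CNTR_BASE(model, idx), and every per-counter register is a small offset into that window. A sketch of the pair; the cntr_size member is assumed, and the stray return on the void writel_relaxed() call at line 636 is dropped as a cleanup:

    #include <linux/io.h>

    /* Assumed layout helper: byte offset of counter idx's register window. */
    #define CCI_PMU_CNTR_BASE(model, idx)	((idx) * (model)->cntr_size)

    static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx,
                                 unsigned int offset)
    {
        return readl_relaxed(cci_pmu->base +
                             CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
    }

    static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value,
                                   int idx, unsigned int offset)
    {
        writel_relaxed(value, cci_pmu->base +
                       CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
    }

The relaxed accessors skip the usual I/O barriers, presumably because all callers already serialise through the PMU's own locking; the enable/disable/set_event helpers below are thin wrappers over these two functions.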
640 static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx) in pmu_disable_counter() argument
642 pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL); in pmu_disable_counter()
645 static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx) in pmu_enable_counter() argument
647 pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL); in pmu_enable_counter()
650 static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event) in pmu_set_event() argument
652 pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL); in pmu_set_event()
667 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in pmu_get_event_idx() local
671 if (cci_pmu->model->get_event_idx) in pmu_get_event_idx()
672 return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event); in pmu_get_event_idx()
675 for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) in pmu_get_event_idx()
685 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in pmu_map_event() local
688 !cci_pmu->model->validate_hw_event) in pmu_map_event()
691 return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config); in pmu_map_event()
694 static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler) in pmu_request_irq() argument
697 struct platform_device *pmu_device = cci_pmu->plat_device; in pmu_request_irq()
702 if (cci_pmu->nr_irqs < 1) { in pmu_request_irq()
714 for (i = 0; i < cci_pmu->nr_irqs; i++) { in pmu_request_irq()
715 int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED, in pmu_request_irq()
716 "arm-cci-pmu", cci_pmu); in pmu_request_irq()
719 cci_pmu->irqs[i]); in pmu_request_irq()
723 set_bit(i, &cci_pmu->active_irqs); in pmu_request_irq()
729 static void pmu_free_irq(struct cci_pmu *cci_pmu) in pmu_free_irq() argument
733 for (i = 0; i < cci_pmu->nr_irqs; i++) { in pmu_free_irq()
734 if (!test_and_clear_bit(i, &cci_pmu->active_irqs)) in pmu_free_irq()
737 free_irq(cci_pmu->irqs[i], cci_pmu); in pmu_free_irq()
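Lines 694-737 show the IRQ bring-up and teardown as a pair: pmu_request_irq() bails out if the platform supplied no interrupts, requests every line as IRQF_SHARED with cci_pmu as the cookie, and records each success in the active_irqs bitmask so that pmu_free_irq() can unwind exactly the lines that were acquired, including after a partial failure. A reconstruction from the lines above (the error-message text is illustrative):

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>

    static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
    {
        int i;
        struct platform_device *pmu_device = cci_pmu->plat_device;

        if (unlikely(!pmu_device))
            return -ENODEV;

        if (cci_pmu->nr_irqs < 1) {
            dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");
            return -ENODEV;
        }

        for (i = 0; i < cci_pmu->nr_irqs; i++) {
            int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED,
                                  "arm-cci-pmu", cci_pmu);
            if (err) {
                dev_err(&pmu_device->dev,
                        "unable to request IRQ%d for CCI PMU counters\n",
                        cci_pmu->irqs[i]);
                return err;
            }
            set_bit(i, &cci_pmu->active_irqs);	/* remember for unwinding */
        }

        return 0;
    }

    static void pmu_free_irq(struct cci_pmu *cci_pmu)
    {
        int i;

        for (i = 0; i < cci_pmu->nr_irqs; i++) {
            if (!test_and_clear_bit(i, &cci_pmu->active_irqs))
                continue;	/* never requested, or already freed */
            free_irq(cci_pmu->irqs[i], cci_pmu);
        }
    }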
743 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in pmu_read_counter() local
748 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { in pmu_read_counter()
749 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); in pmu_read_counter()
752 value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR); in pmu_read_counter()
759 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in pmu_write_counter() local
763 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) in pmu_write_counter()
764 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); in pmu_write_counter()
766 pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR); in pmu_write_counter()
809 struct cci_pmu *cci_pmu = dev; in pmu_handle_irq() local
810 struct cci_pmu_hw_events *events = &cci_pmu->hw_events; in pmu_handle_irq()
819 for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) { in pmu_handle_irq()
829 if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) & in pmu_handle_irq()
833 pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx, in pmu_handle_irq()
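The interrupt handler (lines 809-833) walks every counter, skips those whose overflow bit is clear in CCI_PMU_OVRFLW, and acknowledges the rest by writing CCI_PMU_OVRFLW_FLAG back. Because the line is shared (IRQF_SHARED above), it must return IRQ_NONE when nothing overflowed. A sketch of the core loop, with the perf bookkeeping elided:

    static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
    {
        unsigned long flags;
        struct cci_pmu *cci_pmu = dev;
        struct cci_pmu_hw_events *events = &cci_pmu->hw_events;
        irqreturn_t ret = IRQ_NONE;
        int idx;

        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /* Iterate over counters and update the corresponding perf events. */
        for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
            struct perf_event *event = events->events[idx];

            if (!event)
                continue;

            /* Did this counter overflow? */
            if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) &
                  CCI_PMU_OVRFLW_FLAG))
                continue;

            /* Acknowledge by writing the flag back. */
            pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx,
                               CCI_PMU_OVRFLW);
            ret = IRQ_HANDLED;
            /* ... update the event count and restart the period ... */
        }

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
        return ret;
    }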
845 static int cci_pmu_get_hw(struct cci_pmu *cci_pmu) in cci_pmu_get_hw() argument
847 int ret = pmu_request_irq(cci_pmu, pmu_handle_irq); in cci_pmu_get_hw()
849 pmu_free_irq(cci_pmu); in cci_pmu_get_hw()
855 static void cci_pmu_put_hw(struct cci_pmu *cci_pmu) in cci_pmu_put_hw() argument
857 pmu_free_irq(cci_pmu); in cci_pmu_put_hw()
862 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in hw_perf_event_destroy() local
863 atomic_t *active_events = &cci_pmu->active_events; in hw_perf_event_destroy()
864 struct mutex *reserve_mutex = &cci_pmu->reserve_mutex; in hw_perf_event_destroy()
867 cci_pmu_put_hw(cci_pmu); in hw_perf_event_destroy()
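Lines 862-867 show the teardown side of the shared-IRQ refcount: active_events counts live events, and only the last destroy releases the interrupts, serialised by reserve_mutex against a concurrent cci_pmu_get_hw() in event_init (lines 1159-1164). The idiomatic kernel helper for "decrement, and take the lock only on reaching zero" is atomic_dec_and_mutex_lock(), which the elided line around 865 presumably uses; a sketch:

    static void hw_perf_event_destroy(struct perf_event *event)
    {
        struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
        atomic_t *active_events = &cci_pmu->active_events;
        struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;

        /* Returns true, with the mutex held, only for the last event. */
        if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) {
            cci_pmu_put_hw(cci_pmu);	/* frees the IRQs, line 855 */
            mutex_unlock(reserve_mutex);
        }
    }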
874 struct cci_pmu *cci_pmu = to_cci_pmu(pmu); in cci_pmu_enable() local
875 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; in cci_pmu_enable()
876 int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_cntrs); in cci_pmu_enable()
894 struct cci_pmu *cci_pmu = to_cci_pmu(pmu); in cci_pmu_disable() local
895 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; in cci_pmu_disable()
912 static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx) in pmu_fixed_hw_idx() argument
914 return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs); in pmu_fixed_hw_idx()
919 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_start() local
920 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; in cci_pmu_start()
934 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { in cci_pmu_start()
935 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); in cci_pmu_start()
942 if (!pmu_fixed_hw_idx(cci_pmu, idx)) in cci_pmu_start()
943 pmu_set_event(cci_pmu, idx, hwc->config_base); in cci_pmu_start()
946 pmu_enable_counter(cci_pmu, idx); in cci_pmu_start()
953 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_stop() local
960 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { in cci_pmu_stop()
961 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); in cci_pmu_stop()
969 pmu_disable_counter(cci_pmu, idx); in cci_pmu_stop()
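cci_pmu_start() (lines 919-946) programs a counter in three steps under pmu_lock: validate the index, select the event via CCI_PMU_EVT_SEL unless the counter is one of the model's fixed-function counters (pmu_fixed_hw_idx(), line 912), then flip CCI_PMU_CNTR_CTRL to 1. A condensed sketch:

    static void cci_pmu_start(struct perf_event *event, int pmu_flags)
    {
        struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
        struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
        struct hw_perf_event *hwc = &event->hw;
        unsigned long flags;
        int idx = hwc->idx;

        if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
            dev_err(&cci_pmu->plat_device->dev,
                    "Invalid CCI PMU counter %d\n", idx);
            return;
        }

        raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);

        /* Fixed-function counters are hardwired to one event; only
         * programmable counters take an event select. */
        if (!pmu_fixed_hw_idx(cci_pmu, idx))
            pmu_set_event(cci_pmu, idx, hwc->config_base);

        /* ... set the initial sampling period ... */
        pmu_enable_counter(cci_pmu, idx);

        raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
    }

cci_pmu_stop() (lines 953-969) is the mirror image: the same validity check, then pmu_disable_counter() and a final count update.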
976 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_add() local
977 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; in cci_pmu_add()
1008 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_del() local
1009 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; in cci_pmu_del()
1021 validate_event(struct pmu *cci_pmu, in validate_event() argument
1033 if (event->pmu != cci_pmu) in validate_event()
1049 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in validate_group() local
1050 unsigned long mask[BITS_TO_LONGS(cci_pmu->num_cntrs)]; in validate_group()
1058 memset(mask, 0, BITS_TO_LONGS(cci_pmu->num_cntrs) * sizeof(unsigned long)); in validate_group()
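validate_group() (lines 1049-1058) dry-runs counter allocation for a whole event group before accepting it: it builds a scratch used_mask on the stack, sized from num_cntrs (the variable-length array at line 1050, zeroed by the memset at 1058), and runs the leader, each sibling, and the new event through validate_event() against that fake hardware state. A hedged reconstruction; the sibling iteration follows the perf conventions of this kernel generation:

    static int validate_group(struct perf_event *event)
    {
        struct perf_event *sibling, *leader = event->group_leader;
        struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
        unsigned long mask[BITS_TO_LONGS(cci_pmu->num_cntrs)];
        struct cci_pmu_hw_events fake_pmu = {
            /* .events can stay NULL: allocation only consults used_mask. */
            .used_mask = mask,
        };

        memset(mask, 0, BITS_TO_LONGS(cci_pmu->num_cntrs) * sizeof(unsigned long));

        if (!validate_event(event->pmu, &fake_pmu, leader))
            return -EINVAL;

        list_for_each_entry(sibling, &leader->sibling_list, group_entry)
            if (!validate_event(event->pmu, &fake_pmu, sibling))
                return -EINVAL;

        if (!validate_event(event->pmu, &fake_pmu, event))
            return -EINVAL;

        return 0;
    }

Note that validate_event() (line 1021) deliberately takes a struct pmu *, not a struct cci_pmu *: the line-1033 comparison rejects siblings that belong to a different PMU.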
1122 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_event_init() local
1123 atomic_t *active_events = &cci_pmu->active_events; in cci_pmu_event_init()
1152 cpu = cpumask_first(&cci_pmu->cpus); in cci_pmu_event_init()
1159 mutex_lock(&cci_pmu->reserve_mutex); in cci_pmu_event_init()
1161 err = cci_pmu_get_hw(cci_pmu); in cci_pmu_event_init()
1164 mutex_unlock(&cci_pmu->reserve_mutex); in cci_pmu_event_init()
1181 struct cci_pmu *cci_pmu = eattr->var; in pmu_cpumask_attr_show() local
1184 cpumask_pr_args(&cci_pmu->cpus)); in pmu_cpumask_attr_show()
1237 static int cci_pmu_init_attrs(struct cci_pmu *cci_pmu, struct platform_device *pdev) in cci_pmu_init_attrs() argument
1239 const struct cci_pmu_model *model = cci_pmu->model; in cci_pmu_init_attrs()
1261 pmu_cpumask_attr.var = cci_pmu; in cci_pmu_init_attrs()
1266 static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev) in cci_pmu_init() argument
1268 char *name = cci_pmu->model->name; in cci_pmu_init()
1272 rc = cci_pmu_init_attrs(cci_pmu, pdev); in cci_pmu_init()
1276 cci_pmu->pmu = (struct pmu) { in cci_pmu_init()
1277 .name = cci_pmu->model->name, in cci_pmu_init()
1290 cci_pmu->plat_device = pdev; in cci_pmu_init()
1292 if (num_cntrs > cci_pmu->model->num_hw_cntrs) { in cci_pmu_init()
1296 num_cntrs, cci_pmu->model->num_hw_cntrs); in cci_pmu_init()
1297 num_cntrs = cci_pmu->model->num_hw_cntrs; in cci_pmu_init()
1299 cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs; in cci_pmu_init()
1301 return perf_pmu_register(&cci_pmu->pmu, name, -1); in cci_pmu_init()
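cci_pmu_init() (lines 1266-1301) fills in the generic struct pmu callbacks, clamps the probed counter count to the model's maximum, adds the model's fixed counters on top, and hands the result to perf_pmu_register(). A sketch of the registration; the callback names are taken from the functions referenced above, while pmu_read and pmu_get_max_counters() are assumed helpers not shown in this listing:

    static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
    {
        char *name = cci_pmu->model->name;
        u32 num_cntrs;
        int rc;

        rc = cci_pmu_init_attrs(cci_pmu, pdev);
        if (rc)
            return rc;

        cci_pmu->pmu = (struct pmu) {
            .name		= cci_pmu->model->name,
            .task_ctx_nr	= perf_invalid_context,	/* uncore: no task context */
            .pmu_enable	= cci_pmu_enable,
            .pmu_disable	= cci_pmu_disable,
            .event_init	= cci_pmu_event_init,
            .add		= cci_pmu_add,
            .del		= cci_pmu_del,
            .start		= cci_pmu_start,
            .stop		= cci_pmu_stop,
            .read		= pmu_read,			/* assumed name */
        };

        cci_pmu->plat_device = pdev;
        num_cntrs = pmu_get_max_counters();		/* assumed hw probe helper */
        if (num_cntrs > cci_pmu->model->num_hw_cntrs) {
            dev_warn(&pdev->dev,
                     "PMU implements more counters (%d) than the model supports (%d), truncating\n",
                     num_cntrs, cci_pmu->model->num_hw_cntrs);
            num_cntrs = cci_pmu->model->num_hw_cntrs;
        }
        cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs;

        return perf_pmu_register(&cci_pmu->pmu, name, -1);
    }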
1307 struct cci_pmu *cci_pmu = container_of(self, in cci_pmu_cpu_notifier() local
1308 struct cci_pmu, cpu_nb); in cci_pmu_cpu_notifier()
1314 if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus)) in cci_pmu_cpu_notifier()
1323 cpumask_set_cpu(target, &cci_pmu->cpus); in cci_pmu_cpu_notifier()
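The hotplug notifier (lines 1307-1323) keeps the uncore PMU's context nominated on exactly one online CPU: when that CPU goes down, it is cleared from cci_pmu->cpus and any other online CPU is nominated instead. A sketch using the pre-4.6 notifier interface that the listing shows; the exact hotplug phase (presumably CPU_DOWN_PREPARE) is assumed:

    static int cci_pmu_cpu_notifier(struct notifier_block *self,
                                    unsigned long action, void *hcpu)
    {
        struct cci_pmu *cci_pmu = container_of(self,
                                               struct cci_pmu, cpu_nb);
        unsigned int cpu = (long)hcpu;
        unsigned int target;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DOWN_PREPARE:
            /* Nothing to do unless the outgoing CPU owns the context. */
            if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
                break;
            target = cpumask_any_but(cpu_online_mask, cpu);
            if (target >= nr_cpu_ids)	/* last online CPU */
                break;
            /* ... migrate the perf context to the target CPU ... */
            cpumask_set_cpu(target, &cci_pmu->cpus);
            break;
        default:
            break;
        }

        return NOTIFY_OK;
    }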
1456 static struct cci_pmu *cci_pmu_alloc(struct platform_device *pdev) in cci_pmu_alloc()
1458 struct cci_pmu *cci_pmu; in cci_pmu_alloc() local
1472 cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*cci_pmu), GFP_KERNEL); in cci_pmu_alloc()
1473 if (!cci_pmu) in cci_pmu_alloc()
1476 cci_pmu->model = model; in cci_pmu_alloc()
1477 cci_pmu->irqs = devm_kcalloc(&pdev->dev, CCI_PMU_MAX_HW_CNTRS(model), in cci_pmu_alloc()
1478 sizeof(*cci_pmu->irqs), GFP_KERNEL); in cci_pmu_alloc()
1479 if (!cci_pmu->irqs) in cci_pmu_alloc()
1481 cci_pmu->hw_events.events = devm_kcalloc(&pdev->dev, in cci_pmu_alloc()
1483 sizeof(*cci_pmu->hw_events.events), in cci_pmu_alloc()
1485 if (!cci_pmu->hw_events.events) in cci_pmu_alloc()
1487 cci_pmu->hw_events.used_mask = devm_kcalloc(&pdev->dev, in cci_pmu_alloc()
1489 sizeof(*cci_pmu->hw_events.used_mask), in cci_pmu_alloc()
1491 if (!cci_pmu->hw_events.used_mask) in cci_pmu_alloc()
1494 return cci_pmu; in cci_pmu_alloc()
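cci_pmu_alloc() (lines 1456-1494) sizes everything off the model before any platform resource is touched: one devm_kzalloc for the PMU itself, then devm_kcalloc'd arrays for the IRQ list, the per-counter event pointers, and the used_mask bitmap, each sized by CCI_PMU_MAX_HW_CNTRS(model) so the probe path can never overrun them. Because every allocation is devm-managed, no explicit unwinding is needed. A condensed sketch; get_cci_model() is an assumed model-lookup helper:

    static struct cci_pmu *cci_pmu_alloc(struct platform_device *pdev)
    {
        const struct cci_pmu_model *model;
        struct cci_pmu *cci_pmu;

        model = get_cci_model(pdev);	/* assumed lookup, e.g. via OF match */
        if (!model)
            return ERR_PTR(-ENODEV);

        cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*cci_pmu), GFP_KERNEL);
        if (!cci_pmu)
            return ERR_PTR(-ENOMEM);

        cci_pmu->model = model;
        cci_pmu->irqs = devm_kcalloc(&pdev->dev, CCI_PMU_MAX_HW_CNTRS(model),
                                     sizeof(*cci_pmu->irqs), GFP_KERNEL);
        if (!cci_pmu->irqs)
            return ERR_PTR(-ENOMEM);

        cci_pmu->hw_events.events = devm_kcalloc(&pdev->dev,
                                                 CCI_PMU_MAX_HW_CNTRS(model),
                                                 sizeof(*cci_pmu->hw_events.events),
                                                 GFP_KERNEL);
        if (!cci_pmu->hw_events.events)
            return ERR_PTR(-ENOMEM);

        /* BITS_TO_LONGS() is the minimal sizing for the bitmap. */
        cci_pmu->hw_events.used_mask = devm_kcalloc(&pdev->dev,
                                                    BITS_TO_LONGS(CCI_PMU_MAX_HW_CNTRS(model)),
                                                    sizeof(*cci_pmu->hw_events.used_mask),
                                                    GFP_KERNEL);
        if (!cci_pmu->hw_events.used_mask)
            return ERR_PTR(-ENOMEM);

        return cci_pmu;
    }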
1501 struct cci_pmu *cci_pmu; in cci_pmu_probe() local
1504 cci_pmu = cci_pmu_alloc(pdev); in cci_pmu_probe()
1505 if (IS_ERR(cci_pmu)) in cci_pmu_probe()
1506 return PTR_ERR(cci_pmu); in cci_pmu_probe()
1509 cci_pmu->base = devm_ioremap_resource(&pdev->dev, res); in cci_pmu_probe()
1510 if (IS_ERR(cci_pmu->base)) in cci_pmu_probe()
1517 cci_pmu->nr_irqs = 0; in cci_pmu_probe()
1518 for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) { in cci_pmu_probe()
1523 if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs)) in cci_pmu_probe()
1526 cci_pmu->irqs[cci_pmu->nr_irqs++] = irq; in cci_pmu_probe()
1533 if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) { in cci_pmu_probe()
1535 i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)); in cci_pmu_probe()
1539 raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock); in cci_pmu_probe()
1540 mutex_init(&cci_pmu->reserve_mutex); in cci_pmu_probe()
1541 atomic_set(&cci_pmu->active_events, 0); in cci_pmu_probe()
1542 cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus); in cci_pmu_probe()
1544 cci_pmu->cpu_nb = (struct notifier_block) { in cci_pmu_probe()
1553 ret = register_cpu_notifier(&cci_pmu->cpu_nb); in cci_pmu_probe()
1557 ret = cci_pmu_init(cci_pmu, pdev); in cci_pmu_probe()
1559 unregister_cpu_notifier(&cci_pmu->cpu_nb); in cci_pmu_probe()
1563 pr_info("ARM %s PMU driver probed", cci_pmu->model->name); in cci_pmu_probe()
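Taken together, cci_pmu_probe() (lines 1501-1563) fixes the initialisation order: allocate, map the MMIO resource, collect and de-duplicate the counter IRQs, initialise the lock, mutex, refcount and CPU mask, register the hotplug notifier, and only then register with perf, unwinding the notifier if that last step fails. A condensed reconstruction; the warning text and error codes are illustrative:

    static int cci_pmu_probe(struct platform_device *pdev)
    {
        struct resource *res;
        struct cci_pmu *cci_pmu;
        int i, ret, irq;

        cci_pmu = cci_pmu_alloc(pdev);
        if (IS_ERR(cci_pmu))
            return PTR_ERR(cci_pmu);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        cci_pmu->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(cci_pmu->base))
            return -ENOMEM;

        /* Collect the IRQs, folding duplicates: several counters may
         * share one combined line. */
        cci_pmu->nr_irqs = 0;
        for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) {
            irq = platform_get_irq(pdev, i);
            if (irq < 0)
                break;
            if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs))
                continue;
            cci_pmu->irqs[cci_pmu->nr_irqs++] = irq;
        }

        if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) {
            dev_warn(&pdev->dev, "wrong number of interrupts: %d, expected %d\n",
                     i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model));
            return -EINVAL;
        }

        raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
        mutex_init(&cci_pmu->reserve_mutex);
        atomic_set(&cci_pmu->active_events, 0);
        cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus);

        cci_pmu->cpu_nb = (struct notifier_block) {
            .notifier_call	= cci_pmu_cpu_notifier,
            /* Assumed priority: run before the perf core's own notifier. */
            .priority		= CPU_PRI_PERF + 1,
        };

        ret = register_cpu_notifier(&cci_pmu->cpu_nb);
        if (ret)
            return ret;

        ret = cci_pmu_init(cci_pmu, pdev);
        if (ret) {
            unregister_cpu_notifier(&cci_pmu->cpu_nb);
            return ret;
        }

        pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
        return 0;
    }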