Lines matching references to 'idx'
84 #define CCI_PMU_CNTR_BASE(model, idx) ((idx) * CCI_PMU_CNTR_SIZE(model)) argument
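The macro above locates each counter's register window purely by arithmetic: counter idx begins at idx times the per-counter window size defined by the model. A minimal userspace sketch of that addressing scheme, with a hypothetical model struct and an assumed window size (not the kernel driver itself):

#include <stdio.h>

/* Hypothetical stand-in for the kernel's struct cci_pmu_model. */
struct pmu_model {
    unsigned int cntr_size;   /* bytes occupied by one counter's registers */
};

/* Mirrors CCI_PMU_CNTR_BASE(): window base = index * window size. */
static unsigned int cntr_base(const struct pmu_model *model, int idx)
{
    return idx * model->cntr_size;
}

int main(void)
{
    struct pmu_model model = { .cntr_size = 0x1000 };  /* assumed 4 KiB window */

    for (int idx = 0; idx < 4; idx++)
        printf("counter %d -> base offset 0x%x\n", idx, cntr_base(&model, idx));
    return 0;
}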
342 int idx; in cci400_get_event_idx() local
352 for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx) in cci400_get_event_idx()
353 if (!test_and_set_bit(idx, hw->used_mask)) in cci400_get_event_idx()
354 return idx; in cci400_get_event_idx()
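cci400_get_event_idx() hands out the first free hardware counter by atomically testing and setting its bit in hw->used_mask. A simplified, single-threaded sketch of that first-fit allocation (the kernel uses the atomic test_and_set_bit(); the helper names and counter count below are assumptions):

#include <stdbool.h>
#include <stdio.h>

#define NUM_CNTRS 5   /* assumed number of counters */

/* Non-atomic stand-in for the kernel's test_and_set_bit(). */
static bool test_and_set(unsigned long *mask, int bit)
{
    bool was_set = (*mask >> bit) & 1UL;

    *mask |= 1UL << bit;
    return was_set;
}

/* Return the first free counter index, or -1 when all are in use. */
static int get_event_idx(unsigned long *used_mask)
{
    for (int idx = 0; idx < NUM_CNTRS; idx++)
        if (!test_and_set(used_mask, idx))
            return idx;
    return -1;
}

int main(void)
{
    unsigned long used = 0;

    /* The sixth request finds no free counter and gets -1. */
    for (int i = 0; i < NUM_CNTRS + 1; i++)
        printf("allocated counter %d\n", get_event_idx(&used));
    return 0;
}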
622 static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx) in pmu_is_valid_counter() argument
624 return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu); in pmu_is_valid_counter()
627 static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset) in pmu_read_register() argument
630 CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset); in pmu_read_register()
634 int idx, unsigned int offset) in pmu_write_register() argument
637 CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset); in pmu_write_register()
640 static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx) in pmu_disable_counter() argument
642 pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL); in pmu_disable_counter()
645 static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx) in pmu_enable_counter() argument
647 pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL); in pmu_enable_counter()
650 static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event) in pmu_set_event() argument
652 pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL); in pmu_set_event()
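pmu_read_register()/pmu_write_register() add a per-register offset to the counter's base window, and the enable/disable/set-event helpers are thin wrappers that write the control and event-select registers inside that window. A self-contained model of that layering, using an in-memory array in place of the mapped PMU registers (the offsets and sizes are illustrative, not the hardware's):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_CNTRS     4
#define CNTR_SIZE     0x1000u   /* assumed per-counter window size */
#define REG_EVT_SEL   0x000u    /* illustrative offsets within a window */
#define REG_CNTR_CTRL 0x008u

static uint8_t pmu_regs[NUM_CNTRS * CNTR_SIZE];   /* stand-in for the ioremapped region */

static uint32_t read_reg(int idx, unsigned int offset)
{
    uint32_t val;

    memcpy(&val, pmu_regs + idx * CNTR_SIZE + offset, sizeof(val));
    return val;
}

static void write_reg(uint32_t value, int idx, unsigned int offset)
{
    memcpy(pmu_regs + idx * CNTR_SIZE + offset, &value, sizeof(value));
}

/* Thin wrappers, analogous to pmu_enable_counter()/pmu_disable_counter()/pmu_set_event(). */
static void enable_counter(int idx)            { write_reg(1, idx, REG_CNTR_CTRL); }
static void disable_counter(int idx)           { write_reg(0, idx, REG_CNTR_CTRL); }
static void set_event(int idx, uint32_t event) { write_reg(event, idx, REG_EVT_SEL); }

int main(void)
{
    set_event(2, 0x1b);   /* program an arbitrary event code on counter 2 */
    enable_counter(2);
    printf("ctrl=%u evt=0x%x\n", read_reg(2, REG_CNTR_CTRL), read_reg(2, REG_EVT_SEL));
    disable_counter(2);
    return 0;
}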
669 int idx; in pmu_get_event_idx() local
675 for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) in pmu_get_event_idx()
676 if (!test_and_set_bit(idx, hw->used_mask)) in pmu_get_event_idx()
677 return idx; in pmu_get_event_idx()
745 int idx = hw_counter->idx; in pmu_read_counter() local
748 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { in pmu_read_counter()
749 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); in pmu_read_counter()
752 value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR); in pmu_read_counter()
761 int idx = hw_counter->idx; in pmu_write_counter() local
763 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) in pmu_write_counter()
764 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); in pmu_write_counter()
766 pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR); in pmu_write_counter()
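pmu_read_counter() and pmu_write_counter() guard every access with the same range check used by pmu_is_valid_counter() and report an invalid index rather than touching a non-existent register. A small sketch of that guarded access pattern (the counter array and error reporting below are stand-ins for the device registers and dev_err()):

#include <stdint.h>
#include <stdio.h>

#define NUM_CNTRS 5   /* assumed: last valid index is NUM_CNTRS - 1 */

static uint32_t counters[NUM_CNTRS];   /* stand-in for the hardware count registers */

/* Same shape as pmu_is_valid_counter(): 0 <= idx <= last counter. */
static int is_valid_counter(int idx)
{
    return idx >= 0 && idx < NUM_CNTRS;
}

static uint32_t read_counter(int idx)
{
    if (!is_valid_counter(idx)) {
        fprintf(stderr, "Invalid CCI PMU counter %d\n", idx);
        return 0;
    }
    return counters[idx];
}

static void write_counter(int idx, uint32_t value)
{
    if (!is_valid_counter(idx)) {
        fprintf(stderr, "Invalid CCI PMU counter %d\n", idx);
        return;
    }
    counters[idx] = value;
}

int main(void)
{
    write_counter(3, 42);
    printf("counter 3 = %u\n", read_counter(3));
    read_counter(7);   /* out of range: reports an error, returns 0 */
    return 0;
}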
811 int idx, handled = IRQ_NONE; in pmu_handle_irq() local
819 for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) { in pmu_handle_irq()
820 struct perf_event *event = events->events[idx]; in pmu_handle_irq()
829 if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) & in pmu_handle_irq()
833 pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx, in pmu_handle_irq()
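pmu_handle_irq() walks every counter, skips the ones with no active event, and only treats a counter as having fired if its overflow flag register reads back set; it then writes the flag back to acknowledge it. A condensed sketch of that loop (the flag array and event table below model the OVRFLW register and hw_events->events[]):

#include <stdbool.h>
#include <stdio.h>

#define NUM_CNTRS   5
#define OVRFLW_FLAG 1u

struct event { const char *name; };     /* stand-in for struct perf_event */

static unsigned int ovrflw[NUM_CNTRS];  /* models the per-counter overflow register */
static struct event *events[NUM_CNTRS]; /* models hw_events->events[] */

/* Returns true if at least one overflow was handled (IRQ_HANDLED vs IRQ_NONE). */
static bool handle_irq(void)
{
    bool handled = false;

    for (int idx = 0; idx < NUM_CNTRS; idx++) {
        struct event *ev = events[idx];

        if (!ev)
            continue;                   /* no event on this counter */
        if (!(ovrflw[idx] & OVRFLW_FLAG))
            continue;                   /* counter did not overflow */

        ovrflw[idx] = 0;                /* acknowledge; hardware clears on writing the flag back */
        printf("overflow on counter %d (%s)\n", idx, ev->name);
        handled = true;
    }
    return handled;
}

int main(void)
{
    struct event cycles = { "cycles" };

    events[2] = &cycles;
    ovrflw[2] = OVRFLW_FLAG;
    printf("handled = %d\n", handle_irq());
    return 0;
}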
912 static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx) in pmu_fixed_hw_idx() argument
914 return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs); in pmu_fixed_hw_idx()
922 int idx = hwc->idx; in cci_pmu_start() local
934 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { in cci_pmu_start()
935 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); in cci_pmu_start()
942 if (!pmu_fixed_hw_idx(cci_pmu, idx)) in cci_pmu_start()
943 pmu_set_event(cci_pmu, idx, hwc->config_base); in cci_pmu_start()
946 pmu_enable_counter(cci_pmu, idx); in cci_pmu_start()
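cci_pmu_start() validates the index, programs the event-select register only for programmable counters (pmu_fixed_hw_idx() filters out the model's fixed-function ones), and then enables the counter. A sketch of that start path under assumed names and register models:

#include <stdio.h>

#define FIXED_HW_CNTRS 1   /* assumed: counter 0 is fixed-function (e.g. cycles) */
#define NUM_CNTRS      5

static unsigned int evt_sel[NUM_CNTRS];   /* models the event-select registers */
static unsigned int ctrl[NUM_CNTRS];      /* models the counter-control registers */

/* Same test as pmu_fixed_hw_idx(): fixed counters occupy the lowest indices. */
static int is_fixed_idx(int idx)
{
    return idx >= 0 && idx < FIXED_HW_CNTRS;
}

static void pmu_start(int idx, unsigned int event)
{
    if (idx < 0 || idx >= NUM_CNTRS) {
        fprintf(stderr, "Invalid CCI PMU counter %d\n", idx);
        return;
    }
    if (!is_fixed_idx(idx))
        evt_sel[idx] = event;   /* only programmable counters take an event code */
    ctrl[idx] = 1;              /* enable the counter */
}

int main(void)
{
    pmu_start(0, 0x11);   /* fixed counter: event code is ignored */
    pmu_start(3, 0x1b);   /* programmable counter: event code is latched */
    printf("evt_sel[0]=0x%x evt_sel[3]=0x%x\n", evt_sel[0], evt_sel[3]);
    return 0;
}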
955 int idx = hwc->idx; in cci_pmu_stop() local
960 if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { in cci_pmu_stop()
961 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); in cci_pmu_stop()
969 pmu_disable_counter(cci_pmu, idx); in cci_pmu_stop()
979 int idx; in cci_pmu_add() local
985 idx = pmu_get_event_idx(hw_events, event); in cci_pmu_add()
986 if (idx < 0) { in cci_pmu_add()
987 err = idx; in cci_pmu_add()
991 event->hw.idx = idx; in cci_pmu_add()
992 hw_events->events[idx] = event; in cci_pmu_add()
1011 int idx = hwc->idx; in cci_pmu_del() local
1014 hw_events->events[idx] = NULL; in cci_pmu_del()
1015 clear_bit(idx, hw_events->used_mask); in cci_pmu_del()
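cci_pmu_add() and cci_pmu_del() bracket a counter's lifetime: add allocates an index from the used mask and records the event in hw_events->events[idx]; del clears that slot and releases the bit so the counter can be reused. A compact model of that pairing (the structures and names are illustrative stand-ins):

#include <stdio.h>

#define NUM_CNTRS 5

struct event { int hw_idx; };             /* stand-in for perf_event/hw_perf_event */

static struct event *events[NUM_CNTRS];   /* models hw_events->events[] */
static unsigned long used_mask;           /* models hw_events->used_mask */

static int pmu_add(struct event *ev)
{
    for (int idx = 0; idx < NUM_CNTRS; idx++) {
        if (used_mask & (1UL << idx))
            continue;
        used_mask |= 1UL << idx;          /* claim the counter */
        ev->hw_idx = idx;
        events[idx] = ev;
        return 0;
    }
    return -1;                            /* no free counter */
}

static void pmu_del(struct event *ev)
{
    int idx = ev->hw_idx;

    events[idx] = NULL;                   /* drop the event from the slot */
    used_mask &= ~(1UL << idx);           /* release the counter for reuse */
    ev->hw_idx = -1;                      /* matches the -1 sentinel set at init time */
}

int main(void)
{
    struct event ev = { .hw_idx = -1 };

    pmu_add(&ev);
    printf("assigned counter %d\n", ev.hw_idx);
    pmu_del(&ev);
    printf("after del: idx=%d, mask=%lu\n", ev.hw_idx, used_mask);
    return 0;
}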
1093 hwc->idx = -1; in __hw_perf_event_init()