Lines matching references to idx in the Intel PMU perf driver. Each entry shows the source line number, the matching line, and the enclosing function; lines that declare idx are additionally tagged local or argument.
1392 int idx = hwc->idx - INTEL_PMC_IDX_FIXED; in intel_pmu_disable_fixed() local
1395 mask = 0xfULL << (idx * 4); in intel_pmu_disable_fixed()
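
Fixed counters are controlled through one shared MSR, MSR_CORE_PERF_FIXED_CTR_CTRL, with a 4-bit field per counter; that is why the code above first rebases hwc->idx by INTEL_PMC_IDX_FIXED and then builds a nibble mask at idx * 4. A minimal standalone sketch of the disable step (toy names, not the kernel's helpers):

    #include <stdint.h>

    /*
     * MSR_CORE_PERF_FIXED_CTR_CTRL layout, per fixed counter:
     *   bit 0: count in ring 0    bit 1: count in ring 3
     *   bit 2: any-thread         bit 3: PMI on overflow
     */
    static uint64_t toy_fixed_disable(uint64_t ctrl, int idx)
    {
        uint64_t mask = 0xfULL << (idx * 4);  /* this counter's nibble */

        return ctrl & ~mask;                  /* clear it: disabled */
    }
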
1412 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) { in intel_pmu_disable_event()
1418 cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx); in intel_pmu_disable_event()
1419 cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx); in intel_pmu_disable_event()
1420 cpuc->intel_cp_status &= ~(1ull << hwc->idx); in intel_pmu_disable_event()
1442 int idx = hwc->idx - INTEL_PMC_IDX_FIXED; in intel_pmu_enable_fixed() local
1462 bits <<= (idx * 4); in intel_pmu_enable_fixed()
1463 mask = 0xfULL << (idx * 4); in intel_pmu_enable_fixed()
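
Enabling is the read-modify-write mirror image: compose the 4-bit control value, then shift both it and the clearing mask into the counter's field, as lines 1462-1463 do. A sketch under the same toy layout:

    #include <stdint.h>

    #define TOY_FIXED_OS   0x1ULL  /* count in kernel mode */
    #define TOY_FIXED_USR  0x2ULL  /* count in user mode */
    #define TOY_FIXED_ANY  0x4ULL  /* any-thread, where supported */
    #define TOY_FIXED_PMI  0x8ULL  /* interrupt on overflow */

    static uint64_t toy_fixed_enable(uint64_t ctrl, int idx, uint64_t bits)
    {
        uint64_t mask = 0xfULL << (idx * 4);

        bits <<= idx * 4;              /* move the field into position */
        return (ctrl & ~mask) | bits;  /* replace only this field */
    }
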
1476 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) { in intel_pmu_enable_event()
1491 cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx); in intel_pmu_enable_event()
1493 cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx); in intel_pmu_enable_event()
1496 cpuc->intel_cp_status |= (1ull << hwc->idx); in intel_pmu_enable_event()
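
Two details in intel_pmu_{enable,disable}_event: hwc->idx == INTEL_PMC_IDX_FIXED_BTS is intercepted early because BTS is not a counter and has no eventsel MSR, and the guest/host control masks are plain per-counter bitmaps, set on enable according to the event's exclude_guest/exclude_host attributes and cleared unconditionally on disable (intel_cp_status similarly tracks counters running checkpointed TSX events for the overflow handler). A toy version of the bitmap bookkeeping, with invented names:

    #include <stdbool.h>
    #include <stdint.h>

    struct toy_cpuc {
        uint64_t ctrl_guest_mask;  /* counters left running in the guest */
        uint64_t ctrl_host_mask;   /* counters left running in the host  */
    };

    static void toy_enable(struct toy_cpuc *c, int idx,
                           bool exclude_guest, bool exclude_host)
    {
        if (!exclude_guest)
            c->ctrl_guest_mask |= 1ULL << idx;
        if (!exclude_host)
            c->ctrl_host_mask |= 1ULL << idx;
    }

    static void toy_disable(struct toy_cpuc *c, int idx)
    {
        c->ctrl_guest_mask &= ~(1ULL << idx);
        c->ctrl_host_mask  &= ~(1ULL << idx);
    }
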
1534 int idx; in intel_pmu_reset() local
1543 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in intel_pmu_reset()
1544 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull); in intel_pmu_reset()
1545 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull); in intel_pmu_reset()
1547 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) in intel_pmu_reset()
1548 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); in intel_pmu_reset()
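
intel_pmu_reset zeroes every general-purpose eventsel/counter MSR pair and every fixed counter, using wrmsrl_safe so a missing MSR faults harmlessly instead of killing the box. On the plain architectural-perfmon layout, the per-index addressing behind x86_pmu_config_addr()/x86_pmu_event_addr() is just base plus index; a sketch (the real helpers also understand alternative counter layouts):

    /* architectural perfmon: eventsels from 0x186, counters from 0xc1 */
    #define TOY_EVENTSEL0  0x186
    #define TOY_PERFCTR0   0x0c1

    static unsigned int toy_config_addr(int idx) { return TOY_EVENTSEL0 + idx; }
    static unsigned int toy_event_addr(int idx)  { return TOY_PERFCTR0  + idx; }
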
1702 static int intel_alt_er(int idx) in intel_alt_er() argument
1705 return idx; in intel_alt_er()
1707 if (idx == EXTRA_REG_RSP_0) in intel_alt_er()
1710 if (idx == EXTRA_REG_RSP_1) in intel_alt_er()
1713 return idx; in intel_alt_er()
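
intel_alt_er exists because the two offcore-response facilities are symmetric: if an event asked for EXTRA_REG_RSP_0 but that register is already claimed with a different value, the scheduler can retry with EXTRA_REG_RSP_1, and vice versa, on parts that have both. Sketch:

    enum toy_extra_reg { TOY_RSP_0, TOY_RSP_1 };

    static int toy_alt_er(int idx, int has_rsp_1)
    {
        if (!has_rsp_1)          /* single-register parts: nothing to swap */
            return idx;
        if (idx == TOY_RSP_0)
            return TOY_RSP_1;
        if (idx == TOY_RSP_1)
            return TOY_RSP_0;
        return idx;
    }
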
1716 static void intel_fixup_er(struct perf_event *event, int idx) in intel_fixup_er() argument
1718 event->hw.extra_reg.idx = idx; in intel_fixup_er()
1720 if (idx == EXTRA_REG_RSP_0) { in intel_fixup_er()
1724 } else if (idx == EXTRA_REG_RSP_1) { in intel_fixup_er()
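
Switching registers is not just picking another MSR: OFFCORE_RESPONSE_0 and _1 are distinct events (0x01B7 vs. 0x01BB on parts with both), so intel_fixup_er also rewrites the event-select bits in hw.config to match the register the event landed on. Roughly, with hypothetical names and the usual encodings:

    #include <stdint.h>

    #define TOY_ARCH_EVENT_MASK 0xffffULL  /* event select + umask bits */

    static void toy_fixup_er(uint64_t *config, unsigned int *msr, int idx)
    {
        *config &= ~TOY_ARCH_EVENT_MASK;
        if (idx == 0) {              /* TOY_RSP_0 */
            *config |= 0x01b7;       /* OFFCORE_RESPONSE_0 */
            *msr = 0x1a6;            /* MSR_OFFCORE_RSP_0 */
        } else {                     /* TOY_RSP_1 */
            *config |= 0x01bb;       /* OFFCORE_RESPONSE_1 */
            *msr = 0x1a7;            /* MSR_OFFCORE_RSP_1 */
        }
    }
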
1746 int idx = reg->idx; in __intel_shared_reg_get_constraints() local
1757 era = &cpuc->shared_regs->regs[idx]; in __intel_shared_reg_get_constraints()
1777 if (idx != reg->idx) in __intel_shared_reg_get_constraints()
1778 intel_fixup_er(event, idx); in __intel_shared_reg_get_constraints()
1802 idx = intel_alt_er(idx); in __intel_shared_reg_get_constraints()
1803 if (idx != reg->idx) { in __intel_shared_reg_get_constraints()
1830 era = &cpuc->shared_regs->regs[reg->idx]; in __intel_shared_reg_put_constraints()
1847 if (xreg->idx != EXTRA_REG_NONE) { in intel_shared_regs_constraints()
1853 if (breg->idx != EXTRA_REG_NONE) { in intel_shared_regs_constraints()
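
The get/put pair around regs[idx] implements sharing by value: the first scheduled event locks in the MSR value and takes a reference, later events join only if they program the identical value, and put drops the reference; on conflict the caller loops with intel_alt_er's alternate index, and the idx != reg->idx checks above detect that a swap happened and trigger intel_fixup_er. A compressed sketch; the kernel version takes a spinlock and also honors a per-event no-sharing flag:

    #include <stdint.h>

    struct toy_era {
        uint64_t config;  /* value programmed into the extra MSR */
        int      ref;     /* scheduled events currently sharing it */
    };

    /* returns 1 if the register is ours (possibly shared), 0 on conflict */
    static int toy_shared_reg_get(struct toy_era *era, uint64_t wanted)
    {
        if (era->ref == 0 || era->config == wanted) {
            era->config = wanted;
            era->ref++;
            return 1;
        }
        return 0;         /* caller retries with the alternate index */
    }

    static void toy_shared_reg_put(struct toy_era *era)
    {
        if (era->ref > 0)
            era->ref--;
    }
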
1864 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx, in x86_get_event_constraints() argument
1882 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, in __intel_get_event_constraints() argument
1899 return x86_get_event_constraints(cpuc, idx, event); in __intel_get_event_constraints()
1976 int idx, struct event_constraint *c) in intel_get_excl_constraints() argument
2028 if (idx < 0) in intel_get_excl_constraints()
2034 cx = &cpuc->constraint_list[idx]; in intel_get_excl_constraints()
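
This is the heart of the cross-hyperthread counter-corruption workaround: before a constraint is handed back, every candidate counter whose slot on the sibling thread is in a conflicting exclusive state gets masked out, and the modified constraint is stored as a dynamic copy in cpuc->constraint_list[idx] so the static tables stay pristine (idx < 0 marks the fake cpuc used while validating new events, which bails out early). The filtering rule, reduced to a sketch:

    #include <stdint.h>

    enum toy_excl_state { TOY_UNUSED, TOY_SHARED, TOY_EXCLUSIVE };

    /* drop from idxmsk any counter whose use on the sibling thread
     * conflicts: sibling exclusive always conflicts; sibling shared
     * conflicts only if this event itself needs exclusive mode
     */
    static uint64_t toy_filter_excl(uint64_t idxmsk,
                                    const enum toy_excl_state *sibling,
                                    int ncounters, int is_excl)
    {
        int i;

        for (i = 0; i < ncounters; i++) {
            if (sibling[i] == TOY_EXCLUSIVE ||
                (is_excl && sibling[i] == TOY_SHARED))
                idxmsk &= ~(1ULL << i);
        }
        return idxmsk;
    }
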
2098 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, in intel_get_event_constraints() argument
2104 if (idx >= 0) /* fake does < 0 */ in intel_get_event_constraints()
2105 c1 = cpuc->event_constraint[idx]; in intel_get_event_constraints()
2112 c2 = __intel_get_event_constraints(cpuc, idx, event); in intel_get_event_constraints()
2120 return intel_get_excl_constraints(cpuc, event, idx, c2); in intel_get_event_constraints()
2166 if (hwc->idx >= 0) in intel_put_excl_constraints()
2167 xlo->state[hwc->idx] = INTEL_EXCL_UNUSED; in intel_put_excl_constraints()
2180 if (reg->idx != EXTRA_REG_NONE) in intel_put_shared_regs_event_constraints()
2184 if (reg->idx != EXTRA_REG_NONE) in intel_put_shared_regs_event_constraints()
2202 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr) in intel_commit_scheduling() argument
2205 struct event_constraint *c = cpuc->event_constraint[idx]; in intel_commit_scheduling()
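
Counters move through a small per-thread state machine: intel_commit_scheduling publishes the final counter assignment (cntr) as shared or exclusive so the sibling hyperthread's scheduler can see it, and intel_put_excl_constraints returns the slot to unused when the event goes away (the hwc->idx >= 0 guard skips events that were never scheduled onto a counter). The lifecycle in miniature, reusing the toy state enum from the sketch above:

    static void toy_commit(enum toy_excl_state *state, int cntr, int is_excl)
    {
        state[cntr] = is_excl ? TOY_EXCLUSIVE : TOY_SHARED;
    }

    static void toy_put(enum toy_excl_state *state, int idx)
    {
        if (idx >= 0)             /* never-scheduled events have idx < 0 */
            state[idx] = TOY_UNUSED;
    }
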
2371 int idx; in core_guest_get_msrs() local
2373 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in core_guest_get_msrs()
2374 struct perf_event *event = cpuc->events[idx]; in core_guest_get_msrs()
2376 arr[idx].msr = x86_pmu_config_addr(idx); in core_guest_get_msrs()
2377 arr[idx].host = arr[idx].guest = 0; in core_guest_get_msrs()
2379 if (!test_bit(idx, cpuc->active_mask)) in core_guest_get_msrs()
2382 arr[idx].host = arr[idx].guest = in core_guest_get_msrs()
2386 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE; in core_guest_get_msrs()
2388 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE; in core_guest_get_msrs()
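
core_guest_get_msrs builds the per-counter switch list that KVM applies atomically at VM entry/exit: both the host and guest values start as the event's config with the enable bit set, and the enable bit is then stripped from whichever side the event excludes; inactive counters keep their MSR address but zero values. Condensed, with toy names:

    #include <stdbool.h>
    #include <stdint.h>

    struct toy_msr_entry {
        unsigned int msr;
        uint64_t host, guest;
    };

    #define TOY_EVENTSEL_ENABLE (1ULL << 22)  /* ARCH_PERFMON_EVENTSEL_ENABLE */

    static void toy_fill_entry(struct toy_msr_entry *e, unsigned int msr,
                               uint64_t config, bool active,
                               bool exclude_host, bool exclude_guest)
    {
        e->msr = msr;
        e->host = e->guest = 0;
        if (!active)
            return;                    /* unused counter: stays disabled */
        e->host = e->guest = config | TOY_EVENTSEL_ENABLE;
        if (exclude_host)
            e->host &= ~TOY_EVENTSEL_ENABLE;
        else if (exclude_guest)
            e->guest &= ~TOY_EVENTSEL_ENABLE;
    }
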
2404 int idx; in core_pmu_enable_all() local
2406 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in core_pmu_enable_all()
2407 struct hw_perf_event *hwc = &cpuc->events[idx]->hw; in core_pmu_enable_all()
2409 if (!test_bit(idx, cpuc->active_mask) || in core_pmu_enable_all()
2410 cpuc->events[idx]->attr.exclude_host) in core_pmu_enable_all()
2458 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx, in hsw_get_event_constraints() argument
2463 c = intel_get_event_constraints(cpuc, idx, event); in hsw_get_event_constraints()
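
hsw_get_event_constraints layers a Haswell TSX quirk on top of the generic path: an event counting in checkpointed-transaction mode (IN_TXCP) is restricted to general counter 2, so after the usual constraint lookup the candidate mask is narrowed to that single counter, possibly to nothing if counter 2 is unavailable. A sketch, assuming the usual bitmask representation of a constraint:

    #include <stdint.h>

    /* Haswell: IN_TXCP events may only run on general counter 2 */
    static uint64_t toy_hsw_constrain(uint64_t idxmsk, int in_txcp)
    {
        if (!in_txcp)
            return idxmsk;
        return idxmsk & (1ULL << 2);   /* may end up empty */
    }
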
3363 if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access) in intel_pmu_init()
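
The last hit comes from intel_pmu_init's boot-time MSR probing: every extra register is test-written once, er->extra_msr_access records whether the MSR is actually usable (hypervisors often fault or silently drop writes), and if the LBR select register in particular failed the probe, LBR filtering is disabled. The probe pattern is write, read back, restore; a hedged sketch using the kernel's _safe MSR accessors:

    #include <linux/types.h>
    #include <asm/msr.h>

    /* flip some harmless bits, read them back, restore; any fault or
     * dropped write marks the MSR as unusable
     */
    static bool toy_check_msr(unsigned int msr, u64 probe_bits)
    {
        u64 old, cur;

        if (rdmsrl_safe(msr, &old))
            return false;
        if (wrmsrl_safe(msr, old ^ probe_bits))
            return false;
        if (rdmsrl_safe(msr, &cur) || cur != (old ^ probe_bits))
            return false;
        wrmsrl_safe(msr, old);         /* put the original value back */
        return true;
    }
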