Searched refs:x86_pmu (Results 1 – 10 of 10) sorted by relevance

/linux-4.1.27/arch/x86/kernel/cpu/
perf_event_intel_lbr.c
176 for (i = 0; i < x86_pmu.lbr_nr; i++) in intel_pmu_lbr_reset_32()
177 wrmsrl(x86_pmu.lbr_from + i, 0); in intel_pmu_lbr_reset_32()
184 for (i = 0; i < x86_pmu.lbr_nr; i++) { in intel_pmu_lbr_reset_64()
185 wrmsrl(x86_pmu.lbr_from + i, 0); in intel_pmu_lbr_reset_64()
186 wrmsrl(x86_pmu.lbr_to + i, 0); in intel_pmu_lbr_reset_64()
192 if (!x86_pmu.lbr_nr) in intel_pmu_lbr_reset()
195 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) in intel_pmu_lbr_reset()
208 rdmsrl(x86_pmu.lbr_tos, tos); in intel_pmu_lbr_tos()
229 mask = x86_pmu.lbr_nr - 1; in __intel_pmu_lbr_restore()
231 for (i = 0; i < x86_pmu.lbr_nr; i++) { in __intel_pmu_lbr_restore()
[all …]
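
The perf_event_intel_lbr.c hits treat the LBR stack as a power-of-two ring: x86_pmu.lbr_nr entries, indexed relative to the top-of-stack value read via x86_pmu.lbr_tos and wrapped with a mask of lbr_nr - 1. A rough user-space mock of that masking scheme follows; the arrays, the 16-entry depth and the fake branch values stand in for the rdmsrl()/wrmsrl() accesses and are not the kernel's code.

#include <stdio.h>
#include <stdint.h>

#define LBR_NR 16                          /* assumed power-of-two stack depth */

static uint64_t lbr_from[LBR_NR];          /* stands in for the LBR_FROM MSR bank */
static uint64_t lbr_to[LBR_NR];            /* stands in for the LBR_TO MSR bank */

/* Walk the ring newest-first from the top-of-stack slot, wrapping with a
 * mask the way the __intel_pmu_lbr_restore() hit above does. */
static void dump_lbr_stack(unsigned int tos)
{
	unsigned int mask = LBR_NR - 1, i;

	for (i = 0; i < LBR_NR; i++) {
		unsigned int idx = (tos - i) & mask;

		printf("branch %2u: %#llx -> %#llx\n", i,
		       (unsigned long long)lbr_from[idx],
		       (unsigned long long)lbr_to[idx]);
	}
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < LBR_NR; i++) {     /* fill with fake branch records */
		lbr_from[i] = 0x1000 + i;
		lbr_to[i]   = 0x2000 + i;
	}
	dump_lbr_stack(5);                 /* pretend the TOS register reads 5 */
	return 0;
}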
perf_event.c
42 struct x86_pmu x86_pmu __read_mostly;
67 int shift = 64 - x86_pmu.cntval_bits; in x86_perf_event_update()
117 if (!x86_pmu.extra_regs) in x86_pmu_extra_regs()
120 for (er = x86_pmu.extra_regs; er->msr; er++) { in x86_pmu_extra_regs()
146 for (i = 0; i < x86_pmu.num_counters; i++) { in reserve_pmc_hardware()
151 for (i = 0; i < x86_pmu.num_counters; i++) { in reserve_pmc_hardware()
162 i = x86_pmu.num_counters; in reserve_pmc_hardware()
175 for (i = 0; i < x86_pmu.num_counters; i++) { in release_pmc_hardware()
199 for (i = 0; i < x86_pmu.num_counters; i++) { in check_hw_exists()
213 if (x86_pmu.num_counters_fixed) { in check_hw_exists()
[all …]
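
The perf_event.c hits include the loops in reserve_pmc_hardware()/release_pmc_hardware() that walk x86_pmu.num_counters, claiming each counter and unwinding everything already claimed if one reservation fails. A small standalone mock of that claim-all-or-roll-back idiom follows; the counter count, the busy flags and the helper names are invented for illustration, not the kernel's per-counter reservation calls.

#include <stdbool.h>
#include <stdio.h>

#define NUM_COUNTERS 4                     /* assumed number of counters */

static bool counter_busy[NUM_COUNTERS];

static bool reserve_counter(int i)         /* stand-in for the real per-counter reservation */
{
	if (counter_busy[i])
		return false;
	counter_busy[i] = true;
	return true;
}

static void release_counter(int i)
{
	counter_busy[i] = false;
}

/* Claim every counter, or release the ones already claimed and report
 * failure, mirroring the shape of the reserve/release loops above. */
static bool reserve_all_counters(void)
{
	int i;

	for (i = 0; i < NUM_COUNTERS; i++) {
		if (!reserve_counter(i))
			goto rollback;
	}
	return true;

rollback:
	while (--i >= 0)
		release_counter(i);
	return false;
}

int main(void)
{
	counter_busy[2] = true;                /* pretend counter 2 is already taken */
	printf("reserved all: %s\n", reserve_all_counters() ? "yes" : "no");
	return 0;
}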
perf_event_intel.c
1276 x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask); in __intel_pmu_enable_all()
1459 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY) in intel_pmu_enable_fixed()
1536 if (!x86_pmu.num_counters) in intel_pmu_reset()
1543 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in intel_pmu_reset()
1547 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) in intel_pmu_reset()
1554 if (x86_pmu.version >= 2) { in intel_pmu_reset()
1560 if (x86_pmu.lbr_nr) { in intel_pmu_reset()
1586 if (!x86_pmu.late_ack) in intel_pmu_handle_irq()
1627 x86_pmu.drain_pebs(regs); in intel_pmu_handle_irq()
1679 if (x86_pmu.late_ack) in intel_pmu_handle_irq()
[all …]
perf_event_intel_ds.c
256 if (!x86_pmu.pebs) in alloc_pebs_buffer()
267 if (x86_pmu.intel_cap.pebs_format < 2) { in alloc_pebs_buffer()
276 max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size; in alloc_pebs_buffer()
281 max * x86_pmu.pebs_record_size; in alloc_pebs_buffer()
284 thresh * x86_pmu.pebs_record_size; in alloc_pebs_buffer()
293 if (!ds || !x86_pmu.pebs) in release_pebs_buffer()
310 if (!x86_pmu.bts) in alloc_bts_buffer()
336 if (!ds || !x86_pmu.bts) in release_bts_buffer()
372 if (!x86_pmu.bts && !x86_pmu.pebs) in release_ds_buffers()
392 x86_pmu.bts_active = 0; in reserve_ds_buffers()
[all …]
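
The alloc_pebs_buffer() hits size the DS area in whole PEBS records: the record count is the buffer size divided by x86_pmu.pebs_record_size, and the absolute maximum and interrupt threshold are that many records (or a smaller threshold count) past the buffer base. The standalone sketch below only mirrors that arithmetic; the buffer size, record size, base address and one-record threshold are placeholder values, not the kernel's constants.

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	/* Placeholder numbers, chosen only to show the record-granular layout. */
	size_t buffer_size = 4096;             /* stands in for PEBS_BUFFER_SIZE */
	size_t record_size = 176;              /* stands in for pebs_record_size */
	size_t base        = 0x100000;         /* pretend buffer base address    */

	size_t max    = buffer_size / record_size;   /* whole records that fit          */
	size_t thresh = 1;                           /* assume interrupt after 1 record */

	size_t absolute_maximum    = base + max * record_size;
	size_t interrupt_threshold = base + thresh * record_size;

	printf("records per buffer : %zu\n", max);
	printf("absolute maximum   : %#zx\n", absolute_maximum);
	printf("interrupt threshold: %#zx\n", interrupt_threshold);
	return 0;
}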
perf_event.h
492 struct x86_pmu { struct
623 __quirk.next = x86_pmu.quirks; \ argument
624 x86_pmu.quirks = &__quirk; \
652 extern struct x86_pmu x86_pmu __read_mostly;
656 return x86_pmu.lbr_sel_map && in x86_pmu_has_lbr_callstack()
657 x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0; in x86_pmu_has_lbr_callstack()
687 return x86_pmu.eventsel + (x86_pmu.addr_offset ? in x86_pmu_config_addr()
688 x86_pmu.addr_offset(index, true) : index); in x86_pmu_config_addr()
693 return x86_pmu.perfctr + (x86_pmu.addr_offset ? in x86_pmu_event_addr()
694 x86_pmu.addr_offset(index, false) : index); in x86_pmu_event_addr()
[all …]
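
The x86_pmu_config_addr()/x86_pmu_event_addr() hits in perf_event.h show how a counter's config and event MSR addresses are formed: a base (x86_pmu.eventsel or x86_pmu.perfctr) plus either the result of an optional per-PMU addr_offset() hook or, when no hook is installed, the raw counter index. A rough user-space mock of that optional-hook dispatch follows; the struct, the MSR numbers and the doubled-spacing rule are illustrative stand-ins, not the real struct x86_pmu.

#include <stdio.h>

/* Cut-down stand-in for the real descriptor: only the pieces the address
 * helpers touch. */
struct pmu_desc {
	unsigned int eventsel;                        /* base of the config MSRs  */
	unsigned int perfctr;                         /* base of the counter MSRs */
	int (*addr_offset)(int index, int eventsel);  /* optional per-PMU hook    */
};

/* Example hook for a PMU whose MSRs are spaced two apart (made-up rule). */
static int spaced_addr_offset(int index, int eventsel)
{
	(void)eventsel;
	return index * 2;
}

static unsigned int config_addr(const struct pmu_desc *pmu, int index)
{
	return pmu->eventsel +
	       (pmu->addr_offset ? pmu->addr_offset(index, 1) : index);
}

int main(void)
{
	struct pmu_desc plain  = { .eventsel = 0xc0010000, .perfctr = 0xc0010004 };
	struct pmu_desc spaced = { .eventsel = 0xc0010200, .perfctr = 0xc0010201,
				   .addr_offset = spaced_addr_offset };

	printf("plain  counter 3 config MSR: %#x\n", config_addr(&plain, 3));
	printf("spaced counter 3 config MSR: %#x\n", config_addr(&spaced, 3));
	return 0;
}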
perf_event_amd.c
249 for (i = 0; i < x86_pmu.num_counters; i++) { in __amd_put_nb_event_constraints()
316 for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) { in __amd_get_nb_event_constraints()
359 for (i = 0; i < x86_pmu.num_counters; i++) { in amd_alloc_nb()
622 static __initconst const struct x86_pmu amd_pmu = {
661 x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; in amd_core_pmu_init()
674 x86_pmu.eventsel = MSR_F15H_PERF_CTL; in amd_core_pmu_init()
675 x86_pmu.perfctr = MSR_F15H_PERF_CTR; in amd_core_pmu_init()
676 x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE; in amd_core_pmu_init()
690 x86_pmu = amd_pmu; in amd_pmu_init()
perf_event_p6.c
200 static __initconst const struct x86_pmu p6_pmu = {
241 x86_pmu.attr_rdpmc_broken = 1; in p6_pmu_rdpmc_quirk()
242 x86_pmu.attr_rdpmc = 0; in p6_pmu_rdpmc_quirk()
248 x86_pmu = p6_pmu; in p6_pmu_init()
perf_event_knc.c
287 static const struct x86_pmu knc_pmu __initconst = {
313 x86_pmu = knc_pmu; in knc_pmu_init()
perf_event_p4.c
921 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in p4_pmu_disable_all()
990 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in p4_pmu_enable_all()
1009 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in p4_pmu_handle_irq()
1028 if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1)))) in p4_pmu_handle_irq()
1301 static __initconst const struct x86_pmu p4_pmu = {
1359 x86_pmu = p4_pmu; in p4_pmu_init()
1370 for (i = 0; i < x86_pmu.num_counters; i++) { in p4_pmu_init()
perf_event_intel_bts.c
517 if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts) in bts_init()
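
Taken together, the amd/p6/knc/p4 entries show the initialization pattern behind all of these references: each vendor file provides an __initconst struct x86_pmu template, the vendor init routine copies it wholesale into the single global x86_pmu (as in "x86_pmu = amd_pmu;"), and then patches individual fields such as eventsel, perfctr and num_counters (as amd_core_pmu_init() does for the F15H MSRs). The standalone mock below sketches that copy-then-patch idiom; the struct, field set and numeric values are simplified illustrations, not the kernel's definitions.

#include <stdio.h>

struct pmu_desc {                           /* cut-down stand-in for struct x86_pmu */
	const char  *name;
	int          num_counters;
	unsigned int eventsel;
	unsigned int perfctr;
};

/* One global descriptor, filled in once at boot; generic code reads only
 * this copy, which is the role x86_pmu plays in perf_event.c. */
static struct pmu_desc pmu;

/* Vendor templates, analogous to amd_pmu / p6_pmu / knc_pmu / p4_pmu. */
static const struct pmu_desc amd_like = {
	.name = "amd-like", .num_counters = 4,
	.eventsel = 0xc0010000, .perfctr = 0xc0010004,
};
static const struct pmu_desc p6_like = {
	.name = "p6-like", .num_counters = 2,
	.eventsel = 0x186, .perfctr = 0xc1,
};

static void pmu_init(int amd_like_cpu, int has_core_ext)
{
	/* Whole-struct copy, as in "x86_pmu = amd_pmu;". */
	pmu = amd_like_cpu ? amd_like : p6_like;

	/* Patch individual fields afterwards, the way amd_core_pmu_init()
	 * switches to the core-extension MSR bank (values illustrative). */
	if (amd_like_cpu && has_core_ext) {
		pmu.eventsel     = 0xc0010200;
		pmu.perfctr      = 0xc0010201;
		pmu.num_counters = 6;
	}
}

int main(void)
{
	pmu_init(1, 1);
	printf("%s: %d counters, eventsel base %#x\n",
	       pmu.name, pmu.num_counters, pmu.eventsel);
	return 0;
}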