Lines matching references to x86_pmu
42 struct x86_pmu x86_pmu __read_mostly;
67 int shift = 64 - x86_pmu.cntval_bits; in x86_perf_event_update()
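The shift in x86_perf_event_update() is the standard trick for reading counters narrower than 64 bits: shift both raw reads up so the counter's top bit lands at bit 63, subtract, and arithmetic-shift back down, which sign-extends the delta correctly across a counter wrap. A standalone sketch of the same arithmetic (counter_delta() is a hypothetical helper, not the kernel function):

    #include <stdint.h>

    /* Sketch: delta between two raw reads of a counter that is only
     * cntval_bits wide. Mirrors the arithmetic in x86_perf_event_update(). */
    static int64_t counter_delta(uint64_t prev_raw, uint64_t new_raw, int cntval_bits)
    {
        int shift = 64 - cntval_bits;
        int64_t delta;

        /* Align the counter's MSB with bit 63, subtract in 64 bits, then
         * shift back down: the result is sign-extended, so a wrap
         * (new_raw < prev_raw) still yields the correct positive count. */
        delta = (int64_t)((new_raw << shift) - (prev_raw << shift));
        return delta >> shift;
    }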
117 if (!x86_pmu.extra_regs) in x86_pmu_extra_regs()
120 for (er = x86_pmu.extra_regs; er->msr; er++) { in x86_pmu_extra_regs()
146 for (i = 0; i < x86_pmu.num_counters; i++) { in reserve_pmc_hardware()
151 for (i = 0; i < x86_pmu.num_counters; i++) { in reserve_pmc_hardware()
162 i = x86_pmu.num_counters; in reserve_pmc_hardware()
175 for (i = 0; i < x86_pmu.num_counters; i++) { in release_pmc_hardware()
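reserve_pmc_hardware() walks every generic counter and, on a mid-loop failure, rewinds and releases only the MSRs it already claimed; the bare `i = x86_pmu.num_counters;` assignment above sets up that rollback. The shape of the pattern, simplified to one loop (the kernel runs it twice, once for the counter MSRs and once for the event-select MSRs); claim_counter()/unclaim_counter() are hypothetical stand-ins for reserve_perfctr_nmi()/release_perfctr_nmi():

    /* Hypothetical stubs standing in for the kernel's reserve/release calls. */
    static int claim_counter(int idx) { (void)idx; return 1; }
    static void unclaim_counter(int idx) { (void)idx; }

    /* Sketch of the claim-all-or-roll-back pattern in reserve_pmc_hardware(). */
    static int reserve_all(int num_counters)
    {
        int i;

        for (i = 0; i < num_counters; i++) {
            if (!claim_counter(i))
                goto rollback;
        }
        return 1;

    rollback:
        /* Undo only what was claimed before the failure. */
        while (--i >= 0)
            unclaim_counter(i);
        return 0;
    }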
199 for (i = 0; i < x86_pmu.num_counters; i++) { in check_hw_exists()
213 if (x86_pmu.num_counters_fixed) { in check_hw_exists()
218 for (i = 0; i < x86_pmu.num_counters_fixed; i++) { in check_hw_exists()
286 return x86_pmu.handle_irq != NULL; in x86_pmu_initialized()
360 if (atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) in x86_add_exclusive()
364 for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) { in x86_add_exclusive()
365 if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i])) in x86_add_exclusive()
369 atomic_inc(&x86_pmu.lbr_exclusive[what]); in x86_add_exclusive()
379 atomic_dec(&x86_pmu.lbr_exclusive[what]); in x86_del_exclusive()
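x86_add_exclusive()/x86_del_exclusive() implement n-way mutual exclusion between PMU features that cannot share the LBR hardware: the fast path bumps the refcount if this slot already has users, otherwise a slow path (under pmc_reserve_mutex in the kernel) checks that every other slot is idle before taking the first reference. A userspace sketch of the counting scheme, using C11 atomics in place of the kernel's atomic_t and eliding the mutex, so the slow path here is not actually race-free:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define NR_EXCLUSIVE 3  /* hypothetical number of exclusive users */

    static atomic_int lbr_exclusive[NR_EXCLUSIVE];

    static bool add_exclusive(int what)
    {
        int i, cur = atomic_load(&lbr_exclusive[what]);

        /* Fast path: slot already active, just add a reference
         * (the kernel's atomic_inc_not_zero()). */
        while (cur) {
            if (atomic_compare_exchange_weak(&lbr_exclusive[what], &cur, cur + 1))
                return true;
        }

        /* Slow path: in the kernel this runs under pmc_reserve_mutex. */
        for (i = 0; i < NR_EXCLUSIVE; i++) {
            if (i != what && atomic_load(&lbr_exclusive[i]))
                return false;
        }
        atomic_fetch_add(&lbr_exclusive[what], 1);
        return true;
    }

    static void del_exclusive(int what)
    {
        atomic_fetch_sub(&lbr_exclusive[what], 1);
    }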
389 hwc->sample_period = x86_pmu.max_period; in x86_setup_perfctr()
400 if (attr->config >= x86_pmu.max_events) in x86_setup_perfctr()
406 config = x86_pmu.event_map(attr->config); in x86_setup_perfctr()
420 if (!x86_pmu.bts_active) in x86_setup_perfctr()
475 if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) { in x86_pmu_hw_config()
479 if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2) in x86_pmu_hw_config()
490 if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) { in x86_pmu_hw_config()
537 if (event->attr.sample_period && x86_pmu.limit_period) { in x86_pmu_hw_config()
538 if (x86_pmu.limit_period(event, event->attr.sample_period) > in x86_pmu_hw_config()
570 return x86_pmu.hw_config(event); in __x86_pmu_event_init()
578 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in x86_pmu_disable_all()
605 x86_pmu.disable_all(); in x86_pmu_disable()
613 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in x86_pmu_enable_all()
823 if (x86_pmu.start_scheduling) in x86_schedule_events()
824 x86_pmu.start_scheduling(cpuc); in x86_schedule_events()
828 c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]); in x86_schedule_events()
861 int gpmax = x86_pmu.num_counters; in x86_schedule_events()
895 if (x86_pmu.commit_scheduling) in x86_schedule_events()
896 x86_pmu.commit_scheduling(cpuc, i, assign[i]); in x86_schedule_events()
914 if (x86_pmu.put_event_constraints) in x86_schedule_events()
915 x86_pmu.put_event_constraints(cpuc, e); in x86_schedule_events()
919 if (x86_pmu.stop_scheduling) in x86_schedule_events()
920 x86_pmu.stop_scheduling(cpuc); in x86_schedule_events()
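x86_schedule_events() shows the driver's convention for vendor hooks: every x86_pmu callback a given CPU model may not provide (start_scheduling, commit_scheduling, put_event_constraints, stop_scheduling) is NULL-checked before the call, while mandatory ones like get_event_constraints are invoked unconditionally. Reduced to a skeleton (the struct and call site below are a stripped-down illustration, not the kernel definition):

    /* Sketch: the guarded-optional-callback convention used throughout
     * this file. Field names only mirror struct x86_pmu. */
    struct pmu_ops {
        void (*start_scheduling)(void *cpuc);  /* optional, may be NULL */
        void (*stop_scheduling)(void *cpuc);   /* optional, may be NULL */
        int  (*schedule)(void *cpuc);          /* mandatory */
    };

    static int schedule_events(struct pmu_ops *ops, void *cpuc)
    {
        int ret;

        if (ops->start_scheduling)   /* optional hook: guard first */
            ops->start_scheduling(cpuc);

        ret = ops->schedule(cpuc);   /* mandatory hook: call directly */

        if (ops->stop_scheduling)
            ops->stop_scheduling(cpuc);

        return ret;
    }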
934 max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed; in collect_events()
1065 x86_pmu.enable_all(added); in x86_pmu_enable()
1106 if (left > x86_pmu.max_period) in x86_perf_event_set_period()
1107 left = x86_pmu.max_period; in x86_perf_event_set_period()
1109 if (x86_pmu.limit_period) in x86_perf_event_set_period()
1110 left = x86_pmu.limit_period(event, left); in x86_perf_event_set_period()
1120 wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); in x86_perf_event_set_period()
1127 if (x86_pmu.perfctr_second_write) { in x86_perf_event_set_period()
1129 (u64)(-left) & x86_pmu.cntval_mask); in x86_perf_event_set_period()
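x86_perf_event_set_period() programs the counter with the two's complement of the remaining period: starting from -left, the counter overflows after exactly `left` increments, and the AND with cntval_mask truncates the value to the hardware counter width. The arithmetic as a standalone sketch (period_to_hw_value() is hypothetical; the parameters play the roles of x86_pmu.max_period and x86_pmu.cntval_mask):

    #include <stdint.h>

    /* Sketch of the value written to the counter MSR by
     * x86_perf_event_set_period(). */
    static uint64_t period_to_hw_value(int64_t left, int64_t max_period,
                                       uint64_t cntval_mask)
    {
        if (left > max_period)
            left = max_period;

        /* Counting up from -left, the counter reaches zero (and raises
         * its overflow interrupt) after exactly 'left' events; the mask
         * keeps only the bits the hardware counter actually has. */
        return (uint64_t)(-left) & cntval_mask;
    }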
1176 ret = x86_pmu.schedule_events(cpuc, n, assign); in x86_pmu_add()
1220 x86_pmu.enable(event); in x86_pmu_start()
1232 if (!x86_pmu.num_counters) in perf_event_print_debug()
1240 if (x86_pmu.version >= 2) { in perf_event_print_debug()
1251 if (x86_pmu.pebs_constraints) { in perf_event_print_debug()
1255 if (x86_pmu.lbr_nr) { in perf_event_print_debug()
1262 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in perf_event_print_debug()
1275 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { in perf_event_print_debug()
1290 x86_pmu.disable(event); in x86_pmu_stop()
1344 if (x86_pmu.put_event_constraints) in x86_pmu_del()
1345 x86_pmu.put_event_constraints(cpuc, event); in x86_pmu_del()
1377 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in x86_pmu_handle_irq()
1392 if (val & (1ULL << (x86_pmu.cntval_bits - 1))) in x86_pmu_handle_irq()
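Inside the interrupt handler, overflow is inferred from the counter's top bit: a counter programmed to -period counts up toward zero, so bit (cntval_bits - 1) stays set until the counter crosses zero and clears it. The check above skips counters whose top bit is still set; inverted into a hypothetical helper:

    #include <stdint.h>

    /* Sketch: has this cntval_bits-wide counter crossed zero since it
     * was programmed to a negative start value? Mirrors the test in
     * x86_pmu_handle_irq(), where a still-set top bit means "no
     * overflow, skip this counter". */
    static int counter_overflowed(uint64_t val, int cntval_bits)
    {
        return !(val & (1ULL << (cntval_bits - 1)));
    }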
1416 if (!x86_pmu.apic || !x86_pmu_initialized()) in perf_events_lapic_init()
1436 ret = x86_pmu.handle_irq(regs); in perf_event_nmi_handler()
1459 if (x86_pmu.cpu_prepare) in x86_pmu_notifier()
1460 ret = x86_pmu.cpu_prepare(cpu); in x86_pmu_notifier()
1464 if (x86_pmu.cpu_starting) in x86_pmu_notifier()
1465 x86_pmu.cpu_starting(cpu); in x86_pmu_notifier()
1476 if (x86_pmu.cpu_dying) in x86_pmu_notifier()
1477 x86_pmu.cpu_dying(cpu); in x86_pmu_notifier()
1482 if (x86_pmu.cpu_dead) in x86_pmu_notifier()
1483 x86_pmu.cpu_dead(cpu); in x86_pmu_notifier()
1498 x86_pmu.apic = 0; in pmu_check_apic()
1533 if (x86_pmu.event_map(i)) in filter_events()
1575 u64 config = x86_pmu.event_map(pmu_attr->id); in events_sysfs_show()
1581 return x86_pmu.events_sysfs_show(page, config); in events_sysfs_show()
1683 pr_cont("%s PMU driver.\n", x86_pmu.name); in init_hw_perf_events()
1685 x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */ in init_hw_perf_events()
1687 for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next) in init_hw_perf_events()
1690 if (!x86_pmu.intel_ctrl) in init_hw_perf_events()
1691 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1; in init_hw_perf_events()
1697 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, in init_hw_perf_events()
1698 0, x86_pmu.num_counters, 0, 0); in init_hw_perf_events()
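Two initialization idioms are visible here: model-specific quirks are kept on a singly linked list of callbacks that init_hw_perf_events() walks once, and a missing intel_ctrl is defaulted to a mask with one bit per generic counter, the same mask the unconstrained event constraint is built from. Sketched below; the struct mirrors the kernel's x86_pmu_quirk pattern, but the names are illustrative:

    /* Sketch: the quirk-list walk and the default per-counter enable mask. */
    struct pmu_quirk {
        struct pmu_quirk *next;
        void (*func)(void);
    };

    static void apply_quirks(struct pmu_quirk *quirks, int num_counters,
                             unsigned long long *intel_ctrl)
    {
        struct pmu_quirk *q;

        for (q = quirks; q; q = q->next)
            q->func();

        /* One enable bit per generic counter, e.g. 4 counters -> 0xf. */
        if (!*intel_ctrl)
            *intel_ctrl = (1ULL << num_counters) - 1;
    }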
1700 x86_pmu_format_group.attrs = x86_pmu.format_attrs; in init_hw_perf_events()
1702 if (x86_pmu.event_attrs) in init_hw_perf_events()
1703 x86_pmu_events_group.attrs = x86_pmu.event_attrs; in init_hw_perf_events()
1705 if (!x86_pmu.events_sysfs_show) in init_hw_perf_events()
1710 if (x86_pmu.cpu_events) { in init_hw_perf_events()
1713 tmp = merge_attr(x86_pmu_events_group.attrs, x86_pmu.cpu_events); in init_hw_perf_events()
1718 pr_info("... version: %d\n", x86_pmu.version); in init_hw_perf_events()
1719 pr_info("... bit width: %d\n", x86_pmu.cntval_bits); in init_hw_perf_events()
1720 pr_info("... generic registers: %d\n", x86_pmu.num_counters); in init_hw_perf_events()
1721 pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask); in init_hw_perf_events()
1722 pr_info("... max period: %016Lx\n", x86_pmu.max_period); in init_hw_perf_events()
1723 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed); in init_hw_perf_events()
1724 pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl); in init_hw_perf_events()
1785 ret = x86_pmu.schedule_events(cpuc, n, assign); in x86_pmu_commit_txn()
1823 if (x86_pmu.extra_regs) { in allocate_fake_cpuc()
1848 c = x86_pmu.get_event_constraints(fake_cpuc, -1, event); in validate_event()
1853 if (x86_pmu.put_event_constraints) in validate_event()
1854 x86_pmu.put_event_constraints(fake_cpuc, event); in validate_event()
1898 ret = x86_pmu.schedule_events(fake_cpuc, n, NULL); in validate_group()
1942 if (ACCESS_ONCE(x86_pmu.attr_rdpmc)) in x86_pmu_event_init()
1982 if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) { in x86_pmu_event_idx()
1994 return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc); in get_attr_rdpmc()
2011 if (x86_pmu.attr_rdpmc_broken) in set_attr_rdpmc()
2014 if ((val == 2) != (x86_pmu.attr_rdpmc == 2)) { in set_attr_rdpmc()
2027 x86_pmu.attr_rdpmc = val; in set_attr_rdpmc()
2052 if (x86_pmu.sched_task) in x86_pmu_sched_task()
2053 x86_pmu.sched_task(ctx, sched_in); in x86_pmu_sched_task()
2058 if (x86_pmu.check_microcode) in perf_check_microcode()
2059 x86_pmu.check_microcode(); in perf_check_microcode()
2098 userpg->pmc_width = x86_pmu.cntval_bits; in arch_perf_update_userpage()
2352 cap->version = x86_pmu.version; in perf_get_x86_pmu_capability()
2353 cap->num_counters_gp = x86_pmu.num_counters; in perf_get_x86_pmu_capability()
2354 cap->num_counters_fixed = x86_pmu.num_counters_fixed; in perf_get_x86_pmu_capability()
2355 cap->bit_width_gp = x86_pmu.cntval_bits; in perf_get_x86_pmu_capability()
2356 cap->bit_width_fixed = x86_pmu.cntval_bits; in perf_get_x86_pmu_capability()
2357 cap->events_mask = (unsigned int)x86_pmu.events_maskl; in perf_get_x86_pmu_capability()
2358 cap->events_mask_len = x86_pmu.events_mask_len; in perf_get_x86_pmu_capability()
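perf_get_x86_pmu_capability() is the export that lets other subsystems snapshot the PMU's geometry without reaching into x86_pmu directly; KVM, for example, uses it when sizing the PMU it advertises to guests. A minimal in-kernel usage sketch, assuming only the interfaces shown above (the consumer function and its printout are hypothetical):

    #include <linux/printk.h>
    #include <asm/perf_event.h>

    /* Sketch: a consumer snapshotting PMU geometry via the exported
     * capability call. */
    static void report_pmu_geometry(void)
    {
        struct x86_pmu_capability cap;

        perf_get_x86_pmu_capability(&cap);

        pr_info("PMU v%d: %d generic x %d bits, %d fixed x %d bits\n",
                cap.version, cap.num_counters_gp, cap.bit_width_gp,
                cap.num_counters_fixed, cap.bit_width_fixed);
    }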