Lines Matching refs:cpuc

575 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_disable_all() local
581 if (!test_bit(idx, cpuc->active_mask)) in x86_pmu_disable_all()
593 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_disable() local
598 if (!cpuc->enabled) in x86_pmu_disable()
601 cpuc->n_added = 0; in x86_pmu_disable()
602 cpuc->enabled = 0; in x86_pmu_disable()
610 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_enable_all() local
614 struct hw_perf_event *hwc = &cpuc->events[idx]->hw; in x86_pmu_enable_all()
616 if (!test_bit(idx, cpuc->active_mask)) in x86_pmu_enable_all()
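
The hits above (575-616) show the common entry pattern for these PMU callbacks: resolve the per-CPU cpu_hw_events with this_cpu_ptr(), short-circuit on the enabled flag, and only touch counters whose bit is set in active_mask. Below is a minimal userspace model of the disable path, a sketch rather than the kernel code: a plain array stands in for the per-CPU variable, a printf stub stands in for the MSR write, and the bitmap is a single word (assumes 64-bit unsigned long).

#include <stdio.h>

#define X86_PMC_IDX_MAX 64
#define NCPUS 4

/* Only the fields used below; the real struct is much larger. */
struct cpu_hw_events {
	int enabled;                  /* PMU currently enabled on this CPU? */
	int n_added;                  /* events added since the PMU was last enabled */
	unsigned long active_mask;    /* one bit per live counter (single word here) */
};

static struct cpu_hw_events cpu_hw_events[NCPUS];

static int this_cpu(void) { return 0; }          /* stand-in for smp_processor_id() */

static void disable_counter_hw(int idx)          /* stand-in for the wrmsr */
{
	printf("disable counter %d\n", idx);
}

static void pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &cpu_hw_events[this_cpu()];

	for (int idx = 0; idx < X86_PMC_IDX_MAX; idx++) {
		if (!(cpuc->active_mask & (1UL << idx)))
			continue;                /* mirrors test_bit(idx, cpuc->active_mask) */
		disable_counter_hw(idx);
	}
}

static void pmu_disable(void)
{
	struct cpu_hw_events *cpuc = &cpu_hw_events[this_cpu()];

	if (!cpuc->enabled)              /* already disabled: nothing to do */
		return;
	cpuc->n_added = 0;               /* forget pending additions */
	cpuc->enabled = 0;
	pmu_disable_all();
}

int main(void)
{
	struct cpu_hw_events *cpuc = &cpu_hw_events[this_cpu()];

	cpuc->enabled = 1;
	cpuc->active_mask = 0x5;         /* counters 0 and 2 active */
	pmu_disable();
	return 0;
}
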
813 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) in x86_schedule_events() argument
824 x86_pmu.start_scheduling(cpuc); in x86_schedule_events()
827 cpuc->event_constraint[i] = NULL; in x86_schedule_events()
828 c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]); in x86_schedule_events()
829 cpuc->event_constraint[i] = c; in x86_schedule_events()
839 hwc = &cpuc->event_list[i]->hw; in x86_schedule_events()
840 c = cpuc->event_constraint[i]; in x86_schedule_events()
873 if (is_ht_workaround_enabled() && !cpuc->is_fake && in x86_schedule_events()
874 READ_ONCE(cpuc->excl_cntrs->exclusive_present)) in x86_schedule_events()
877 unsched = perf_assign_events(cpuc->event_constraint, n, wmin, in x86_schedule_events()
893 e = cpuc->event_list[i]; in x86_schedule_events()
896 x86_pmu.commit_scheduling(cpuc, i, assign[i]); in x86_schedule_events()
903 e = cpuc->event_list[i]; in x86_schedule_events()
915 x86_pmu.put_event_constraints(cpuc, e); in x86_schedule_events()
920 x86_pmu.stop_scheduling(cpuc); in x86_schedule_events()
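
x86_schedule_events() first asks get_event_constraints() for a per-event constraint (824-829), then hands out counters; with the HT workaround enabled and exclusive events present (873-874) it skips the fast path, and on failure it releases the constraints it already took (903-915). The sketch below models only the data flow with a greedy first-fit pass; the real solver, perf_assign_events(), orders events by constraint weight, and the constraint table here is hypothetical.

/* Greedy first-fit model of the scheduling data flow; shows
 * constraints feeding the assignment, nothing more. */
#include <stdio.h>

#define MAX_EVENTS 8
#define NCOUNTERS  4

struct event_constraint {
	unsigned long idxmsk;   /* counters this event is allowed to use */
};

/* hypothetical constraint table: event 0 is fixed to counter 0 */
static struct event_constraint constraints[MAX_EVENTS] = {
	{ 0x1 }, { 0xf }, { 0xf },
};

static int schedule_events(int n, int *assign)
{
	unsigned long used = 0;

	for (int i = 0; i < n; i++) {
		struct event_constraint *c = &constraints[i];
		int idx;

		for (idx = 0; idx < NCOUNTERS; idx++) {
			if (!(c->idxmsk & (1UL << idx)) || (used & (1UL << idx)))
				continue;
			used |= 1UL << idx;
			assign[i] = idx;
			break;
		}
		if (idx == NCOUNTERS)
			return -1;   /* unschedulable; caller must drop taken constraints */
	}
	return 0;
}

int main(void)
{
	int assign[MAX_EVENTS];

	if (schedule_events(3, assign) == 0)
		for (int i = 0; i < 3; i++)
			printf("event %d -> counter %d\n", i, assign[i]);
	return 0;
}
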
929 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp) in collect_events() argument
937 n = cpuc->n_events; in collect_events()
942 cpuc->event_list[n] = leader; in collect_events()
956 cpuc->event_list[n] = event; in collect_events()
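
collect_events() appends the leader at cpuc->event_list[n] (942) and, when dogrp is true, each group sibling (956), returning the new count. A reduced model, with the kernel's sibling_list replaced by a bare next pointer and the counter limit by a fixed array:

#define MAX_EVENTS 8

struct perf_event {
	struct perf_event *sibling;   /* next group member, NULL-terminated */
};

struct cpu_hw_events {
	int n_events;
	struct perf_event *event_list[MAX_EVENTS];
};

static int collect_events(struct cpu_hw_events *cpuc,
			  struct perf_event *leader, int dogrp)
{
	int n = cpuc->n_events;

	if (n >= MAX_EVENTS)
		return -1;                        /* -EINVAL in the original */
	cpuc->event_list[n++] = leader;

	if (dogrp) {
		for (struct perf_event *e = leader->sibling; e; e = e->sibling) {
			if (n >= MAX_EVENTS)
				return -1;
			cpuc->event_list[n++] = e;
		}
	}
	return n;   /* caller decides when to commit this into cpuc->n_events */
}
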
963 struct cpu_hw_events *cpuc, int i) in x86_assign_hw_event() argument
967 hwc->idx = cpuc->assign[i]; in x86_assign_hw_event()
969 hwc->last_tag = ++cpuc->tags[i]; in x86_assign_hw_event()
986 struct cpu_hw_events *cpuc, in match_prev_assignment() argument
989 return hwc->idx == cpuc->assign[i] && in match_prev_assignment()
991 hwc->last_tag == cpuc->tags[i]; in match_prev_assignment()
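
x86_assign_hw_event() records the scheduled counter in hwc->idx and bumps a per-slot generation tag (967-969); match_prev_assignment() later compares both (989-991), which is how x86_pmu_enable() detects assignments that survived rescheduling unchanged. Stripped of everything else, the pair reduces to:

#define MAX_EVENTS 8

struct hw_perf_event { int idx; unsigned long last_tag; };

struct cpu_hw_events {
	int assign[MAX_EVENTS];          /* slot -> counter index */
	unsigned long tags[MAX_EVENTS];  /* bumped whenever a slot is (re)assigned */
};

static void assign_hw_event(struct hw_perf_event *hwc,
			    struct cpu_hw_events *cpuc, int i)
{
	hwc->idx = cpuc->assign[i];
	hwc->last_tag = ++cpuc->tags[i];
}

static int match_prev_assignment(struct hw_perf_event *hwc,
				 struct cpu_hw_events *cpuc, int i)
{
	/* unchanged only if both the counter and its generation match */
	return hwc->idx == cpuc->assign[i] &&
	       hwc->last_tag == cpuc->tags[i];
}
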
998 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_enable() local
1001 int i, added = cpuc->n_added; in x86_pmu_enable()
1006 if (cpuc->enabled) in x86_pmu_enable()
1009 if (cpuc->n_added) { in x86_pmu_enable()
1010 int n_running = cpuc->n_events - cpuc->n_added; in x86_pmu_enable()
1018 event = cpuc->event_list[i]; in x86_pmu_enable()
1028 match_prev_assignment(hwc, cpuc, i)) in x86_pmu_enable()
1044 for (i = 0; i < cpuc->n_events; i++) { in x86_pmu_enable()
1045 event = cpuc->event_list[i]; in x86_pmu_enable()
1048 if (!match_prev_assignment(hwc, cpuc, i)) in x86_pmu_enable()
1049 x86_assign_hw_event(event, cpuc, i); in x86_pmu_enable()
1058 cpuc->n_added = 0; in x86_pmu_enable()
1062 cpuc->enabled = 1; in x86_pmu_enable()
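
With n_added nonzero, x86_pmu_enable() makes two passes: the first stops every already-running event whose assignment changed, so no counter is ever driven by two events at once; the second (1044-1049) programs and starts whatever does not match its previous assignment. A condensed model (stop/start are print stubs, and the real second pass also filters on event state):

#include <stdio.h>

#define MAX_EVENTS 8

struct hw_perf_event { int idx; unsigned long last_tag; };
struct perf_event   { struct hw_perf_event hw; };

struct cpu_hw_events {
	int n_events, n_added, enabled;
	struct perf_event *event_list[MAX_EVENTS];
	int assign[MAX_EVENTS];
	unsigned long tags[MAX_EVENTS];
};

static int match_prev(struct hw_perf_event *hwc, struct cpu_hw_events *cpuc, int i)
{
	return hwc->idx == cpuc->assign[i] && hwc->last_tag == cpuc->tags[i];
}

static void stop_event(struct perf_event *e)  { printf("stop  counter %d\n", e->hw.idx); }
static void start_event(struct perf_event *e) { printf("start counter %d\n", e->hw.idx); }

static void pmu_enable(struct cpu_hw_events *cpuc)
{
	int n_running = cpuc->n_events - cpuc->n_added;

	if (cpuc->enabled)
		return;

	/* pass 1: stop running events whose counter assignment changed */
	for (int i = 0; i < n_running; i++) {
		struct perf_event *event = cpuc->event_list[i];

		if (match_prev(&event->hw, cpuc, i))
			continue;            /* same counter as before: keep it running */
		stop_event(event);
	}

	/* pass 2: program everything that moved or is new at its counter */
	for (int i = 0; i < cpuc->n_events; i++) {
		struct perf_event *event = cpuc->event_list[i];

		if (!match_prev(&event->hw, cpuc, i)) {
			/* x86_assign_hw_event() in the original */
			event->hw.idx = cpuc->assign[i];
			event->hw.last_tag = ++cpuc->tags[i];
			start_event(event);
		}
	}

	cpuc->n_added = 0;
	cpuc->enabled = 1;
}
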
1152 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_add() local
1159 n0 = cpuc->n_events; in x86_pmu_add()
1160 ret = n = collect_events(cpuc, event, false); in x86_pmu_add()
1173 if (cpuc->group_flag & PERF_EVENT_TXN) in x86_pmu_add()
1176 ret = x86_pmu.schedule_events(cpuc, n, assign); in x86_pmu_add()
1183 memcpy(cpuc->assign, assign, n*sizeof(int)); in x86_pmu_add()
1190 cpuc->n_events = n; in x86_pmu_add()
1191 cpuc->n_added += n - n0; in x86_pmu_add()
1192 cpuc->n_txn += n - n0; in x86_pmu_add()
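
x86_pmu_add() collects the new event(s), and only outside a transaction (1173) does it validate schedulability immediately and cache the assignment (1183); the hardware is untouched until x86_pmu_enable() consumes n_added. A bookkeeping sketch; collect() and schedule() here are trivial stand-ins for collect_events() and x86_pmu.schedule_events():

#define PERF_EVENT_TXN 0x1
#define MAX_EVENTS 8

struct cpu_hw_events {
	int n_events, n_added, n_txn;
	unsigned int group_flag;
	int assign[MAX_EVENTS];
};

/* trivial stand-ins for collect_events() and the scheduler */
static int collect(struct cpu_hw_events *cpuc)
{
	return cpuc->n_events < MAX_EVENTS ? cpuc->n_events + 1 : -1;
}

static int schedule(struct cpu_hw_events *cpuc, int n, int *assign)
{
	(void)cpuc;
	for (int i = 0; i < n; i++)
		assign[i] = i;
	return 0;
}

static int pmu_add(struct cpu_hw_events *cpuc)
{
	int assign[MAX_EVENTS];
	int n0 = cpuc->n_events;
	int n = collect(cpuc);

	if (n < 0)
		return n;

	if (!(cpuc->group_flag & PERF_EVENT_TXN)) {
		int ret = schedule(cpuc, n, assign);

		if (ret)
			return ret;                   /* the group does not fit */
		for (int i = 0; i < n; i++)
			cpuc->assign[i] = assign[i];  /* memcpy in the original */
	}

	cpuc->n_events = n;
	cpuc->n_added += n - n0;   /* consumed by the enable path */
	cpuc->n_txn   += n - n0;   /* undone if the transaction is cancelled */
	return 0;
}
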
1201 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_start() local
1217 cpuc->events[idx] = event; in x86_pmu_start()
1218 __set_bit(idx, cpuc->active_mask); in x86_pmu_start()
1219 __set_bit(idx, cpuc->running); in x86_pmu_start()
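
Starting an event publishes it in three places (1217-1219): events[idx] so the interrupt handler can map a counter back to its event, active_mask so the enable/disable loops see it, and running so the NMI handler can tell a recently stopped counter from a spurious NMI. In miniature, with plain bit ops instead of __set_bit() and single-word masks:

#define X86_PMC_IDX_MAX 64

struct perf_event;

struct cpu_hw_events {
	struct perf_event *events[X86_PMC_IDX_MAX];  /* counter -> owning event */
	unsigned long active_mask;                   /* counter is live */
	unsigned long running;                       /* counter ran since bit last cleared */
};

static void pmu_start(struct cpu_hw_events *cpuc, struct perf_event *event, int idx)
{
	cpuc->events[idx] = event;
	cpuc->active_mask |= 1UL << idx;   /* __set_bit() in the original */
	cpuc->running     |= 1UL << idx;
	/* ...the real function then programs and arms the counter... */
}
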
1228 struct cpu_hw_events *cpuc; in perf_event_print_debug() local
1238 cpuc = &per_cpu(cpu_hw_events, cpu); in perf_event_print_debug()
1260 pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask); in perf_event_print_debug()
1286 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_stop() local
1289 if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) { in x86_pmu_stop()
1291 cpuc->events[hwc->idx] = NULL; in x86_pmu_stop()
1308 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_del() local
1324 if (cpuc->group_flag & PERF_EVENT_TXN) in x86_pmu_del()
1332 for (i = 0; i < cpuc->n_events; i++) { in x86_pmu_del()
1333 if (event == cpuc->event_list[i]) in x86_pmu_del()
1337 if (WARN_ON_ONCE(i == cpuc->n_events)) /* called ->del() without ->add() ? */ in x86_pmu_del()
1341 if (i >= cpuc->n_events - cpuc->n_added) in x86_pmu_del()
1342 --cpuc->n_added; in x86_pmu_del()
1345 x86_pmu.put_event_constraints(cpuc, event); in x86_pmu_del()
1348 while (++i < cpuc->n_events) { in x86_pmu_del()
1349 cpuc->event_list[i-1] = cpuc->event_list[i]; in x86_pmu_del()
1350 cpuc->event_constraint[i-1] = cpuc->event_constraint[i]; in x86_pmu_del()
1352 --cpuc->n_events; in x86_pmu_del()
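
x86_pmu_del() finds the event's slot, decrements n_added if the event was still pending (1341-1342), then compacts event_list and event_constraint by shifting the tail down one slot (1348-1350). The compaction reduces to:

#include <assert.h>

#define MAX_EVENTS 8

struct perf_event;
struct event_constraint;

struct cpu_hw_events {
	int n_events, n_added;
	struct perf_event *event_list[MAX_EVENTS];
	struct event_constraint *event_constraint[MAX_EVENTS];
};

static void pmu_del(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	int i;

	for (i = 0; i < cpuc->n_events; i++)
		if (event == cpuc->event_list[i])
			break;

	assert(i != cpuc->n_events);         /* WARN_ON_ONCE in the original */

	if (i >= cpuc->n_events - cpuc->n_added)
		--cpuc->n_added;                 /* was added but never programmed */

	while (++i < cpuc->n_events) {       /* close the gap */
		cpuc->event_list[i - 1] = cpuc->event_list[i];
		cpuc->event_constraint[i - 1] = cpuc->event_constraint[i];
	}
	--cpuc->n_events;
}
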
1360 struct cpu_hw_events *cpuc; in x86_pmu_handle_irq() local
1365 cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_handle_irq()
1378 if (!test_bit(idx, cpuc->active_mask)) { in x86_pmu_handle_irq()
1384 if (__test_and_clear_bit(idx, cpuc->running)) in x86_pmu_handle_irq()
1389 event = cpuc->events[idx]; in x86_pmu_handle_irq()
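
The interrupt handler walks every counter (1378-1389). For an index missing from active_mask it still test-and-clears the running bit and counts the NMI as handled: a counter that overflowed right before being stopped can leave one last NMI behind, and without this it would be reported as unknown. A sketch of that shape; counter_overflowed() and process_overflow() are stubs for the counter read and the perf_event_overflow() path:

#define X86_PMC_IDX_MAX 64

struct perf_event;

struct cpu_hw_events {
	struct perf_event *events[X86_PMC_IDX_MAX];
	unsigned long active_mask;
	unsigned long running;
};

static int counter_overflowed(int idx)                  /* stub for the counter read */
{
	(void)idx;
	return 0;
}

static void process_overflow(struct perf_event *event)  /* stub for the overflow path */
{
	(void)event;
}

static int pmu_handle_irq(struct cpu_hw_events *cpuc)
{
	int handled = 0;

	for (int idx = 0; idx < X86_PMC_IDX_MAX; idx++) {
		unsigned long bit = 1UL << idx;

		if (!(cpuc->active_mask & bit)) {
			/* a counter stopped after overflowing still owes one NMI */
			if (cpuc->running & bit) {    /* __test_and_clear_bit() originally */
				cpuc->running &= ~bit;
				handled++;
			}
			continue;
		}
		if (!counter_overflowed(idx))
			continue;
		process_overflow(cpuc->events[idx]);
		handled++;
	}
	return handled;   /* 0 means the NMI was not ours */
}
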
1452 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); in x86_pmu_notifier() local
1458 cpuc->kfree_on_online[i] = NULL; in x86_pmu_notifier()
1470 kfree(cpuc->kfree_on_online[i]); in x86_pmu_notifier()
1471 cpuc->kfree_on_online[i] = NULL; in x86_pmu_notifier()
1776 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_commit_txn() local
1780 n = cpuc->n_events; in x86_pmu_commit_txn()
1785 ret = x86_pmu.schedule_events(cpuc, n, assign); in x86_pmu_commit_txn()
1793 memcpy(cpuc->assign, assign, n*sizeof(int)); in x86_pmu_commit_txn()
1795 cpuc->group_flag &= ~PERF_EVENT_TXN; in x86_pmu_commit_txn()
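
x86_pmu_commit_txn() reschedules the full event set (n = cpuc->n_events covers both pre-existing and newly collected events), publishes the assignment (1793), and clears PERF_EVENT_TXN (1795); only then do the deferred additions from x86_pmu_add() become final. Modeled with the same kind of stub solver as above:

#define PERF_EVENT_TXN 0x1
#define MAX_EVENTS 8

struct cpu_hw_events {
	int n_events;
	unsigned int group_flag;
	int assign[MAX_EVENTS];
};

static int schedule(struct cpu_hw_events *cpuc, int n, int *assign)  /* stub solver */
{
	(void)cpuc;
	for (int i = 0; i < n; i++)
		assign[i] = i;
	return 0;
}

static int pmu_commit_txn(struct cpu_hw_events *cpuc)
{
	int assign[MAX_EVENTS];
	int n = cpuc->n_events;
	int ret = schedule(cpuc, n, assign);

	if (ret)
		return ret;                      /* caller will cancel the transaction */

	for (int i = 0; i < n; i++)              /* memcpy in the original */
		cpuc->assign[i] = assign[i];

	cpuc->group_flag &= ~PERF_EVENT_TXN;     /* additions are now final */
	return 0;
}
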
1807 static void free_fake_cpuc(struct cpu_hw_events *cpuc) in free_fake_cpuc() argument
1809 kfree(cpuc->shared_regs); in free_fake_cpuc()
1810 kfree(cpuc); in free_fake_cpuc()
1815 struct cpu_hw_events *cpuc; in allocate_fake_cpuc() local
1818 cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL); in allocate_fake_cpuc()
1819 if (!cpuc) in allocate_fake_cpuc()
1824 cpuc->shared_regs = allocate_shared_regs(cpu); in allocate_fake_cpuc()
1825 if (!cpuc->shared_regs) in allocate_fake_cpuc()
1828 cpuc->is_fake = 1; in allocate_fake_cpuc()
1829 return cpuc; in allocate_fake_cpuc()
1831 free_fake_cpuc(cpuc); in allocate_fake_cpuc()
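
The fake cpuc exists so event validation can run the scheduler without touching live per-CPU state; is_fake = 1 (1828) is exactly what x86_schedule_events() tests at 873 to skip the HT workaround. A userspace model, with calloc/free standing in for kzalloc/kfree and a placeholder for allocate_shared_regs() (the original returns ERR_PTR(-ENOMEM) rather than NULL on failure):

#include <stdlib.h>

struct intel_shared_regs;   /* opaque here */

struct cpu_hw_events {
	int is_fake;            /* tells the scheduler to skip live-CPU-only logic */
	struct intel_shared_regs *shared_regs;
};

static void free_fake_cpuc(struct cpu_hw_events *cpuc)
{
	free(cpuc->shared_regs);   /* free(NULL) is safe, as is kfree(NULL) */
	free(cpuc);
}

static struct cpu_hw_events *allocate_fake_cpuc(void)
{
	struct cpu_hw_events *cpuc = calloc(1, sizeof(*cpuc));

	if (!cpuc)
		return NULL;

	cpuc->shared_regs = calloc(1, 64);   /* placeholder for allocate_shared_regs() */
	if (!cpuc->shared_regs) {
		free_fake_cpuc(cpuc);
		return NULL;
	}
	cpuc->is_fake = 1;
	return cpuc;
}
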