Lines matching refs: evsel

166 static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)  in perf_evsel__cpus()  argument
168 return (evsel->cpus && !target.cpu_list) ? evsel->cpus : evsel_list->cpus; in perf_evsel__cpus()
171 static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel) in perf_evsel__nr_cpus() argument
173 return perf_evsel__cpus(evsel)->nr; in perf_evsel__nr_cpus()
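The two helpers above decide which CPU map a counter is opened on: an event's own cpus map is preferred, unless the user gave an explicit cpu list, in which case the evlist-wide map wins. Below is a minimal stand-alone sketch of that fallback, assuming heavily simplified stand-in types (cpu_map reduced to a bare nr field; the target cpu list and evlist map modeled as globals) rather than perf's real definitions:

    #include <stdio.h>

    /* stand-ins, far smaller than perf's real structs */
    struct cpu_map { int nr; };
    struct evsel   { struct cpu_map *cpus; };

    static struct cpu_map *evlist_cpus;   /* map covering the whole event list */
    static const char *target_cpu_list;   /* set when the user passed an explicit cpu list */

    static struct cpu_map *evsel_cpus(struct evsel *evsel)
    {
        /* prefer the event's own map unless a user cpu list overrides it */
        return (evsel->cpus && !target_cpu_list) ? evsel->cpus : evlist_cpus;
    }

    static int evsel_nr_cpus(struct evsel *evsel)
    {
        return evsel_cpus(evsel)->nr;
    }

    int main(void)
    {
        struct cpu_map all = { .nr = 8 }, own = { .nr = 2 };
        struct evsel ev = { .cpus = &own };

        evlist_cpus = &all;
        printf("%d\n", evsel_nr_cpus(&ev));   /* 2: event's own map */
        target_cpu_list = "0-7";
        printf("%d\n", evsel_nr_cpus(&ev));   /* 8: user cpu list overrides */
        return 0;
    }

The same nr value is what perf_evsel__nr_cpus() hands to the per-cpu counts allocators further down the listing.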
176 static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel) in perf_evsel__reset_stat_priv() argument
179 struct perf_stat *ps = evsel->priv; in perf_evsel__reset_stat_priv()
185 static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel) in perf_evsel__alloc_stat_priv() argument
187 evsel->priv = zalloc(sizeof(struct perf_stat)); in perf_evsel__alloc_stat_priv()
188 if (evsel->priv == NULL) in perf_evsel__alloc_stat_priv()
190 perf_evsel__reset_stat_priv(evsel); in perf_evsel__alloc_stat_priv()
194 static void perf_evsel__free_stat_priv(struct perf_evsel *evsel) in perf_evsel__free_stat_priv() argument
196 zfree(&evsel->priv); in perf_evsel__free_stat_priv()
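Per-event statistics live behind evsel->priv: the alloc helper zeroes a fresh struct perf_stat, reset re-clears it in place between runs, and the free helper releases it and NULLs the pointer via zfree(). A small sketch of that lifecycle, assuming plain calloc()/free() in place of perf's zalloc()/zfree() and a made-up stat_priv stand-in for struct perf_stat:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    struct stats     { double mean, M2; int n; };      /* stand-in for perf's struct stats */
    struct stat_priv { struct stats res_stats[3]; };   /* stand-in for struct perf_stat */
    struct evsel     { void *priv; };

    static void reset_stat_priv(struct evsel *evsel)
    {
        struct stat_priv *ps = evsel->priv;

        memset(ps->res_stats, 0, sizeof(ps->res_stats));
    }

    static int alloc_stat_priv(struct evsel *evsel)
    {
        evsel->priv = calloc(1, sizeof(struct stat_priv));  /* zeroed, like zalloc() */
        if (evsel->priv == NULL)
            return -ENOMEM;
        reset_stat_priv(evsel);
        return 0;
    }

    static void free_stat_priv(struct evsel *evsel)
    {
        free(evsel->priv);      /* zfree(): free and clear the pointer */
        evsel->priv = NULL;
    }

    int main(void)
    {
        struct evsel ev = { .priv = NULL };

        if (alloc_stat_priv(&ev))
            return 1;
        reset_stat_priv(&ev);   /* e.g. between repeated runs or interval prints */
        free_stat_priv(&ev);
        return 0;
    }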
199 static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel) in perf_evsel__alloc_prev_raw_counts() argument
204 sz = sizeof(*evsel->counts) + in perf_evsel__alloc_prev_raw_counts()
205 (perf_evsel__nr_cpus(evsel) * sizeof(struct perf_counts_values)); in perf_evsel__alloc_prev_raw_counts()
211 evsel->prev_raw_counts = addr; in perf_evsel__alloc_prev_raw_counts()
216 static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel) in perf_evsel__free_prev_raw_counts() argument
218 zfree(&evsel->prev_raw_counts); in perf_evsel__free_prev_raw_counts()
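prev_raw_counts is sized the same way as evsel->counts: one header plus one perf_counts_values slot per CPU, so interval output can diff each new reading against the previous one. A sketch of that flexible-array sizing, again with stand-in types rather than perf's struct perf_counts:

    #include <stdlib.h>

    struct counts_values { unsigned long long val, ena, run; };

    /* stand-in for perf's per-event counts: an aggregate plus one slot per cpu */
    struct counts {
        struct counts_values aggr;
        struct counts_values cpu[];   /* flexible array, one entry per cpu */
    };

    static struct counts *alloc_counts(int nr_cpus)
    {
        size_t sz = sizeof(struct counts) +
                    nr_cpus * sizeof(struct counts_values);

        return calloc(1, sz);         /* zeroed, like zalloc() */
    }

    int main(void)
    {
        struct counts *prev = alloc_counts(8);

        if (!prev)
            return 1;
        prev->cpu[7].val = 42;        /* last valid slot for 8 cpus */
        free(prev);
        return 0;
    }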
223 struct perf_evsel *evsel; in perf_evlist__free_stats() local
225 evlist__for_each(evlist, evsel) { in perf_evlist__free_stats()
226 perf_evsel__free_stat_priv(evsel); in perf_evlist__free_stats()
227 perf_evsel__free_counts(evsel); in perf_evlist__free_stats()
228 perf_evsel__free_prev_raw_counts(evsel); in perf_evlist__free_stats()
234 struct perf_evsel *evsel; in perf_evlist__alloc_stats() local
236 evlist__for_each(evlist, evsel) { in perf_evlist__alloc_stats()
237 if (perf_evsel__alloc_stat_priv(evsel) < 0 || in perf_evlist__alloc_stats()
238 perf_evsel__alloc_counts(evsel, perf_evsel__nr_cpus(evsel)) < 0 || in perf_evlist__alloc_stats()
239 (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel) < 0)) in perf_evlist__alloc_stats()
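The listing cuts off before the error handling, but the visible shape of perf_evlist__alloc_stats() is all-or-nothing: each event needs its stat priv, its counts array and, when alloc_raw is set, its prev_raw_counts, and a failure anywhere should release whatever was already allocated; the free helpers shown above tolerate NULL pointers, so a partially initialized list is safe to unwind. A generic sketch of that pattern over a plain array, not perf's evlist iteration:

    #include <stdlib.h>

    struct ev { void *priv; void *counts; };

    static void free_all(struct ev *evs, int n)
    {
        for (int i = 0; i < n; i++) {
            free(evs[i].priv);   evs[i].priv = NULL;    /* free() accepts NULL */
            free(evs[i].counts); evs[i].counts = NULL;
        }
    }

    static int alloc_all(struct ev *evs, int n, size_t priv_sz, size_t counts_sz)
    {
        for (int i = 0; i < n; i++) {
            if (!(evs[i].priv = calloc(1, priv_sz)) ||
                !(evs[i].counts = calloc(1, counts_sz)))
                goto out_free;          /* unwind everything on the first failure */
        }
        return 0;
    out_free:
        free_all(evs, n);
        return -1;
    }

    int main(void)
    {
        struct ev evs[4] = { {0} };

        if (alloc_all(evs, 4, 64, 256) < 0)
            return 1;
        free_all(evs, 4);
        return 0;
    }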
268 struct perf_evsel *evsel; in perf_stat__reset_stats() local
270 evlist__for_each(evlist, evsel) { in perf_stat__reset_stats()
271 perf_evsel__reset_stat_priv(evsel); in perf_stat__reset_stats()
272 perf_evsel__reset_counts(evsel, perf_evsel__nr_cpus(evsel)); in perf_stat__reset_stats()
294 static int create_perf_stat_counter(struct perf_evsel *evsel) in create_perf_stat_counter() argument
296 struct perf_event_attr *attr = &evsel->attr; in create_perf_stat_counter()
305 return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel)); in create_perf_stat_counter()
307 if (!target__has_task(&target) && perf_evsel__is_group_leader(evsel)) { in create_perf_stat_counter()
313 return perf_evsel__open_per_thread(evsel, evsel_list->threads); in create_perf_stat_counter()
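create_perf_stat_counter() opens the event either once per CPU (perf_evsel__open_per_cpu(), for system-wide or cpu-list targets) or once per thread of the monitored task (perf_evsel__open_per_thread()); both wrappers bottom out in the perf_event_open(2) syscall. For reference, a minimal stand-alone per-thread counter using the raw syscall directly, not perf's wrappers:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        struct perf_event_attr attr;
        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.disabled = 1;
        attr.exclude_kernel = 1;

        /* pid = 0, cpu = -1: count this thread on whatever cpu it runs on */
        int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_RESET, 0);
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        for (volatile int i = 0; i < 1000000; i++)
            ;                            /* some work to count */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        uint64_t count;
        if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
            printf("instructions: %llu\n", (unsigned long long)count);
        close(fd);
        return 0;
    }

Counting the current thread with cpu == -1 corresponds to the per-thread case above; opening one fd per entry of perf_evsel__cpus() with pid == -1 corresponds to the per-cpu case.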
319 static inline int nsec_counter(struct perf_evsel *evsel) in nsec_counter() argument
321 if (perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) || in nsec_counter()
322 perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) in nsec_counter()
428 static int read_cb(struct perf_evsel *evsel, int cpu, int thread __maybe_unused, in read_cb() argument
431 struct perf_counts_values *aggr = &evsel->counts->aggr; in read_cb()
435 if (check_per_pkg(evsel, cpu, &skip)) { in read_cb()
447 if (!evsel->snapshot) in read_cb()
448 perf_evsel__compute_deltas(evsel, cpu, count); in read_cb()
450 evsel->counts->cpu[cpu] = *count; in read_cb()
452 update_shadow_stats(evsel, count->values, cpu); in read_cb()
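read_cb() runs once per (event, cpu) pair: unless the event is snapshot-style, the raw value read from the kernel is turned into a delta against the previous reading kept in prev_raw_counts, stored in counts->cpu[cpu], and fed to the shadow statistics used later by the ratio printers. A sketch of the delta step over the same val/ena/run triple, with stand-in types only:

    #include <stdio.h>

    struct counts_values { unsigned long long val, ena, run; };

    /* subtract the previous reading and remember the current one for next time */
    static void compute_delta(struct counts_values *prev, struct counts_values *count)
    {
        struct counts_values tmp = *prev;

        *prev = *count;             /* current raw reading becomes the new baseline */
        count->val -= tmp.val;
        count->ena -= tmp.ena;      /* time enabled */
        count->run -= tmp.run;      /* time running, used to correct for multiplexing */
    }

    int main(void)
    {
        struct counts_values prev = {0}, now = { 1000, 50, 50 };

        compute_delta(&prev, &now);     /* first interval: delta equals the raw value */
        printf("%llu\n", now.val);

        now = (struct counts_values){ 1800, 100, 100 };
        compute_delta(&prev, &now);     /* second interval: 1800 - 1000 = 800 */
        printf("%llu\n", now.val);
        return 0;
    }

The ena/run pair is what perf later uses to scale counters that were multiplexed off the PMU for part of the measurement.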
798 static void print_noise(struct perf_evsel *evsel, double avg) in print_noise() argument
805 ps = evsel->priv; in print_noise()
809 static void aggr_printout(struct perf_evsel *evsel, int id, int nr) in aggr_printout() argument
834 perf_evsel__cpus(evsel)->map[id], csv_sep); in aggr_printout()
842 static void nsec_printout(int id, int nr, struct perf_evsel *evsel, double avg) in nsec_printout() argument
851 aggr_printout(evsel, id, nr); in nsec_printout()
854 perf_evsel__name(evsel), csv_output ? "" : " (msec)"); in nsec_printout()
859 fprintf(output, "%s%s", evsel->unit, csv_sep); in nsec_printout()
861 fprintf(output, "%-*s%s", unit_width, evsel->unit, csv_sep); in nsec_printout()
865 if (evsel->cgrp) in nsec_printout()
866 fprintf(output, "%s%s", csv_sep, evsel->cgrp->name); in nsec_printout()
871 if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) in nsec_printout()
906 struct perf_evsel *evsel in print_stalled_cycles_frontend()
925 struct perf_evsel *evsel in print_stalled_cycles_backend()
944 struct perf_evsel *evsel __maybe_unused, in print_branch_misses()
963 struct perf_evsel *evsel __maybe_unused, in print_l1_dcache_misses()
982 struct perf_evsel *evsel __maybe_unused, in print_l1_icache_misses()
1001 struct perf_evsel *evsel __maybe_unused, in print_dtlb_cache_misses()
1020 struct perf_evsel *evsel __maybe_unused, in print_itlb_cache_misses()
1039 struct perf_evsel *evsel __maybe_unused, in print_ll_cache_misses()
1057 static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg) in abs_printout() argument
1060 double sc = evsel->scale; in abs_printout()
1073 aggr_printout(evsel, id, nr); in abs_printout()
1080 if (evsel->unit) in abs_printout()
1083 evsel->unit, csv_sep); in abs_printout()
1085 fprintf(output, "%-*s", csv_output ? 0 : 25, perf_evsel__name(evsel)); in abs_printout()
1087 if (evsel->cgrp) in abs_printout()
1088 fprintf(output, "%s%s", csv_sep, evsel->cgrp->name); in abs_printout()
1093 if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) { in abs_printout()
1112 } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) && in abs_printout()
1114 print_branch_misses(cpu, evsel, avg); in abs_printout()
1116 evsel->attr.type == PERF_TYPE_HW_CACHE && in abs_printout()
1117 evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D | in abs_printout()
1121 print_l1_dcache_misses(cpu, evsel, avg); in abs_printout()
1123 evsel->attr.type == PERF_TYPE_HW_CACHE && in abs_printout()
1124 evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1I | in abs_printout()
1128 print_l1_icache_misses(cpu, evsel, avg); in abs_printout()
1130 evsel->attr.type == PERF_TYPE_HW_CACHE && in abs_printout()
1131 evsel->attr.config == ( PERF_COUNT_HW_CACHE_DTLB | in abs_printout()
1135 print_dtlb_cache_misses(cpu, evsel, avg); in abs_printout()
1137 evsel->attr.type == PERF_TYPE_HW_CACHE && in abs_printout()
1138 evsel->attr.config == ( PERF_COUNT_HW_CACHE_ITLB | in abs_printout()
1142 print_itlb_cache_misses(cpu, evsel, avg); in abs_printout()
1144 evsel->attr.type == PERF_TYPE_HW_CACHE && in abs_printout()
1145 evsel->attr.config == ( PERF_COUNT_HW_CACHE_LL | in abs_printout()
1149 print_ll_cache_misses(cpu, evsel, avg); in abs_printout()
1150 } else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) && in abs_printout()
1159 } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) { in abs_printout()
1160 print_stalled_cycles_frontend(cpu, evsel, avg); in abs_printout()
1161 } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) { in abs_printout()
1162 print_stalled_cycles_backend(cpu, evsel, avg); in abs_printout()
1163 } else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) { in abs_printout()
1173 perf_evsel__cmp(evsel, nth_evsel(T_CYCLES_IN_TX))) { in abs_printout()
1180 perf_evsel__cmp(evsel, nth_evsel(T_CYCLES_IN_TX_CP))) { in abs_printout()
1190 perf_evsel__cmp(evsel, nth_evsel(T_TRANSACTION_START)) && in abs_printout()
1200 perf_evsel__cmp(evsel, nth_evsel(T_ELISION_START)) && in abs_printout()
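The truncated attr.config comparisons above all follow the hardware-cache event encoding from the perf_event ABI: cache id in bits 0-7, operation id in bits 8-15, result id in bits 16-23; the unlisted continuation lines supply the op and result parts after each dangling '|'. A small sketch building two such configs from the ABI constants:

    #include <linux/perf_event.h>
    #include <stdint.h>
    #include <stdio.h>

    /* PERF_TYPE_HW_CACHE config layout:
     * bits 0-7 cache id, bits 8-15 op id, bits 16-23 result id */
    static uint64_t hw_cache_config(int cache, int op, int result)
    {
        return (uint64_t)cache | ((uint64_t)op << 8) | ((uint64_t)result << 16);
    }

    int main(void)
    {
        uint64_t l1d_read_miss = hw_cache_config(PERF_COUNT_HW_CACHE_L1D,
                                                 PERF_COUNT_HW_CACHE_OP_READ,
                                                 PERF_COUNT_HW_CACHE_RESULT_MISS);
        uint64_t dtlb_read_miss = hw_cache_config(PERF_COUNT_HW_CACHE_DTLB,
                                                  PERF_COUNT_HW_CACHE_OP_READ,
                                                  PERF_COUNT_HW_CACHE_RESULT_MISS);

        printf("L1d read miss config:  0x%llx\n", (unsigned long long)l1d_read_miss);
        printf("dTLB read miss config: 0x%llx\n", (unsigned long long)dtlb_read_miss);
        return 0;
    }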