nr_events        1852 block/genhd.c  	int nr_events = 0, i;
nr_events        1880 block/genhd.c  			envp[nr_events++] = disk_uevents[i];
nr_events        1882 block/genhd.c  	if (nr_events)
nr_events         146 drivers/firmware/efi/libstub/tpm.c 	if (final_events_table && final_events_table->nr_events) {
nr_events         151 drivers/firmware/efi/libstub/tpm.c 		int i = final_events_table->nr_events;
nr_events         155 drivers/firmware/efi/libstub/tpm.c 			sizeof(final_events_table->nr_events);
nr_events          82 drivers/firmware/efi/tpm.c 	if (final_tbl->nr_events != 0) {
nr_events          85 drivers/firmware/efi/tpm.c 				+ sizeof(final_tbl->nr_events);
nr_events          88 drivers/firmware/efi/tpm.c 						    final_tbl->nr_events,
nr_events        1079 drivers/gpu/drm/etnaviv/etnaviv_gpu.c static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
nr_events        1085 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 	for (i = 0; i < nr_events; i++) {
nr_events        1101 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 	for (i = 0; i < nr_events; i++) {
nr_events        1267 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 	unsigned int i, nr_events = 1, event[3];
nr_events        1285 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 		nr_events = 3;
nr_events        1287 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 	ret = event_alloc(gpu, nr_events, event);
nr_events        1297 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 		for (i = 0; i < nr_events; i++)
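
The etnaviv lines above follow an allocate-N-or-roll-back pattern: event_alloc() grabs nr_events hardware event slots up front and, if any allocation fails, the slots already taken are released again. A minimal userspace sketch of that pattern, with hypothetical get_slot()/put_slot() helpers standing in for the driver's internals:

	#include <stdbool.h>

	#define NR_SLOTS 8	/* assumed pool size, for illustration only */

	static bool slot_busy[NR_SLOTS];

	static int get_slot(void)
	{
		int i;

		for (i = 0; i < NR_SLOTS; i++)
			if (!slot_busy[i]) {
				slot_busy[i] = true;
				return i;
			}
		return -1;
	}

	static void put_slot(int slot)
	{
		slot_busy[slot] = false;
	}

	static int alloc_events(unsigned int nr_events, int *out)
	{
		unsigned int i, j;

		for (i = 0; i < nr_events; i++) {
			out[i] = get_slot();
			if (out[i] < 0)
				goto rollback;
		}
		return 0;

	rollback:
		/* undo the allocations that already succeeded */
		for (j = 0; j < i; j++)
			put_slot(out[j]);
		return -1;
	}
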
nr_events         317 drivers/pci/controller/pcie-iproc-msi.c 	u32 eq, head, tail, nr_events;
nr_events         347 drivers/pci/controller/pcie-iproc-msi.c 		nr_events = (tail < head) ?
nr_events         349 drivers/pci/controller/pcie-iproc-msi.c 		if (!nr_events)
nr_events         353 drivers/pci/controller/pcie-iproc-msi.c 		while (nr_events--) {
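
The pcie-iproc-msi.c lines above derive nr_events from the head and tail indices of a circular event queue and then drain that many entries. The continuation of the ternary is not shown in the listing; the sketch below is one common way to count pending entries with wrap-around, under an assumed queue depth, and is not necessarily the driver's exact expression:

	#define EQ_LEN 64U	/* assumed queue depth */

	static unsigned int eq_pending(unsigned int head, unsigned int tail)
	{
		/* when the producer has wrapped past the end, add a full queue length */
		return (tail < head) ? EQ_LEN - head + tail : tail - head;
	}
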
nr_events          18 drivers/s390/net/fsm.c 		int nr_events, const fsm_node *tmpl, int tmpl_len, gfp_t order)
nr_events          41 drivers/s390/net/fsm.c 	f->nr_events = nr_events;
nr_events          47 drivers/s390/net/fsm.c 	m = kcalloc(nr_states*nr_events, sizeof(fsm_function_t), order);
nr_events          58 drivers/s390/net/fsm.c 		    (tmpl[i].cond_event >= nr_events)   ) {
nr_events          62 drivers/s390/net/fsm.c 				(long)tmpl[i].cond_event, (long)f->nr_events);
nr_events          45 drivers/s390/net/fsm.h 	int nr_events;
nr_events         112 drivers/s390/net/fsm.h 	 int nr_states, int nr_events, const fsm_node *tmpl,
nr_events         150 drivers/s390/net/fsm.h 	    (event >= fi->f->nr_events)       ) {
nr_events         153 drivers/s390/net/fsm.h 			(long)fi->f->nr_events);
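
The s390 fsm lines above size a jump table as nr_states * nr_events function pointers and bounds-check incoming events against nr_events. A minimal userspace sketch of such a flattened state x event dispatch table; the struct names and the row-major index formula are illustrative choices, not necessarily the kernel's:

	#include <stdlib.h>

	typedef void (*handler_t)(void *arg);

	struct fsm_table {
		int nr_states;
		int nr_events;
		handler_t *jump;	/* nr_states * nr_events slots */
	};

	static struct fsm_table *fsm_table_alloc(int nr_states, int nr_events)
	{
		struct fsm_table *t = calloc(1, sizeof(*t));

		if (!t)
			return NULL;
		t->nr_states = nr_states;
		t->nr_events = nr_events;
		t->jump = calloc((size_t)nr_states * nr_events, sizeof(handler_t));
		if (!t->jump) {
			free(t);
			return NULL;
		}
		return t;
	}

	static handler_t fsm_lookup(struct fsm_table *t, int state, int event)
	{
		/* reject out-of-range input, mirroring the >= nr_events checks above */
		if (state < 0 || state >= t->nr_states ||
		    event < 0 || event >= t->nr_events)
			return NULL;
		return t->jump[state * t->nr_events + event];
	}
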
nr_events         122 fs/aio.c       	unsigned		nr_events;
nr_events         461 fs/aio.c       static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
nr_events         471 fs/aio.c       	nr_events += 2;	/* 1 is required, 2 for good luck */
nr_events         474 fs/aio.c       	size += sizeof(struct io_event) * nr_events;
nr_events         487 fs/aio.c       	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
nr_events         542 fs/aio.c       	ctx->nr_events = nr_events; /* trusted copy */
nr_events         545 fs/aio.c       	ring->nr = nr_events;	/* user copy */
nr_events         702 fs/aio.c       static struct kioctx *ioctx_alloc(unsigned nr_events)
nr_events         712 fs/aio.c       	unsigned int max_reqs = nr_events;
nr_events         723 fs/aio.c       	nr_events = max(nr_events, num_possible_cpus() * 4);
nr_events         724 fs/aio.c       	nr_events *= 2;
nr_events         727 fs/aio.c       	if (nr_events > (0x10000000U / sizeof(struct io_event))) {
nr_events         732 fs/aio.c       	if (!nr_events || (unsigned long)max_reqs > aio_max_nr)
nr_events         761 fs/aio.c       	err = aio_setup_ring(ctx, nr_events);
nr_events         765 fs/aio.c       	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
nr_events         766 fs/aio.c       	ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
nr_events         792 fs/aio.c       		 ctx, ctx->user_id, mm, ctx->nr_events);
nr_events         963 fs/aio.c       	head %= ctx->nr_events;
nr_events         967 fs/aio.c       		events_in_ring = ctx->nr_events - (head - tail);
nr_events        1107 fs/aio.c       	if (++tail >= ctx->nr_events)
nr_events        1203 fs/aio.c       	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
nr_events        1208 fs/aio.c       	head %= ctx->nr_events;
nr_events        1209 fs/aio.c       	tail %= ctx->nr_events;
nr_events        1216 fs/aio.c       		avail = (head <= tail ?  tail : ctx->nr_events) - head;
nr_events        1239 fs/aio.c       		head %= ctx->nr_events;
nr_events        1313 fs/aio.c       SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
nr_events        1324 fs/aio.c       	if (unlikely(ctx || nr_events == 0)) {
nr_events        1326 fs/aio.c       		         ctx, nr_events);
nr_events        1330 fs/aio.c       	ioctx = ioctx_alloc(nr_events);
nr_events        1344 fs/aio.c       COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p)
nr_events        1355 fs/aio.c       	if (unlikely(ctx || nr_events == 0)) {
nr_events        1357 fs/aio.c       		         ctx, nr_events);
nr_events        1361 fs/aio.c       	ioctx = ioctx_alloc(nr_events);
nr_events        1932 fs/aio.c       	if (nr > ctx->nr_events)
nr_events        1933 fs/aio.c       		nr = ctx->nr_events;
nr_events        1974 fs/aio.c       	if (nr > ctx->nr_events)
nr_events        1975 fs/aio.c       		nr = ctx->nr_events;
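
The fs/aio.c lines above show the kernel side of io_setup(): ioctx_alloc() raises the requested nr_events to at least four per possible CPU, doubles it, and rejects oversized requests, while aio_setup_ring() adds two slack slots and recomputes nr_events from the pages actually mapped. A small userspace usage sketch of the syscall itself, using raw syscall(2) calls so it does not depend on libaio:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/aio_abi.h>

	int main(void)
	{
		aio_context_t ctx = 0;		/* must be zero, or io_setup() fails */
		unsigned nr_events = 128;	/* the kernel may round this up internally */

		if (syscall(SYS_io_setup, nr_events, &ctx) < 0) {
			perror("io_setup");
			return 1;
		}
		printf("aio context ready for %u requested events\n", nr_events);
		syscall(SYS_io_destroy, ctx);
		return 0;
	}
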
nr_events         765 fs/io_uring.c  static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
nr_events         778 fs/io_uring.c  		(*nr_events)++;
nr_events         801 fs/io_uring.c  static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
nr_events         813 fs/io_uring.c  	spin = !ctx->poll_multi_file && *nr_events < min;
nr_events         841 fs/io_uring.c  		io_iopoll_complete(ctx, nr_events, &done);
nr_events         851 fs/io_uring.c  static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
nr_events         857 fs/io_uring.c  		ret = io_do_iopoll(ctx, nr_events, min);
nr_events         860 fs/io_uring.c  		if (!min || *nr_events >= min)
nr_events         878 fs/io_uring.c  		unsigned int nr_events = 0;
nr_events         880 fs/io_uring.c  		io_iopoll_getevents(ctx, &nr_events, 1);
nr_events         891 fs/io_uring.c  static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
nr_events         928 fs/io_uring.c  		if (*nr_events < min)
nr_events         929 fs/io_uring.c  			tmin = min - *nr_events;
nr_events         931 fs/io_uring.c  		ret = io_iopoll_getevents(ctx, nr_events, tmin);
nr_events         935 fs/io_uring.c  	} while (min && !*nr_events && !need_resched());
nr_events        2765 fs/io_uring.c  			unsigned nr_events = 0;
nr_events        2778 fs/io_uring.c  					io_iopoll_getevents(ctx, &nr_events, 0);
nr_events        2787 fs/io_uring.c  				nr_events = inflight;
nr_events        2790 fs/io_uring.c  			inflight -= nr_events;
nr_events        3775 fs/io_uring.c  		unsigned nr_events = 0;
nr_events        3780 fs/io_uring.c  			ret = io_iopoll_check(ctx, &nr_events, min_complete);
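
The io_uring iopoll lines above implement a reap-until-min loop: io_iopoll_getevents() keeps calling io_do_iopoll(), accumulating completions into *nr_events, until at least min of them have been gathered (min == 0 means a single pass). A hedged generic sketch of that loop shape; poll_once is a caller-supplied stand-in, not an io_uring API:

	static int poll_until_min(int (*poll_once)(unsigned int *nr_events),
				  unsigned int *nr_events, unsigned int min)
	{
		for (;;) {
			int ret = poll_once(nr_events);

			if (ret < 0)
				return ret;
			/* min == 0 requests one pass only, mirroring the !min test above */
			if (!min || *nr_events >= min)
				return 0;
		}
	}
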
nr_events        1728 include/linux/efi.h 	u64 nr_events;
nr_events         223 include/linux/hrtimer.h 	unsigned int			nr_events;
nr_events         761 include/linux/perf_event.h 	int				nr_events;
nr_events        1692 kernel/events/core.c 	ctx->nr_events++;
nr_events        1873 kernel/events/core.c 	ctx->nr_events--;
nr_events        2172 kernel/events/core.c 	if (!ctx->nr_events && ctx->is_active) {
nr_events        3009 kernel/events/core.c 	if (likely(!ctx->nr_events)) {
nr_events        3476 kernel/events/core.c 	if (likely(!ctx->nr_events))
nr_events        3531 kernel/events/core.c 	if (!ctx->nr_events)
nr_events        3900 kernel/events/core.c 	if (!ctx || !ctx->nr_events)
nr_events        1619 kernel/time/hrtimer.c 	cpu_base->nr_events++;
nr_events         150 kernel/time/timer_list.c 	P(nr_events);
nr_events        1367 kernel/trace/trace.h 	int				nr_events;
nr_events         699 kernel/trace/trace_events.c 	if (!--dir->nr_events) {
nr_events        1407 kernel/trace/trace_events.c 				if (dir->nr_events) {
nr_events        1885 kernel/trace/trace_events.c 			dir->nr_events++;
nr_events        1920 kernel/trace/trace_events.c 	dir->nr_events = 1;
nr_events        1859 kernel/trace/trace_events_filter.c 	if (!dir->nr_events) {
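
In the trace_events.c lines above, nr_events acts as a plain reference count on a per-system directory: it starts at 1 when the directory is created, is incremented for each event added, and the directory is torn down once the count drops back to zero. A generic sketch of that counting scheme, with illustrative names:

	struct sys_dir {
		int nr_events;
		/* ... */
	};

	static void sys_dir_get(struct sys_dir *dir)
	{
		dir->nr_events++;
	}

	static void sys_dir_put(struct sys_dir *dir, void (*release)(struct sys_dir *))
	{
		if (!--dir->nr_events)
			release(dir);	/* last event gone: remove the directory */
	}
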
nr_events          21 tools/lib/traceevent/event-parse-api.c 	if (tep && tep->events && index < tep->nr_events)
nr_events          49 tools/lib/traceevent/event-parse-api.c 		return tep->nr_events;
nr_events          52 tools/lib/traceevent/event-parse-local.h 	int nr_events;
nr_events         821 tools/lib/traceevent/event-parse.c 					    (tep->nr_events + 1));
nr_events         827 tools/lib/traceevent/event-parse.c 	for (i = 0; i < tep->nr_events; i++) {
nr_events         831 tools/lib/traceevent/event-parse.c 	if (i < tep->nr_events)
nr_events         834 tools/lib/traceevent/event-parse.c 			sizeof(event) * (tep->nr_events - i));
nr_events         837 tools/lib/traceevent/event-parse.c 	tep->nr_events++;
nr_events        3550 tools/lib/traceevent/event-parse.c 	eventptr = bsearch(&pkey, tep->events, tep->nr_events,
nr_events        3582 tools/lib/traceevent/event-parse.c 	for (i = 0; i < tep->nr_events; i++) {
nr_events        3591 tools/lib/traceevent/event-parse.c 	if (i == tep->nr_events)
nr_events        5755 tools/lib/traceevent/event-parse.c 	events = malloc(sizeof(*events) * (tep->nr_events + 1));
nr_events        5759 tools/lib/traceevent/event-parse.c 	memcpy(events, tep->events, sizeof(*events) * tep->nr_events);
nr_events        5760 tools/lib/traceevent/event-parse.c 	events[tep->nr_events] = NULL;
nr_events        5764 tools/lib/traceevent/event-parse.c static void list_events_sort(struct tep_event **events, int nr_events,
nr_events        5784 tools/lib/traceevent/event-parse.c 		qsort(events, nr_events, sizeof(*events), sort);
nr_events        5823 tools/lib/traceevent/event-parse.c 	list_events_sort(events, tep->nr_events, sort_type);
nr_events        5855 tools/lib/traceevent/event-parse.c 	list_events_sort(events, tep->nr_events, sort_type);
nr_events        7053 tools/lib/traceevent/event-parse.c 	for (i = 0; i < tep->nr_events; i++)
nr_events         302 tools/lib/traceevent/parse-filter.c 	for (i = 0; i < tep->nr_events; i++) {
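
The libtraceevent lines above keep tep->events as a sorted array: adding an event reallocs to nr_events + 1 entries, finds the insertion point, memmove()s the tail down one slot, and bumps nr_events, so later lookups can bsearch() across nr_events entries. A simplified, self-contained sketch of that bookkeeping; the struct and comparison are stand-ins for the library's internals:

	#include <stdlib.h>
	#include <string.h>

	struct ev { int id; };

	struct ev_list {
		struct ev **events;
		int nr_events;
	};

	static int ev_cmp(const void *a, const void *b)
	{
		const struct ev *ea = *(const struct ev * const *)a;
		const struct ev *eb = *(const struct ev * const *)b;

		return (ea->id > eb->id) - (ea->id < eb->id);
	}

	static int ev_list_add(struct ev_list *l, struct ev *ev)
	{
		struct ev **tmp;
		int i;

		tmp = realloc(l->events, sizeof(*tmp) * (l->nr_events + 1));
		if (!tmp)
			return -1;
		l->events = tmp;

		/* find the first entry that sorts after the new one */
		for (i = 0; i < l->nr_events; i++)
			if (l->events[i]->id > ev->id)
				break;

		/* open a slot at the insertion point and keep the array sorted */
		memmove(&l->events[i + 1], &l->events[i],
			sizeof(*l->events) * (l->nr_events - i));
		l->events[i] = ev;
		l->nr_events++;
		return 0;
	}

	static struct ev *ev_list_find(struct ev_list *l, int id)
	{
		struct ev key = { .id = id }, *pkey = &key, **res;

		res = bsearch(&pkey, l->events, l->nr_events,
			      sizeof(*l->events), ev_cmp);
		return res ? *res : NULL;
	}
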
nr_events         430 tools/perf/builtin-annotate.c 		u32 nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
nr_events         421 tools/perf/builtin-report.c 	unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
nr_events         422 tools/perf/builtin-report.c 	u64 nr_events = hists->stats.total_period;
nr_events         433 tools/perf/builtin-report.c 		nr_events = hists->stats.total_non_filtered_period;
nr_events         447 tools/perf/builtin-report.c 				nr_events += pos_hists->stats.total_non_filtered_period;
nr_events         449 tools/perf/builtin-report.c 				nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
nr_events         450 tools/perf/builtin-report.c 				nr_events += pos_hists->stats.total_period;
nr_events         471 tools/perf/builtin-report.c 		ret += fprintf(fp, "\n# Total weight : %" PRIu64, nr_events);
nr_events         474 tools/perf/builtin-report.c 		ret += fprintf(fp, "\n# Event count (approx.): %" PRIu64, nr_events);
nr_events          61 tools/perf/builtin-sched.c 	unsigned long		nr_events;
nr_events         207 tools/perf/builtin-sched.c 	unsigned long	 nr_events;
nr_events         358 tools/perf/builtin-sched.c 	unsigned long idx = task->nr_events;
nr_events         364 tools/perf/builtin-sched.c 	task->nr_events++;
nr_events         365 tools/perf/builtin-sched.c 	size = sizeof(struct sched_atom *) * task->nr_events;
nr_events         376 tools/perf/builtin-sched.c 	if (!task->nr_events)
nr_events         379 tools/perf/builtin-sched.c 	return task->atoms[task->nr_events - 1];
nr_events         496 tools/perf/builtin-sched.c 			task->nr, task->comm, task->pid, task->nr_events);
nr_events         641 tools/perf/builtin-sched.c 	for (i = 0; i < this_task->nr_events; i++) {
nr_events        1819 tools/perf/builtin-sched.c 		sched->nr_events      = session->evlist->stats.nr_events[0];
nr_events        1821 tools/perf/builtin-sched.c 		sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
nr_events        3046 tools/perf/builtin-sched.c 	sched->nr_events      = evlist->stats.nr_events[0];
nr_events        3048 tools/perf/builtin-sched.c 	sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];
nr_events        3068 tools/perf/builtin-sched.c 	if (sched->nr_lost_events && sched->nr_events) {
nr_events        3070 tools/perf/builtin-sched.c 			(double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
nr_events        3071 tools/perf/builtin-sched.c 			sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
nr_events         330 tools/perf/builtin-top.c 	    hists->stats.nr_events[PERF_RECORD_LOST])) {
nr_events         332 tools/perf/builtin-top.c 			      hists->stats.nr_events[PERF_RECORD_LOST];
nr_events        1090 tools/perf/builtin-top.c 		if (!in->nr_events) {
nr_events         117 tools/perf/builtin-trace.c 	unsigned long		nr_events;
nr_events        1051 tools/perf/builtin-trace.c 	unsigned long	  nr_events;
nr_events        1097 tools/perf/builtin-trace.c 	++ttrace->nr_events;
nr_events        2561 tools/perf/builtin-trace.c 		++trace->nr_events;
nr_events        3446 tools/perf/builtin-trace.c 	before = trace->nr_events;
nr_events        3457 tools/perf/builtin-trace.c 			++trace->nr_events;
nr_events        3476 tools/perf/builtin-trace.c 	if (trace->nr_events == before) {
nr_events        3733 tools/perf/builtin-trace.c 	ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
nr_events        3736 tools/perf/builtin-trace.c 	printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
nr_events        3754 tools/perf/builtin-trace.c 	return ttrace ? ttrace->nr_events : 0;
nr_events         153 tools/perf/tests/hists_filter.c 				hists->stats.nr_events[PERF_RECORD_SAMPLE] == 10);
nr_events         159 tools/perf/tests/hists_filter.c 				hists->stats.nr_events[PERF_RECORD_SAMPLE] ==
nr_events         178 tools/perf/tests/hists_filter.c 				hists->stats.nr_events[PERF_RECORD_SAMPLE] == 10);
nr_events         207 tools/perf/tests/hists_filter.c 				hists->stats.nr_events[PERF_RECORD_SAMPLE] == 10);
nr_events         242 tools/perf/tests/hists_filter.c 				hists->stats.nr_events[PERF_RECORD_SAMPLE] == 10);
nr_events         271 tools/perf/tests/hists_filter.c 				hists->stats.nr_events[PERF_RECORD_SAMPLE] == 10);
nr_events         302 tools/perf/tests/hists_filter.c 				hists->stats.nr_events[PERF_RECORD_SAMPLE] == 10);
nr_events          42 tools/perf/tests/mmap-basic.c 	unsigned int nr_events[nsyscalls],
nr_events          99 tools/perf/tests/mmap-basic.c 		nr_events[i] = 0;
nr_events         141 tools/perf/tests/mmap-basic.c 		nr_events[evsel->idx]++;
nr_events         149 tools/perf/tests/mmap-basic.c 		if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
nr_events         152 tools/perf/tests/mmap-basic.c 				 perf_evsel__name(evsel), nr_events[evsel->idx]);
nr_events          40 tools/perf/tests/openat-syscall-tp-fields.c 	int err = -1, i, nr_events = 0, nr_polls = 0;
nr_events          88 tools/perf/tests/openat-syscall-tp-fields.c 		int before = nr_events;
nr_events         103 tools/perf/tests/openat-syscall-tp-fields.c 				++nr_events;
nr_events         129 tools/perf/tests/openat-syscall-tp-fields.c 		if (nr_events == before)
nr_events          69 tools/perf/tests/perf-record.c 	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
nr_events         182 tools/perf/tests/perf-record.c 					nr_events[type]++;
nr_events         300 tools/perf/tests/perf-record.c 	if (nr_events[PERF_RECORD_COMM] > 1 + !!found_coreutils_mmap) {
nr_events         305 tools/perf/tests/perf-record.c 	if (nr_events[PERF_RECORD_COMM] == 0) {
nr_events         663 tools/perf/ui/browsers/hists.c 			    browser->hists->stats.nr_events[PERF_RECORD_LOST])) {
nr_events         665 tools/perf/ui/browsers/hists.c 					browser->hists->stats.nr_events[PERF_RECORD_LOST];
nr_events        2823 tools/perf/ui/browsers/hists.c static int perf_evsel__hists_browse(struct evsel *evsel, int nr_events,
nr_events        2925 tools/perf/ui/browsers/hists.c 			if (nr_events == 1)
nr_events        3228 tools/perf/ui/browsers/hists.c 	unsigned long nr_events = hists->stats.nr_events[PERF_RECORD_SAMPLE];
nr_events        3244 tools/perf/ui/browsers/hists.c 			nr_events += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
nr_events        3248 tools/perf/ui/browsers/hists.c 	nr_events = convert_unit(nr_events, &unit);
nr_events        3249 tools/perf/ui/browsers/hists.c 	printed = scnprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events,
nr_events        3253 tools/perf/ui/browsers/hists.c 	nr_events = hists->stats.nr_events[PERF_RECORD_LOST];
nr_events        3254 tools/perf/ui/browsers/hists.c 	if (nr_events != 0) {
nr_events        3258 tools/perf/ui/browsers/hists.c 		nr_events = convert_unit(nr_events, &unit);
nr_events        3260 tools/perf/ui/browsers/hists.c 				     nr_events, unit, unit == ' ' ? "" : " ");
nr_events        3271 tools/perf/ui/browsers/hists.c 				int nr_events, const char *help,
nr_events        3313 tools/perf/ui/browsers/hists.c 			key = perf_evsel__hists_browse(pos, nr_events, help,
nr_events         375 tools/perf/ui/hist.c HPP_RAW_FNS(samples, nr_events)
nr_events         885 tools/perf/ui/stdio/hist.c 		ret += fprintf(fp, "%16s events: %10d\n", name, stats->nr_events[i]);
nr_events        2748 tools/perf/util/annotate.c 	for (i = 0; i < notes->nr_events; i++) {
nr_events        2836 tools/perf/util/annotate.c 		for (i = 0; i < notes->nr_events; i++) {
nr_events        3008 tools/perf/util/annotate.c 	notes->nr_events = nr_pcnt;
nr_events         280 tools/perf/util/annotate.h 	int			nr_events;
nr_events         308 tools/perf/util/annotate.h 	return (notes->options->show_total_period ? 12 : 7) * notes->nr_events;
nr_events          36 tools/perf/util/events_stats.h 	u32 nr_events[PERF_RECORD_HEADER_MAX];
nr_events         275 tools/perf/util/hist.c 	he_stat->nr_events	+= 1;
nr_events         285 tools/perf/util/hist.c 	dest->nr_events		+= src->nr_events;
nr_events         292 tools/perf/util/hist.c 	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
nr_events         702 tools/perf/util/hist.c 			.nr_events = 1,
nr_events        2033 tools/perf/util/hist.c 	hists->stats.nr_non_filtered_samples += h->stat.nr_events;
nr_events        2260 tools/perf/util/hist.c 	++stats->nr_events[0];
nr_events        2261 tools/perf/util/hist.c 	++stats->nr_events[type];
nr_events        2632 tools/perf/util/hist.c 	unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
nr_events        2633 tools/perf/util/hist.c 	u64 nr_events = hists->stats.total_period;
nr_events        2643 tools/perf/util/hist.c 		nr_events = hists->stats.total_non_filtered_period;
nr_events        2657 tools/perf/util/hist.c 				nr_events += pos_hists->stats.total_non_filtered_period;
nr_events        2659 tools/perf/util/hist.c 				nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
nr_events        2660 tools/perf/util/hist.c 				nr_events += pos_hists->stats.total_period;
nr_events        2676 tools/perf/util/hist.c 			   ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);
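
The hist.c line above that rescales he_stat->nr_events by 7/8 is an integer exponential decay: each decay pass keeps roughly 87.5% of the accumulated count, so entries that stop receiving samples fade out across successive refreshes. A standalone illustration of the arithmetic:

	static unsigned int decay_count(unsigned int nr_events)
	{
		return (nr_events * 7) / 8;	/* 100 -> 87 -> 76 -> 66 ... */
	}
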
nr_events          24 tools/perf/util/ordered-events.c 	++oe->nr_events;
nr_events          27 tools/perf/util/ordered-events.c 	pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);
nr_events         189 tools/perf/util/ordered-events.c 	oe->nr_events--;
nr_events         236 tools/perf/util/ordered-events.c 		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");
nr_events         280 tools/perf/util/ordered-events.c 	if (oe->nr_events == 0)
nr_events         320 tools/perf/util/ordered-events.c 		   str[how], oe->nr_events);
nr_events         333 tools/perf/util/ordered-events.c 		   str[how], oe->nr_events);
nr_events          48 tools/perf/util/ordered-events.h 	unsigned int			 nr_events;
nr_events         542 tools/perf/util/scripting-engines/trace-event-perl.c 	int i, not_first, count, nr_events;
nr_events         607 tools/perf/util/scripting-engines/trace-event-perl.c 	nr_events = tep_get_events_count(pevent);
nr_events         610 tools/perf/util/scripting-engines/trace-event-perl.c 	for (i = 0; all_events && i < nr_events; i++) {
nr_events        1690 tools/perf/util/scripting-engines/trace-event-python.c 	int i, not_first, count, nr_events;
nr_events        1739 tools/perf/util/scripting-engines/trace-event-python.c 	nr_events = tep_get_events_count(pevent);
nr_events        1742 tools/perf/util/scripting-engines/trace-event-python.c 	for (i = 0; all_events && i < nr_events; i++) {
nr_events        1748 tools/perf/util/session.c 	    stats->nr_events[PERF_RECORD_LOST] != 0) {
nr_events        1751 tools/perf/util/session.c 			    stats->nr_events[0],
nr_events        1752 tools/perf/util/session.c 			    stats->nr_events[PERF_RECORD_LOST]);
nr_events        1759 tools/perf/util/session.c 			    (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
nr_events        1762 tools/perf/util/session.c 				    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
nr_events        1771 tools/perf/util/session.c 			    stats->nr_events[PERF_RECORD_AUX]);
nr_events        1784 tools/perf/util/session.c 			    stats->nr_events[PERF_RECORD_AUX],
nr_events        1810 tools/perf/util/session.c 			    stats->nr_events[PERF_RECORD_SAMPLE]);
nr_events        1299 tools/perf/util/sort.c 	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
nr_events          51 tools/perf/util/sort.h 	u32			nr_events;