1#include <linux/types.h>
2#include <sys/mman.h>
3#include "event.h"
4#include "debug.h"
5#include "hist.h"
6#include "machine.h"
7#include "sort.h"
8#include "string.h"
9#include "strlist.h"
10#include "thread.h"
11#include "thread_map.h"
12#include "symbol/kallsyms.h"
13
/*
 * Printable names for perf event record types, indexed by PERF_RECORD_* id.
 * Slot 0 is used for the aggregate "TOTAL" count; ids with no entry are
 * NULL and reported as "UNKNOWN" by perf_event__name().
 */
static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_MMAP2]			= "MMAP2",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
	[PERF_RECORD_ID_INDEX]			= "ID_INDEX",
};
33
34const char *perf_event__name(unsigned int id)
35{
36	if (id >= ARRAY_SIZE(perf_event__names))
37		return "INVALID";
38	if (!perf_event__names[id])
39		return "UNKNOWN";
40	return perf_event__names[id];
41}
42
/*
 * Template sample attached to every synthesized event: no real
 * pid/tid/time/stream/cpu (all -1) and a period of 1.
 */
static struct perf_sample synth_sample = {
	.pid	   = -1,
	.tid	   = -1,
	.time	   = -1,
	.stream_id = -1,
	.cpu	   = -1,
	.period	   = 1,
};
51
52/*
53 * Assumes that the first 4095 bytes of /proc/pid/stat contains
54 * the comm, tgid and ppid.
55 */
56static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
57				    pid_t *tgid, pid_t *ppid)
58{
59	char filename[PATH_MAX];
60	char bf[4096];
61	int fd;
62	size_t size = 0, n;
63	char *nl, *name, *tgids, *ppids;
64
65	*tgid = -1;
66	*ppid = -1;
67
68	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
69
70	fd = open(filename, O_RDONLY);
71	if (fd < 0) {
72		pr_debug("couldn't open %s\n", filename);
73		return -1;
74	}
75
76	n = read(fd, bf, sizeof(bf) - 1);
77	close(fd);
78	if (n <= 0) {
79		pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n",
80			   pid);
81		return -1;
82	}
83	bf[n] = '\0';
84
85	name = strstr(bf, "Name:");
86	tgids = strstr(bf, "Tgid:");
87	ppids = strstr(bf, "PPid:");
88
89	if (name) {
90		name += 5;  /* strlen("Name:") */
91
92		while (*name && isspace(*name))
93			++name;
94
95		nl = strchr(name, '\n');
96		if (nl)
97			*nl = '\0';
98
99		size = strlen(name);
100		if (size >= len)
101			size = len - 1;
102		memcpy(comm, name, size);
103		comm[size] = '\0';
104	} else {
105		pr_debug("Name: string not found for pid %d\n", pid);
106	}
107
108	if (tgids) {
109		tgids += 5;  /* strlen("Tgid:") */
110		*tgid = atoi(tgids);
111	} else {
112		pr_debug("Tgid: string not found for pid %d\n", pid);
113	}
114
115	if (ppids) {
116		ppids += 5;  /* strlen("PPid:") */
117		*ppid = atoi(ppids);
118	} else {
119		pr_debug("PPid: string not found for pid %d\n", pid);
120	}
121
122	return 0;
123}
124
125static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
126				    struct machine *machine,
127				    pid_t *tgid, pid_t *ppid)
128{
129	size_t size;
130
131	*ppid = -1;
132
133	memset(&event->comm, 0, sizeof(event->comm));
134
135	if (machine__is_host(machine)) {
136		if (perf_event__get_comm_ids(pid, event->comm.comm,
137					     sizeof(event->comm.comm),
138					     tgid, ppid) != 0) {
139			return -1;
140		}
141	} else {
142		*tgid = machine->pid;
143	}
144
145	if (*tgid < 0)
146		return -1;
147
148	event->comm.pid = *tgid;
149	event->comm.header.type = PERF_RECORD_COMM;
150
151	size = strlen(event->comm.comm) + 1;
152	size = PERF_ALIGN(size, sizeof(u64));
153	memset(event->comm.comm + size, 0, machine->id_hdr_size);
154	event->comm.header.size = (sizeof(event->comm) -
155				(sizeof(event->comm.comm) - size) +
156				machine->id_hdr_size);
157	event->comm.tid = pid;
158
159	return 0;
160}
161
162static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
163					 union perf_event *event, pid_t pid,
164					 perf_event__handler_t process,
165					 struct machine *machine)
166{
167	pid_t tgid, ppid;
168
169	if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
170		return -1;
171
172	if (process(tool, event, &synth_sample, machine) != 0)
173		return -1;
174
175	return tgid;
176}
177
178static int perf_event__synthesize_fork(struct perf_tool *tool,
179				       union perf_event *event,
180				       pid_t pid, pid_t tgid, pid_t ppid,
181				       perf_event__handler_t process,
182				       struct machine *machine)
183{
184	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
185
186	/*
187	 * for main thread set parent to ppid from status file. For other
188	 * threads set parent pid to main thread. ie., assume main thread
189	 * spawns all threads in a process
190	*/
191	if (tgid == pid) {
192		event->fork.ppid = ppid;
193		event->fork.ptid = ppid;
194	} else {
195		event->fork.ppid = tgid;
196		event->fork.ptid = tgid;
197	}
198	event->fork.pid  = tgid;
199	event->fork.tid  = pid;
200	event->fork.header.type = PERF_RECORD_FORK;
201
202	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
203
204	if (process(tool, event, &synth_sample, machine) != 0)
205		return -1;
206
207	return 0;
208}
209
210int perf_event__synthesize_mmap_events(struct perf_tool *tool,
211				       union perf_event *event,
212				       pid_t pid, pid_t tgid,
213				       perf_event__handler_t process,
214				       struct machine *machine,
215				       bool mmap_data)
216{
217	char filename[PATH_MAX];
218	FILE *fp;
219	int rc = 0;
220
221	if (machine__is_default_guest(machine))
222		return 0;
223
224	snprintf(filename, sizeof(filename), "%s/proc/%d/maps",
225		 machine->root_dir, pid);
226
227	fp = fopen(filename, "r");
228	if (fp == NULL) {
229		/*
230		 * We raced with a task exiting - just return:
231		 */
232		pr_debug("couldn't open %s\n", filename);
233		return -1;
234	}
235
236	event->header.type = PERF_RECORD_MMAP2;
237
238	while (1) {
239		char bf[BUFSIZ];
240		char prot[5];
241		char execname[PATH_MAX];
242		char anonstr[] = "//anon";
243		unsigned int ino;
244		size_t size;
245		ssize_t n;
246
247		if (fgets(bf, sizeof(bf), fp) == NULL)
248			break;
249
250		/* ensure null termination since stack will be reused. */
251		strcpy(execname, "");
252
253		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
254		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
255		       &event->mmap2.start, &event->mmap2.len, prot,
256		       &event->mmap2.pgoff, &event->mmap2.maj,
257		       &event->mmap2.min,
258		       &ino, execname);
259
260		/*
261 		 * Anon maps don't have the execname.
262 		 */
263		if (n < 7)
264			continue;
265
266		event->mmap2.ino = (u64)ino;
267
268		/*
269		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
270		 */
271		if (machine__is_host(machine))
272			event->header.misc = PERF_RECORD_MISC_USER;
273		else
274			event->header.misc = PERF_RECORD_MISC_GUEST_USER;
275
276		/* map protection and flags bits */
277		event->mmap2.prot = 0;
278		event->mmap2.flags = 0;
279		if (prot[0] == 'r')
280			event->mmap2.prot |= PROT_READ;
281		if (prot[1] == 'w')
282			event->mmap2.prot |= PROT_WRITE;
283		if (prot[2] == 'x')
284			event->mmap2.prot |= PROT_EXEC;
285
286		if (prot[3] == 's')
287			event->mmap2.flags |= MAP_SHARED;
288		else
289			event->mmap2.flags |= MAP_PRIVATE;
290
291		if (prot[2] != 'x') {
292			if (!mmap_data || prot[0] != 'r')
293				continue;
294
295			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
296		}
297
298		if (!strcmp(execname, ""))
299			strcpy(execname, anonstr);
300
301		size = strlen(execname) + 1;
302		memcpy(event->mmap2.filename, execname, size);
303		size = PERF_ALIGN(size, sizeof(u64));
304		event->mmap2.len -= event->mmap.start;
305		event->mmap2.header.size = (sizeof(event->mmap2) -
306					(sizeof(event->mmap2.filename) - size));
307		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
308		event->mmap2.header.size += machine->id_hdr_size;
309		event->mmap2.pid = tgid;
310		event->mmap2.tid = pid;
311
312		if (process(tool, event, &synth_sample, machine) != 0) {
313			rc = -1;
314			break;
315		}
316	}
317
318	fclose(fp);
319	return rc;
320}
321
322int perf_event__synthesize_modules(struct perf_tool *tool,
323				   perf_event__handler_t process,
324				   struct machine *machine)
325{
326	int rc = 0;
327	struct rb_node *nd;
328	struct map_groups *kmaps = &machine->kmaps;
329	union perf_event *event = zalloc((sizeof(event->mmap) +
330					  machine->id_hdr_size));
331	if (event == NULL) {
332		pr_debug("Not enough memory synthesizing mmap event "
333			 "for kernel modules\n");
334		return -1;
335	}
336
337	event->header.type = PERF_RECORD_MMAP;
338
339	/*
340	 * kernel uses 0 for user space maps, see kernel/perf_event.c
341	 * __perf_event_mmap
342	 */
343	if (machine__is_host(machine))
344		event->header.misc = PERF_RECORD_MISC_KERNEL;
345	else
346		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
347
348	for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
349	     nd; nd = rb_next(nd)) {
350		size_t size;
351		struct map *pos = rb_entry(nd, struct map, rb_node);
352
353		if (pos->dso->kernel)
354			continue;
355
356		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
357		event->mmap.header.type = PERF_RECORD_MMAP;
358		event->mmap.header.size = (sizeof(event->mmap) -
359				        (sizeof(event->mmap.filename) - size));
360		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
361		event->mmap.header.size += machine->id_hdr_size;
362		event->mmap.start = pos->start;
363		event->mmap.len   = pos->end - pos->start;
364		event->mmap.pid   = machine->pid;
365
366		memcpy(event->mmap.filename, pos->dso->long_name,
367		       pos->dso->long_name_len + 1);
368		if (process(tool, event, &synth_sample, machine) != 0) {
369			rc = -1;
370			break;
371		}
372	}
373
374	free(event);
375	return rc;
376}
377
/*
 * Synthesize the events describing one process.
 *
 * !full: emit a single COMM for @pid plus its mmap events.
 * full:  walk <root>/proc/<pid>/task and emit FORK + COMM per thread,
 *        plus the mmap events of @pid itself.
 *
 * Returns 0 on success (also when the task directory can't be opened,
 * since the task may simply have exited), -1 on synthesis failure.
 */
static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      pid_t pid, int full,
					  perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine, bool mmap_data)
{
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent dirent, *next;
	pid_t tgid, ppid;
	int rc = 0;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
							  process, machine, mmap_data);
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while (!readdir_r(tasks, &dirent, &next) && next) {
		char *end;
		pid_t _pid;

		/* skip non-numeric entries such as "." and ".." */
		_pid = strtol(dirent.d_name, &end, 10);
		if (*end)
			continue;

		/* rc stays -1 if any of the next three steps breaks out */
		rc = -1;
		if (perf_event__prepare_comm(comm_event, _pid, machine,
					     &tgid, &ppid) != 0)
			break;

		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						ppid, process, machine) < 0)
			break;
		/*
		 * Send the prepared comm event
		 */
		if (process(tool, comm_event, &synth_sample, machine) != 0)
			break;

		rc = 0;
		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						process, machine, mmap_data);
			if (rc)
				break;
		}
	}

	closedir(tasks);
	return rc;
}
451
/*
 * Synthesize COMM + mmap events for every thread in @threads.  If a
 * thread's group leader is not itself in the map, events for the leader
 * are synthesized as well, so that the resulting stream is self-contained.
 *
 * Returns 0 on success, -1 on allocation or synthesis failure.
 */
int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool mmap_data)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	int err = -1, thread, j;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event,
					       threads->map[thread], 0,
					       process, tool, machine,
					       mmap_data)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != threads->map[thread]) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == threads->map[j]) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data)) {
				err = -1;
				break;
			}
		}
	}
	/* fall through into the goto-cleanup chain on success too */
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}
519
/*
 * Synthesize FORK/COMM/mmap events for every process found under
 * <root>/proc.  Per-thread synthesis failures are ignored (we may race
 * with threads exiting).  Returns 0 on success, -1 on allocation
 * failure or if /proc can't be opened.
 */
int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine, bool mmap_data)
{
	DIR *proc;
	char proc_path[PATH_MAX];
	struct dirent dirent, *next;
	union perf_event *comm_event, *mmap_event, *fork_event;
	int err = -1;

	if (machine__is_default_guest(machine))
		return 0;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	proc = opendir(proc_path);

	if (proc == NULL)
		goto out_free_fork;

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;
		/*
 		 * We may race with exiting thread, so don't stop just because
 		 * one thread couldn't be synthesized.
 		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event, pid,
					   1, process, tool, machine, mmap_data);
	}

	err = 0;
	closedir(proc);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}
576
/* Closure for find_symbol_cb(): symbol name to look for, address found. */
struct process_symbol_args {
	const char *name;
	u64	   start;
};
581
582static int find_symbol_cb(void *arg, const char *name, char type,
583			  u64 start)
584{
585	struct process_symbol_args *args = arg;
586
587	/*
588	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
589	 * an 'A' to the same address as "_stext".
590	 */
591	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
592	      type == 'A') || strcmp(name, args->name))
593		return 0;
594
595	args->start = start;
596	return 1;
597}
598
599u64 kallsyms__get_function_start(const char *kallsyms_filename,
600				 const char *symbol_name)
601{
602	struct process_symbol_args args = { .name = symbol_name, };
603
604	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
605		return 0;
606
607	return args.start;
608}
609
610int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
611				       perf_event__handler_t process,
612				       struct machine *machine)
613{
614	size_t size;
615	const char *mmap_name;
616	char name_buff[PATH_MAX];
617	struct map *map;
618	struct kmap *kmap;
619	int err;
620	union perf_event *event;
621
622	if (machine->vmlinux_maps[0] == NULL)
623		return -1;
624
625	/*
626	 * We should get this from /sys/kernel/sections/.text, but till that is
627	 * available use this, and after it is use this as a fallback for older
628	 * kernels.
629	 */
630	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
631	if (event == NULL) {
632		pr_debug("Not enough memory synthesizing mmap event "
633			 "for kernel modules\n");
634		return -1;
635	}
636
637	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
638	if (machine__is_host(machine)) {
639		/*
640		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
641		 * see kernel/perf_event.c __perf_event_mmap
642		 */
643		event->header.misc = PERF_RECORD_MISC_KERNEL;
644	} else {
645		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
646	}
647
648	map = machine->vmlinux_maps[MAP__FUNCTION];
649	kmap = map__kmap(map);
650	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
651			"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
652	size = PERF_ALIGN(size, sizeof(u64));
653	event->mmap.header.type = PERF_RECORD_MMAP;
654	event->mmap.header.size = (sizeof(event->mmap) -
655			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
656	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
657	event->mmap.start = map->start;
658	event->mmap.len   = map->end - event->mmap.start;
659	event->mmap.pid   = machine->pid;
660
661	err = process(tool, event, &synth_sample, machine);
662	free(event);
663
664	return err;
665}
666
667size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
668{
669	const char *s;
670
671	if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
672		s = " exec";
673	else
674		s = "";
675
676	return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
677}
678
/* Default ->comm handler: delegate to the machine state machinery. */
int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}
686
/* Default ->lost handler: delegate to the machine state machinery. */
int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}
694
/*
 * Print an MMAP event as " pid/tid: [start(len) @ pgoff]: <r|x> file";
 * 'r' marks a data (MMAP_DATA) mapping, 'x' an executable one.
 */
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
}
703
/*
 * Print an MMAP2 event: address range, pgoff, device maj:min, inode and
 * generation, then the protection/flags as "rwxp"-style characters and
 * the mapped file name.
 */
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
		       event->mmap2.filename);
}
718
/* Default ->mmap handler: delegate to the machine state machinery. */
int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}
726
/* Default ->mmap2 handler: delegate to the machine state machinery. */
int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}
734
/* Print a FORK/EXIT event as "(pid:tid):(ppid:ptid)". */
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}
741
/* Default ->fork handler: delegate to the machine state machinery. */
int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}
749
/* Default ->exit handler: delegate to the machine state machinery. */
int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}
757
758size_t perf_event__fprintf(union perf_event *event, FILE *fp)
759{
760	size_t ret = fprintf(fp, "PERF_RECORD_%s",
761			     perf_event__name(event->header.type));
762
763	switch (event->header.type) {
764	case PERF_RECORD_COMM:
765		ret += perf_event__fprintf_comm(event, fp);
766		break;
767	case PERF_RECORD_FORK:
768	case PERF_RECORD_EXIT:
769		ret += perf_event__fprintf_task(event, fp);
770		break;
771	case PERF_RECORD_MMAP:
772		ret += perf_event__fprintf_mmap(event, fp);
773		break;
774	case PERF_RECORD_MMAP2:
775		ret += perf_event__fprintf_mmap2(event, fp);
776		break;
777	default:
778		ret += fprintf(fp, "\n");
779	}
780
781	return ret;
782}
783
/* Generic event handler: delegate to the machine state machinery. */
int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}
791
/*
 * Resolve @addr for @thread into a map in @al, selecting the map group
 * (user vs kernel, host vs guest) from @cpumode.  On exit al->map is
 * the containing map (or NULL), al->addr is the map-relative address
 * when a map was found, al->level is a one-char origin tag and
 * al->filtered carries HIST_FILTER__* bits for unmatchable cpumodes.
 */
void thread__find_addr_map(struct thread *thread, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = thread->mg;
	struct machine *machine = mg->machine;
	bool load_map = false;

	al->machine = machine;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	if (machine == NULL) {
		al->map = NULL;
		return;
	}

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		/* cpumode we aren't set up to resolve: mark and filter */
		al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
			!perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		if ((cpumode == PERF_RECORD_MISC_USER ||
			cpumode == PERF_RECORD_MISC_KERNEL) &&
			!perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if (cpumode == PERF_RECORD_MISC_USER && machine &&
		    mg != &machine->kmaps &&
		    machine__kernel_ip(machine, al->addr)) {
			mg = &machine->kmaps;
			load_map = true;
			goto try_again;
		}
	} else {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map, machine->symbol_filter);
		al->addr = al->map->map_ip(al->map, al->addr);
	}
}
867
868void thread__find_addr_location(struct thread *thread,
869				u8 cpumode, enum map_type type, u64 addr,
870				struct addr_location *al)
871{
872	thread__find_addr_map(thread, cpumode, type, addr, al);
873	if (al->map != NULL)
874		al->sym = map__find_symbol(al->map, al->addr,
875					   thread->mg->machine->symbol_filter);
876	else
877		al->sym = NULL;
878}
879
/*
 * Resolve a sample into an addr_location: find (or create) the thread,
 * resolve sample->ip to a map and symbol, and apply the configured
 * thread/dso/symbol filters by setting bits in al->filtered.
 *
 * Returns 0 on success, -1 if the thread can't be found or created.
 */
int perf_event__preprocess_sample(const union perf_event *event,
				  struct machine *machine,
				  struct addr_location *al,
				  struct perf_sample *sample)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL)
		return -1;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
	/*
	 * Have we already created the kernel maps for this machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL &&
	    machine->vmlinux_maps[MAP__FUNCTION] == NULL)
		machine__create_kernel_maps(machine);

	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");

	if (thread__is_filtered(thread))
		al->filtered |= (1 << HIST_FILTER__THREAD);

	al->sym = NULL;
	al->cpu = sample->cpu;

	if (al->map) {
		struct dso *dso = al->map->dso;

		/* filter by dso: match either the short or the long name */
		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso->long_name))))) {
			al->filtered |= (1 << HIST_FILTER__DSO);
		}

		al->sym = map__find_symbol(al->map, al->addr,
					   machine->symbol_filter);
	}

	/* filter by symbol name when a symbol list is configured */
	if (symbol_conf.sym_list &&
		(!al->sym || !strlist__has_entry(symbol_conf.sym_list,
						al->sym->name))) {
		al->filtered |= (1 << HIST_FILTER__SYMBOL);
	}

	return 0;
}
939
/*
 * A BTS (Branch Trace Store) event is a hardware branch-instructions
 * counter with a sample period of 1.  The config must be compared with
 * '==': the old bitwise '&' test also matched unrelated configs that
 * merely share bit 2 (e.g. PERF_COUNT_HW_BRANCH_MISSES).
 */
bool is_bts_event(struct perf_event_attr *attr)
{
	return attr->type == PERF_TYPE_HARDWARE &&
	       attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	       attr->sample_period == 1;
}
946
947bool sample_addr_correlates_sym(struct perf_event_attr *attr)
948{
949	if (attr->type == PERF_TYPE_SOFTWARE &&
950	    (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
951	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
952	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))
953		return true;
954
955	if (is_bts_event(attr))
956		return true;
957
958	return false;
959}
960
961void perf_event__preprocess_sample_addr(union perf_event *event,
962					struct perf_sample *sample,
963					struct thread *thread,
964					struct addr_location *al)
965{
966	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
967
968	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->addr, al);
969	if (!al->map)
970		thread__find_addr_map(thread, cpumode, MAP__VARIABLE,
971				      sample->addr, al);
972
973	al->cpu = sample->cpu;
974	al->sym = NULL;
975
976	if (al->map)
977		al->sym = map__find_symbol(al->map, al->addr, NULL);
978}
979