/*
 * builtin-timechart.c - make an svg timechart of system activity
 *
 * (C) Copyright 2009 Intel Corporation
 *
 * Authors:
 *     Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <traceevent/event-parse.h>

#include "builtin.h"

#include "util/util.h"

#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
#include "util/callchain.h"
#include "util/strlist.h"

#include "perf.h"
#include "util/header.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/event.h"
#include "util/session.h"
#include "util/svghelper.h"
#include "util/tool.h"
#include "util/data.h"
#include "util/debug.h"

#define SUPPORT_OLD_POWER_EVENTS 1
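/* "state" value that marks a CPU leaving idle in the power:cpu_idle tracepoint */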
#define PWR_EVENT_EXIT -1

struct per_pid;
struct power_event;
struct wake_event;

struct timechart {
	struct perf_tool	tool;
	struct per_pid		*all_data;
	struct power_event	*power_events;
	struct wake_event	*wake_events;
	int			proc_num;
	unsigned int		numcpus;
	u64			min_freq,	/* Lowest CPU frequency seen */
				max_freq,	/* Highest CPU frequency seen */
				turbo_frequency,
				first_time, last_time;
	bool			power_only,
				tasks_only,
				with_backtrace,
				topology;
	/* IO related settings */
	u64			io_events;
	bool			io_only,
				skip_eagain;
	u64			min_time,
				merge_dist;
	bool			force;
};

struct per_pidcomm;
struct cpu_sample;
struct io_sample;

/*
 * Data structure layout:
 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
 * Each "pid" entry has a list of "comm"s;
 *	this is because we want to track different programs differently, even
 *	though exec reuses the original pid (by design).
 * Each comm has a list of samples that will be used to draw
 * the final graph.
 */

struct per_pid {
	struct per_pid *next;

	int		pid;
	int		ppid;

	u64		start_time;
	u64		end_time;
	u64		total_time;
	u64		total_bytes;
	int		display;

	struct per_pidcomm *all;
	struct per_pidcomm *current;
};


struct per_pidcomm {
	struct per_pidcomm *next;

	u64		start_time;
	u64		end_time;
	u64		total_time;
	u64		max_bytes;
	u64		total_bytes;

	int		Y;
	int		display;

	long		state;
	u64		state_since;

	char		*comm;

	struct cpu_sample *samples;
	struct io_sample  *io_samples;
};

struct sample_wrapper {
	struct sample_wrapper *next;

	u64		timestamp;
	unsigned char	data[0];
};

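/* classification of a cpu_sample period: on a CPU, runnable but waiting, or blocked */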
#define TYPE_NONE	0
#define TYPE_RUNNING	1
#define TYPE_WAITING	2
#define TYPE_BLOCKED	3

struct cpu_sample {
	struct cpu_sample *next;

	u64 start_time;
	u64 end_time;
	int type;
	int cpu;
	const char *backtrace;
};

enum {
	IOTYPE_READ,
	IOTYPE_WRITE,
	IOTYPE_SYNC,
	IOTYPE_TX,
	IOTYPE_RX,
	IOTYPE_POLL,
};

struct io_sample {
	struct io_sample *next;

	u64 start_time;
	u64 end_time;
	u64 bytes;
	int type;
	int fd;
	int err;
	int merges;
};

#define CSTATE 1
#define PSTATE 2

struct power_event {
	struct power_event *next;
	int type;
	int state;
	u64 start_time;
	u64 end_time;
	int cpu;
};

struct wake_event {
	struct wake_event *next;
	int waker;
	int wakee;
	u64 time;
	const char *backtrace;
};

struct process_filter {
	char			*name;
	int			pid;
	struct process_filter	*next;
};

static struct process_filter *process_filter;


static struct per_pid *find_create_pid(struct timechart *tchart, int pid)
{
	struct per_pid *cursor = tchart->all_data;

	while (cursor) {
		if (cursor->pid == pid)
			return cursor;
		cursor = cursor->next;
	}
	cursor = zalloc(sizeof(*cursor));
	assert(cursor != NULL);
	cursor->pid = pid;
	cursor->next = tchart->all_data;
	tchart->all_data = cursor;
	return cursor;
}

static void pid_set_comm(struct timechart *tchart, int pid, char *comm)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	p = find_create_pid(tchart, pid);
	c = p->all;
	while (c) {
		if (c->comm && strcmp(c->comm, comm) == 0) {
			p->current = c;
			return;
		}
		if (!c->comm) {
			c->comm = strdup(comm);
			p->current = c;
			return;
		}
		c = c->next;
	}
	c = zalloc(sizeof(*c));
	assert(c != NULL);
	c->comm = strdup(comm);
	p->current = c;
	c->next = p->all;
	p->all = c;
}

static void pid_fork(struct timechart *tchart, int pid, int ppid, u64 timestamp)
{
	struct per_pid *p, *pp;
	p = find_create_pid(tchart, pid);
	pp = find_create_pid(tchart, ppid);
	p->ppid = ppid;
	if (pp->current && pp->current->comm && !p->current)
		pid_set_comm(tchart, pid, pp->current->comm);

	p->start_time = timestamp;
	if (p->current && !p->current->start_time) {
		p->current->start_time = timestamp;
		p->current->state_since = timestamp;
	}
}

static void pid_exit(struct timechart *tchart, int pid, u64 timestamp)
{
	struct per_pid *p;
	p = find_create_pid(tchart, pid);
	p->end_time = timestamp;
	if (p->current)
		p->current->end_time = timestamp;
}

static void pid_put_sample(struct timechart *tchart, int pid, int type,
			   unsigned int cpu, u64 start, u64 end,
			   const char *backtrace)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;

	p = find_create_pid(tchart, pid);
	c = p->current;
	if (!c) {
		c = zalloc(sizeof(*c));
		assert(c != NULL);
		p->current = c;
		c->next = p->all;
		p->all = c;
	}

	sample = zalloc(sizeof(*sample));
	assert(sample != NULL);
	sample->start_time = start;
	sample->end_time = end;
	sample->type = type;
	sample->next = c->samples;
	sample->cpu = cpu;
	sample->backtrace = backtrace;
	c->samples = sample;

	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
		c->total_time += (end-start);
		p->total_time += (end-start);
	}

	if (c->start_time == 0 || c->start_time > start)
		c->start_time = start;
	if (p->start_time == 0 || p->start_time > start)
		p->start_time = start;
}

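/*
 * Per-CPU bookkeeping of the C/P state that is currently in progress;
 * filled in by the start/change handlers and consumed when the matching
 * end event (or the end of the trace) is seen.
 */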
#define MAX_CPUS 4096

static u64 cpus_cstate_start_times[MAX_CPUS];
static int cpus_cstate_state[MAX_CPUS];
static u64 cpus_pstate_start_times[MAX_CPUS];
static u64 cpus_pstate_state[MAX_CPUS];

static int process_comm_event(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	struct timechart *tchart = container_of(tool, struct timechart, tool);
	pid_set_comm(tchart, event->comm.tid, event->comm.comm);
	return 0;
}

static int process_fork_event(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	struct timechart *tchart = container_of(tool, struct timechart, tool);
	pid_fork(tchart, event->fork.pid, event->fork.ppid, event->fork.time);
	return 0;
}

static int process_exit_event(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	struct timechart *tchart = container_of(tool, struct timechart, tool);
	pid_exit(tchart, event->fork.pid, event->fork.time);
	return 0;
}

#ifdef SUPPORT_OLD_POWER_EVENTS
static int use_old_power_events;
#endif

static void c_state_start(int cpu, u64 timestamp, int state)
{
	cpus_cstate_start_times[cpu] = timestamp;
	cpus_cstate_state[cpu] = state;
}

static void c_state_end(struct timechart *tchart, int cpu, u64 timestamp)
{
	struct power_event *pwr = zalloc(sizeof(*pwr));

	if (!pwr)
		return;

	pwr->state = cpus_cstate_state[cpu];
	pwr->start_time = cpus_cstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = CSTATE;
	pwr->next = tchart->power_events;

	tchart->power_events = pwr;
}

static void p_state_change(struct timechart *tchart, int cpu, u64 timestamp, u64 new_freq)
{
	struct power_event *pwr;

	if (new_freq > 8000000) /* detect invalid data */
		return;

	pwr = zalloc(sizeof(*pwr));
	if (!pwr)
		return;

	pwr->state = cpus_pstate_state[cpu];
	pwr->start_time = cpus_pstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = PSTATE;
	pwr->next = tchart->power_events;

	if (!pwr->start_time)
		pwr->start_time = tchart->first_time;

	tchart->power_events = pwr;

	cpus_pstate_state[cpu] = new_freq;
	cpus_pstate_start_times[cpu] = timestamp;

	if ((u64)new_freq > tchart->max_freq)
		tchart->max_freq = new_freq;

	if (new_freq < tchart->min_freq || tchart->min_freq == 0)
		tchart->min_freq = new_freq;

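	/*
	 * cpufreq tables commonly report the turbo state as the highest
	 * frequency plus 1000 kHz, so a frequency exactly 1000 kHz below
	 * the maximum seen so far means that maximum was the turbo value.
	 */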
	if (new_freq == tchart->max_freq - 1000)
		tchart->turbo_frequency = tchart->max_freq;
}

static void sched_wakeup(struct timechart *tchart, int cpu, u64 timestamp,
			 int waker, int wakee, u8 flags, const char *backtrace)
{
	struct per_pid *p;
	struct wake_event *we = zalloc(sizeof(*we));

	if (!we)
		return;

	we->time = timestamp;
	we->waker = waker;
	we->backtrace = backtrace;

	if ((flags & TRACE_FLAG_HARDIRQ) || (flags & TRACE_FLAG_SOFTIRQ))
		we->waker = -1;

	we->wakee = wakee;
	we->next = tchart->wake_events;
	tchart->wake_events = we;
	p = find_create_pid(tchart, we->wakee);

	if (p && p->current && p->current->state == TYPE_NONE) {
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
	if (p && p->current && p->current->state == TYPE_BLOCKED) {
		pid_put_sample(tchart, p->pid, p->current->state, cpu,
			       p->current->state_since, timestamp, NULL);
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
}

static void sched_switch(struct timechart *tchart, int cpu, u64 timestamp,
			 int prev_pid, int next_pid, u64 prev_state,
			 const char *backtrace)
{
	struct per_pid *p = NULL, *prev_p;

	prev_p = find_create_pid(tchart, prev_pid);

	p = find_create_pid(tchart, next_pid);

	if (prev_p->current && prev_p->current->state != TYPE_NONE)
		pid_put_sample(tchart, prev_pid, TYPE_RUNNING, cpu,
			       prev_p->current->state_since, timestamp,
			       backtrace);
	if (p && p->current) {
		if (p->current->state != TYPE_NONE)
			pid_put_sample(tchart, next_pid, p->current->state, cpu,
				       p->current->state_since, timestamp,
				       backtrace);

		p->current->state_since = timestamp;
		p->current->state = TYPE_RUNNING;
	}

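	/*
	 * Classify what the task we switched away from is doing now:
	 * prev_state bit 1 (TASK_UNINTERRUPTIBLE) means it is blocked,
	 * prev_state == 0 (TASK_RUNNING) means it was preempted and is
	 * still waiting for a CPU.
	 */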
	if (prev_p->current) {
		prev_p->current->state = TYPE_NONE;
		prev_p->current->state_since = timestamp;
		if (prev_state & 2)
			prev_p->current->state = TYPE_BLOCKED;
		if (prev_state == 0)
			prev_p->current->state = TYPE_WAITING;
	}
}

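/*
 * Render the sample's callchain into a newly allocated string (one line per
 * resolved symbol) via open_memstream(); the returned buffer is kept by the
 * caller for the lifetime of the SVG output.
 */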
static const char *cat_backtrace(union perf_event *event,
				 struct perf_sample *sample,
				 struct machine *machine)
{
	struct addr_location al;
	unsigned int i;
	char *p = NULL;
	size_t p_len;
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct addr_location tal;
	struct ip_callchain *chain = sample->callchain;
	FILE *f = open_memstream(&p, &p_len);

	if (!f) {
		perror("open_memstream error");
		return NULL;
	}

	if (!chain)
		goto exit;

	if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
		fprintf(stderr, "problem processing %d event, skipping it.\n",
			event->header.type);
		goto exit;
	}

	for (i = 0; i < chain->nr; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);

				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				zfree(&p);
				goto exit;
			}
			continue;
		}

		tal.filtered = 0;
		thread__find_addr_location(al.thread, cpumode,
					   MAP__FUNCTION, ip, &tal);

		if (tal.sym)
			fprintf(f, "..... %016" PRIx64 " %s\n", ip,
				tal.sym->name);
		else
			fprintf(f, "..... %016" PRIx64 "\n", ip);
	}

exit:
	fclose(f);

	return p;
}

typedef int (*tracepoint_handler)(struct timechart *tchart,
				  struct perf_evsel *evsel,
				  struct perf_sample *sample,
				  const char *backtrace);

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct timechart *tchart = container_of(tool, struct timechart, tool);

	if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
		if (!tchart->first_time || tchart->first_time > sample->time)
			tchart->first_time = sample->time;
		if (tchart->last_time < sample->time)
			tchart->last_time = sample->time;
	}

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		return f(tchart, evsel, sample,
			 cat_backtrace(event, sample, machine));
	}

	return 0;
}

static int
process_sample_cpu_idle(struct timechart *tchart __maybe_unused,
			struct perf_evsel *evsel,
			struct perf_sample *sample,
			const char *backtrace __maybe_unused)
{
	u32 state = perf_evsel__intval(evsel, sample, "state");
	u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");

	if (state == (u32)PWR_EVENT_EXIT)
		c_state_end(tchart, cpu_id, sample->time);
	else
		c_state_start(cpu_id, sample->time, state);
	return 0;
}

static int
process_sample_cpu_frequency(struct timechart *tchart,
			     struct perf_evsel *evsel,
			     struct perf_sample *sample,
			     const char *backtrace __maybe_unused)
{
	u32 state = perf_evsel__intval(evsel, sample, "state");
	u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");

	p_state_change(tchart, cpu_id, sample->time, state);
	return 0;
}

static int
process_sample_sched_wakeup(struct timechart *tchart,
			    struct perf_evsel *evsel,
			    struct perf_sample *sample,
			    const char *backtrace)
{
	u8 flags = perf_evsel__intval(evsel, sample, "common_flags");
	int waker = perf_evsel__intval(evsel, sample, "common_pid");
	int wakee = perf_evsel__intval(evsel, sample, "pid");

	sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace);
	return 0;
}

static int
process_sample_sched_switch(struct timechart *tchart,
			    struct perf_evsel *evsel,
			    struct perf_sample *sample,
			    const char *backtrace)
{
	int prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
	int next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");

	sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid,
		     prev_state, backtrace);
	return 0;
}

#ifdef SUPPORT_OLD_POWER_EVENTS
static int
process_sample_power_start(struct timechart *tchart __maybe_unused,
			   struct perf_evsel *evsel,
			   struct perf_sample *sample,
			   const char *backtrace __maybe_unused)
{
	u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
	u64 value = perf_evsel__intval(evsel, sample, "value");

	c_state_start(cpu_id, sample->time, value);
	return 0;
}

static int
process_sample_power_end(struct timechart *tchart,
			 struct perf_evsel *evsel __maybe_unused,
			 struct perf_sample *sample,
			 const char *backtrace __maybe_unused)
{
	c_state_end(tchart, sample->cpu, sample->time);
	return 0;
}

static int
process_sample_power_frequency(struct timechart *tchart,
			       struct perf_evsel *evsel,
			       struct perf_sample *sample,
			       const char *backtrace __maybe_unused)
{
	u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
	u64 value = perf_evsel__intval(evsel, sample, "value");

	p_state_change(tchart, cpu_id, sample->time, value);
	return 0;
}
#endif /* SUPPORT_OLD_POWER_EVENTS */

/*
 * After the last sample we need to wrap up the current C/P state
 * and close out each CPU for these.
 */
static void end_sample_processing(struct timechart *tchart)
{
	u64 cpu;
	struct power_event *pwr;

	for (cpu = 0; cpu <= tchart->numcpus; cpu++) {
		/* C state */
#if 0
		pwr = zalloc(sizeof(*pwr));
		if (!pwr)
			return;

		pwr->state = cpus_cstate_state[cpu];
		pwr->start_time = cpus_cstate_start_times[cpu];
		pwr->end_time = tchart->last_time;
		pwr->cpu = cpu;
		pwr->type = CSTATE;
		pwr->next = tchart->power_events;

		tchart->power_events = pwr;
#endif
		/* P state */

		pwr = zalloc(sizeof(*pwr));
		if (!pwr)
			return;

		pwr->state = cpus_pstate_state[cpu];
		pwr->start_time = cpus_pstate_start_times[cpu];
		pwr->end_time = tchart->last_time;
		pwr->cpu = cpu;
		pwr->type = PSTATE;
		pwr->next = tchart->power_events;

		if (!pwr->start_time)
			pwr->start_time = tchart->first_time;
		if (!pwr->state)
			pwr->state = tchart->min_freq;
		tchart->power_events = pwr;
	}
}

static int pid_begin_io_sample(struct timechart *tchart, int pid, int type,
			       u64 start, int fd)
{
	struct per_pid *p = find_create_pid(tchart, pid);
	struct per_pidcomm *c = p->current;
	struct io_sample *sample;
	struct io_sample *prev;

	if (!c) {
		c = zalloc(sizeof(*c));
		if (!c)
			return -ENOMEM;
		p->current = c;
		c->next = p->all;
		p->all = c;
	}

	prev = c->io_samples;

	if (prev && prev->start_time && !prev->end_time) {
		pr_warning("Skip invalid start event: "
			   "previous event already started!\n");

		/* remove previous event that has been started,
		 * we are not sure we will ever get an end for it */
		c->io_samples = prev->next;
		free(prev);
		return 0;
	}

	sample = zalloc(sizeof(*sample));
	if (!sample)
		return -ENOMEM;
	sample->start_time = start;
	sample->type = type;
	sample->fd = fd;
	sample->next = c->io_samples;
	c->io_samples = sample;

	if (c->start_time == 0 || c->start_time > start)
		c->start_time = start;

	return 0;
}

static int pid_end_io_sample(struct timechart *tchart, int pid, int type,
			     u64 end, long ret)
{
	struct per_pid *p = find_create_pid(tchart, pid);
	struct per_pidcomm *c = p->current;
	struct io_sample *sample, *prev;

	if (!c) {
		pr_warning("Invalid pidcomm!\n");
		return -1;
	}

	sample = c->io_samples;

	if (!sample) /* skip partially captured events */
		return 0;

	if (sample->end_time) {
		pr_warning("Skip invalid end event: "
			   "previous event already ended!\n");
		return 0;
	}

	if (sample->type != type) {
		pr_warning("Skip invalid end event: invalid event type!\n");
		return 0;
	}

	sample->end_time = end;
	prev = sample->next;

	/* we want to be able to see small and fast transfers, so make them
	 * at least min_time long, but don't overlap them */
	if (sample->end_time - sample->start_time < tchart->min_time)
		sample->end_time = sample->start_time + tchart->min_time;
	if (prev && sample->start_time < prev->end_time) {
		if (prev->err) /* try to make errors more visible */
			sample->start_time = prev->end_time;
		else
			prev->end_time = sample->start_time;
	}

	if (ret < 0) {
		sample->err = ret;
	} else if (type == IOTYPE_READ || type == IOTYPE_WRITE ||
		   type == IOTYPE_TX || type == IOTYPE_RX) {

		if ((u64)ret > c->max_bytes)
			c->max_bytes = ret;

		c->total_bytes += ret;
		p->total_bytes += ret;
		sample->bytes = ret;
	}

	/* merge two requests to make svg smaller and render-friendly */
	if (prev &&
	    prev->type == sample->type &&
	    prev->err == sample->err &&
	    prev->fd == sample->fd &&
	    prev->end_time + tchart->merge_dist >= sample->start_time) {

		sample->bytes += prev->bytes;
		sample->merges += prev->merges + 1;

		sample->start_time = prev->start_time;
		sample->next = prev->next;
		free(prev);

		if (!sample->err && sample->bytes > c->max_bytes)
			c->max_bytes = sample->bytes;
	}

	tchart->io_events++;

	return 0;
}

static int
process_enter_read(struct timechart *tchart,
		   struct perf_evsel *evsel,
		   struct perf_sample *sample)
{
	long fd = perf_evsel__intval(evsel, sample, "fd");
	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_READ,
				   sample->time, fd);
}

static int
process_exit_read(struct timechart *tchart,
		  struct perf_evsel *evsel,
		  struct perf_sample *sample)
{
	long ret = perf_evsel__intval(evsel, sample, "ret");
	return pid_end_io_sample(tchart, sample->tid, IOTYPE_READ,
				 sample->time, ret);
}

static int
process_enter_write(struct timechart *tchart,
		    struct perf_evsel *evsel,
		    struct perf_sample *sample)
{
	long fd = perf_evsel__intval(evsel, sample, "fd");
	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_WRITE,
				   sample->time, fd);
}

static int
process_exit_write(struct timechart *tchart,
		   struct perf_evsel *evsel,
		   struct perf_sample *sample)
{
	long ret = perf_evsel__intval(evsel, sample, "ret");
	return pid_end_io_sample(tchart, sample->tid, IOTYPE_WRITE,
				 sample->time, ret);
}

static int
process_enter_sync(struct timechart *tchart,
		   struct perf_evsel *evsel,
		   struct perf_sample *sample)
{
	long fd = perf_evsel__intval(evsel, sample, "fd");
	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_SYNC,
				   sample->time, fd);
}

static int
process_exit_sync(struct timechart *tchart,
		  struct perf_evsel *evsel,
		  struct perf_sample *sample)
{
	long ret = perf_evsel__intval(evsel, sample, "ret");
	return pid_end_io_sample(tchart, sample->tid, IOTYPE_SYNC,
				 sample->time, ret);
}

static int
process_enter_tx(struct timechart *tchart,
		 struct perf_evsel *evsel,
		 struct perf_sample *sample)
{
	long fd = perf_evsel__intval(evsel, sample, "fd");
	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_TX,
				   sample->time, fd);
}

static int
process_exit_tx(struct timechart *tchart,
		struct perf_evsel *evsel,
		struct perf_sample *sample)
{
	long ret = perf_evsel__intval(evsel, sample, "ret");
	return pid_end_io_sample(tchart, sample->tid, IOTYPE_TX,
				 sample->time, ret);
}

static int
process_enter_rx(struct timechart *tchart,
		 struct perf_evsel *evsel,
		 struct perf_sample *sample)
{
	long fd = perf_evsel__intval(evsel, sample, "fd");
	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_RX,
				   sample->time, fd);
}

static int
process_exit_rx(struct timechart *tchart,
		struct perf_evsel *evsel,
		struct perf_sample *sample)
{
	long ret = perf_evsel__intval(evsel, sample, "ret");
	return pid_end_io_sample(tchart, sample->tid, IOTYPE_RX,
				 sample->time, ret);
}

static int
process_enter_poll(struct timechart *tchart,
		   struct perf_evsel *evsel,
		   struct perf_sample *sample)
{
	long fd = perf_evsel__intval(evsel, sample, "fd");
	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_POLL,
				   sample->time, fd);
}

static int
process_exit_poll(struct timechart *tchart,
		  struct perf_evsel *evsel,
		  struct perf_sample *sample)
{
	long ret = perf_evsel__intval(evsel, sample, "ret");
	return pid_end_io_sample(tchart, sample->tid, IOTYPE_POLL,
				 sample->time, ret);
}

/*
 * Sort the pid datastructure
 */
static void sort_pids(struct timechart *tchart)
{
	struct per_pid *new_list, *p, *cursor, *prev;
	/* sort by ppid first, then by pid, lowest to highest */
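	/* a simple insertion sort on the singly linked pid list */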

	new_list = NULL;

	while (tchart->all_data) {
		p = tchart->all_data;
		tchart->all_data = p->next;
		p->next = NULL;

		if (new_list == NULL) {
			new_list = p;
			p->next = NULL;
			continue;
		}
		prev = NULL;
		cursor = new_list;
		while (cursor) {
			if (cursor->ppid > p->ppid ||
				(cursor->ppid == p->ppid && cursor->pid > p->pid)) {
				/* must insert before */
				if (prev) {
					p->next = prev->next;
					prev->next = p;
					cursor = NULL;
					continue;
				} else {
					p->next = new_list;
					new_list = p;
					cursor = NULL;
					continue;
				}
			}

			prev = cursor;
			cursor = cursor->next;
			if (!cursor)
				prev->next = p;
		}
	}
	tchart->all_data = new_list;
}


static void draw_c_p_states(struct timechart *tchart)
{
	struct power_event *pwr;
	pwr = tchart->power_events;

	/*
	 * two pass drawing so that the P state bars are on top of the C state blocks
	 */
	while (pwr) {
		if (pwr->type == CSTATE)
			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		pwr = pwr->next;
	}

	pwr = tchart->power_events;
	while (pwr) {
		if (pwr->type == PSTATE) {
			if (!pwr->state)
				pwr->state = tchart->min_freq;
			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		}
		pwr = pwr->next;
	}
}

static void draw_wakeups(struct timechart *tchart)
{
	struct wake_event *we;
	struct per_pid *p;
	struct per_pidcomm *c;

	we = tchart->wake_events;
	while (we) {
		int from = 0, to = 0;
		char *task_from = NULL, *task_to = NULL;

		/* locate the column of the waker and wakee */
		p = tchart->all_data;
		while (p) {
			if (p->pid == we->waker || p->pid == we->wakee) {
				c = p->all;
				while (c) {
					if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
						if (p->pid == we->waker && !from) {
							from = c->Y;
							task_from = strdup(c->comm);
						}
						if (p->pid == we->wakee && !to) {
							to = c->Y;
							task_to = strdup(c->comm);
						}
					}
					c = c->next;
				}
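				/* no column overlapped the wakeup time; fall back to any entry of this pid */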
				c = p->all;
				while (c) {
					if (p->pid == we->waker && !from) {
						from = c->Y;
						task_from = strdup(c->comm);
					}
					if (p->pid == we->wakee && !to) {
						to = c->Y;
						task_to = strdup(c->comm);
					}
					c = c->next;
				}
			}
			p = p->next;
		}

		if (!task_from) {
			task_from = malloc(40);
			sprintf(task_from, "[%i]", we->waker);
		}
		if (!task_to) {
			task_to = malloc(40);
			sprintf(task_to, "[%i]", we->wakee);
		}

		if (we->waker == -1)
			svg_interrupt(we->time, to, we->backtrace);
		else if (from && to && abs(from - to) == 1)
			svg_wakeline(we->time, from, to, we->backtrace);
		else
			svg_partial_wakeline(we->time, from, task_from, to,
					     task_to, we->backtrace);
		we = we->next;

		free(task_from);
		free(task_to);
	}
}

static void draw_cpu_usage(struct timechart *tchart)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	p = tchart->all_data;
	while (p) {
		c = p->all;
		while (c) {
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING) {
					svg_process(sample->cpu,
						    sample->start_time,
						    sample->end_time,
						    p->pid,
						    c->comm,
						    sample->backtrace);
				}

				sample = sample->next;
			}
			c = c->next;
		}
		p = p->next;
	}
}

static void draw_io_bars(struct timechart *tchart)
{
	const char *suf;
	double bytes;
	char comm[256];
	struct per_pid *p;
	struct per_pidcomm *c;
	struct io_sample *sample;
	int Y = 1;

	p = tchart->all_data;
	while (p) {
		c = p->all;
		while (c) {
			if (!c->display) {
				c->Y = 0;
				c = c->next;
				continue;
			}

			svg_box(Y, c->start_time, c->end_time, "process3");
			sample = c->io_samples;
			for (sample = c->io_samples; sample; sample = sample->next) {
				double h = (double)sample->bytes / c->max_bytes;

				if (tchart->skip_eagain &&
				    sample->err == -EAGAIN)
					continue;

				if (sample->err)
					h = 1;

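				/*
				 * sync/poll fill the whole row; reads/rx are
				 * drawn in the upper half (svg_ubox) and
				 * writes/tx in the lower half (svg_lbox).
				 */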
				if (sample->type == IOTYPE_SYNC)
					svg_fbox(Y,
						sample->start_time,
						sample->end_time,
						1,
						sample->err ? "error" : "sync",
						sample->fd,
						sample->err,
						sample->merges);
				else if (sample->type == IOTYPE_POLL)
					svg_fbox(Y,
						sample->start_time,
						sample->end_time,
						1,
						sample->err ? "error" : "poll",
						sample->fd,
						sample->err,
						sample->merges);
				else if (sample->type == IOTYPE_READ)
					svg_ubox(Y,
						sample->start_time,
						sample->end_time,
						h,
						sample->err ? "error" : "disk",
						sample->fd,
						sample->err,
						sample->merges);
				else if (sample->type == IOTYPE_WRITE)
					svg_lbox(Y,
						sample->start_time,
						sample->end_time,
						h,
						sample->err ? "error" : "disk",
						sample->fd,
						sample->err,
						sample->merges);
				else if (sample->type == IOTYPE_RX)
					svg_ubox(Y,
						sample->start_time,
						sample->end_time,
						h,
						sample->err ? "error" : "net",
						sample->fd,
						sample->err,
						sample->merges);
				else if (sample->type == IOTYPE_TX)
					svg_lbox(Y,
						sample->start_time,
						sample->end_time,
						h,
						sample->err ? "error" : "net",
						sample->fd,
						sample->err,
						sample->merges);
			}

			suf = "";
			bytes = c->total_bytes;
			if (bytes > 1024) {
				bytes = bytes / 1024;
				suf = "K";
			}
			if (bytes > 1024) {
				bytes = bytes / 1024;
				suf = "M";
			}
			if (bytes > 1024) {
				bytes = bytes / 1024;
				suf = "G";
			}


			sprintf(comm, "%s:%i (%3.1f %sbytes)", c->comm ?: "", p->pid, bytes, suf);
			svg_text(Y, c->start_time, comm);

			c->Y = Y;
			Y++;
			c = c->next;
		}
		p = p->next;
	}
}

static void draw_process_bars(struct timechart *tchart)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	int Y = 0;

	Y = 2 * tchart->numcpus + 2;

	p = tchart->all_data;
	while (p) {
		c = p->all;
		while (c) {
			if (!c->display) {
				c->Y = 0;
				c = c->next;
				continue;
			}

			svg_box(Y, c->start_time, c->end_time, "process");
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_running(Y, sample->cpu,
						    sample->start_time,
						    sample->end_time,
						    sample->backtrace);
				if (sample->type == TYPE_BLOCKED)
					svg_blocked(Y, sample->cpu,
						    sample->start_time,
						    sample->end_time,
						    sample->backtrace);
				if (sample->type == TYPE_WAITING)
					svg_waiting(Y, sample->cpu,
						    sample->start_time,
						    sample->end_time,
						    sample->backtrace);
				sample = sample->next;
			}

			if (c->comm) {
				char comm[256];
				if (c->total_time > 5000000000) /* 5 seconds */
					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0);
				else
					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0);

				svg_text(Y, c->start_time, comm);
			}
			c->Y = Y;
			Y++;
			c = c->next;
		}
		p = p->next;
	}
}

static void add_process_filter(const char *string)
{
	int pid = strtoull(string, NULL, 10);
	struct process_filter *filt = malloc(sizeof(*filt));

	if (!filt)
		return;

	filt->name = strdup(string);
	filt->pid  = pid;
	filt->next = process_filter;

	process_filter = filt;
}

static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
{
	struct process_filter *filt;
	if (!process_filter)
		return 1;

	filt = process_filter;
	while (filt) {
		if (filt->pid && p->pid == filt->pid)
			return 1;
		if (strcmp(filt->name, c->comm) == 0)
			return 1;
		filt = filt->next;
	}
	return 0;
}

static int determine_display_tasks_filtered(struct timechart *tchart)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	p = tchart->all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = tchart->first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = tchart->last_time;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = tchart->first_time;

			if (passes_filter(p, c)) {
				c->display = 1;
				p->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = tchart->last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}

static int determine_display_tasks(struct timechart *tchart, u64 threshold)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	p = tchart->all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = tchart->first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = tchart->last_time;
		if (p->total_time >= threshold)
			p->display = 1;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = tchart->first_time;

			if (c->total_time >= threshold) {
				c->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = tchart->last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}

static int determine_display_io_tasks(struct timechart *timechart, u64 threshold)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	p = timechart->all_data;
	while (p) {
		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = timechart->last_time;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->total_bytes >= threshold) {
				c->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = timechart->last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}

#define BYTES_THRESH (1 * 1024 * 1024)
#define TIME_THRESH 10000000

static void write_svg_file(struct timechart *tchart, const char *filename)
{
	u64 i;
	int count;
	int thresh = tchart->io_events ? BYTES_THRESH : TIME_THRESH;

	if (tchart->power_only)
		tchart->proc_num = 0;

	/* We'd like to show at least proc_num tasks;
	 * be less picky if we have fewer */
	do {
		if (process_filter)
			count = determine_display_tasks_filtered(tchart);
		else if (tchart->io_events)
			count = determine_display_io_tasks(tchart, thresh);
		else
			count = determine_display_tasks(tchart, thresh);
		thresh /= 10;
	} while (!process_filter && thresh && count < tchart->proc_num);

	if (!tchart->proc_num)
		count = 0;

	if (tchart->io_events) {
		open_svg(filename, 0, count, tchart->first_time, tchart->last_time);

		svg_time_grid(0.5);
		svg_io_legenda();

		draw_io_bars(tchart);
	} else {
		open_svg(filename, tchart->numcpus, count, tchart->first_time, tchart->last_time);

		svg_time_grid(0);

		svg_legenda();

		for (i = 0; i < tchart->numcpus; i++)
			svg_cpu_box(i, tchart->max_freq, tchart->turbo_frequency);

		draw_cpu_usage(tchart);
		if (tchart->proc_num)
			draw_process_bars(tchart);
		if (!tchart->tasks_only)
			draw_c_p_states(tchart);
		if (tchart->proc_num)
			draw_wakeups(tchart);
	}

	svg_close();
}

static int process_header(struct perf_file_section *section __maybe_unused,
			  struct perf_header *ph,
			  int feat,
			  int fd __maybe_unused,
			  void *data)
{
	struct timechart *tchart = data;

	switch (feat) {
	case HEADER_NRCPUS:
		tchart->numcpus = ph->env.nr_cpus_avail;
		break;

	case HEADER_CPU_TOPOLOGY:
		if (!tchart->topology)
			break;

		if (svg_build_topology_map(ph->env.sibling_cores,
					   ph->env.nr_sibling_cores,
					   ph->env.sibling_threads,
					   ph->env.nr_sibling_threads))
			fprintf(stderr, "problem building topology\n");
		break;

	default:
		break;
	}

	return 0;
}

static int __cmd_timechart(struct timechart *tchart, const char *output_name)
{
	const struct perf_evsel_str_handler power_tracepoints[] = {
		{ "power:cpu_idle",		process_sample_cpu_idle },
		{ "power:cpu_frequency",	process_sample_cpu_frequency },
		{ "sched:sched_wakeup",		process_sample_sched_wakeup },
		{ "sched:sched_switch",		process_sample_sched_switch },
#ifdef SUPPORT_OLD_POWER_EVENTS
		{ "power:power_start",		process_sample_power_start },
		{ "power:power_end",		process_sample_power_end },
		{ "power:power_frequency",	process_sample_power_frequency },
#endif

		{ "syscalls:sys_enter_read",		process_enter_read },
		{ "syscalls:sys_enter_pread64",		process_enter_read },
		{ "syscalls:sys_enter_readv",		process_enter_read },
		{ "syscalls:sys_enter_preadv",		process_enter_read },
		{ "syscalls:sys_enter_write",		process_enter_write },
		{ "syscalls:sys_enter_pwrite64",	process_enter_write },
		{ "syscalls:sys_enter_writev",		process_enter_write },
		{ "syscalls:sys_enter_pwritev",		process_enter_write },
		{ "syscalls:sys_enter_sync",		process_enter_sync },
		{ "syscalls:sys_enter_sync_file_range",	process_enter_sync },
		{ "syscalls:sys_enter_fsync",		process_enter_sync },
		{ "syscalls:sys_enter_msync",		process_enter_sync },
		{ "syscalls:sys_enter_recvfrom",	process_enter_rx },
		{ "syscalls:sys_enter_recvmmsg",	process_enter_rx },
		{ "syscalls:sys_enter_recvmsg",		process_enter_rx },
		{ "syscalls:sys_enter_sendto",		process_enter_tx },
		{ "syscalls:sys_enter_sendmsg",		process_enter_tx },
		{ "syscalls:sys_enter_sendmmsg",	process_enter_tx },
		{ "syscalls:sys_enter_epoll_pwait",	process_enter_poll },
		{ "syscalls:sys_enter_epoll_wait",	process_enter_poll },
		{ "syscalls:sys_enter_poll",		process_enter_poll },
		{ "syscalls:sys_enter_ppoll",		process_enter_poll },
		{ "syscalls:sys_enter_pselect6",	process_enter_poll },
		{ "syscalls:sys_enter_select",		process_enter_poll },

		{ "syscalls:sys_exit_read",		process_exit_read },
		{ "syscalls:sys_exit_pread64",		process_exit_read },
		{ "syscalls:sys_exit_readv",		process_exit_read },
		{ "syscalls:sys_exit_preadv",		process_exit_read },
		{ "syscalls:sys_exit_write",		process_exit_write },
		{ "syscalls:sys_exit_pwrite64",		process_exit_write },
		{ "syscalls:sys_exit_writev",		process_exit_write },
		{ "syscalls:sys_exit_pwritev",		process_exit_write },
		{ "syscalls:sys_exit_sync",		process_exit_sync },
		{ "syscalls:sys_exit_sync_file_range",	process_exit_sync },
		{ "syscalls:sys_exit_fsync",		process_exit_sync },
		{ "syscalls:sys_exit_msync",		process_exit_sync },
		{ "syscalls:sys_exit_recvfrom",		process_exit_rx },
		{ "syscalls:sys_exit_recvmmsg",		process_exit_rx },
		{ "syscalls:sys_exit_recvmsg",		process_exit_rx },
		{ "syscalls:sys_exit_sendto",		process_exit_tx },
		{ "syscalls:sys_exit_sendmsg",		process_exit_tx },
		{ "syscalls:sys_exit_sendmmsg",		process_exit_tx },
		{ "syscalls:sys_exit_epoll_pwait",	process_exit_poll },
		{ "syscalls:sys_exit_epoll_wait",	process_exit_poll },
		{ "syscalls:sys_exit_poll",		process_exit_poll },
		{ "syscalls:sys_exit_ppoll",		process_exit_poll },
		{ "syscalls:sys_exit_pselect6",		process_exit_poll },
		{ "syscalls:sys_exit_select",		process_exit_poll },
	};
	struct perf_data_file file = {
		.path = input_name,
		.mode = PERF_DATA_MODE_READ,
		.force = tchart->force,
	};

	struct perf_session *session = perf_session__new(&file, false,
							 &tchart->tool);
	int ret = -EINVAL;

	if (session == NULL)
		return -1;

	symbol__init(&session->header.env);

	(void)perf_header__process_sections(&session->header,
					    perf_data_file__fd(session->file),
					    tchart,
					    process_header);

	if (!perf_session__has_traces(session, "timechart record"))
		goto out_delete;

	if (perf_session__set_tracepoints_handlers(session,
						   power_tracepoints)) {
		pr_err("Initializing session tracepoint handlers failed\n");
		goto out_delete;
	}

	ret = perf_session__process_events(session);
	if (ret)
		goto out_delete;

	end_sample_processing(tchart);

	sort_pids(tchart);

	write_svg_file(tchart, output_name);

	pr_info("Written %2.1f seconds of trace to %s.\n",
		(tchart->last_time - tchart->first_time) / 1000000000.0, output_name);
out_delete:
	perf_session__delete(session);
	return ret;
}

static int timechart__io_record(int argc, const char **argv)
{
	unsigned int rec_argc, i;
	const char **rec_argv;
	const char **p;
	char *filter = NULL;

	const char * const common_args[] = {
		"record", "-a", "-R", "-c", "1",
	};
	unsigned int common_args_nr = ARRAY_SIZE(common_args);

	const char * const disk_events[] = {
		"syscalls:sys_enter_read",
		"syscalls:sys_enter_pread64",
		"syscalls:sys_enter_readv",
		"syscalls:sys_enter_preadv",
		"syscalls:sys_enter_write",
		"syscalls:sys_enter_pwrite64",
		"syscalls:sys_enter_writev",
		"syscalls:sys_enter_pwritev",
		"syscalls:sys_enter_sync",
		"syscalls:sys_enter_sync_file_range",
		"syscalls:sys_enter_fsync",
		"syscalls:sys_enter_msync",

		"syscalls:sys_exit_read",
		"syscalls:sys_exit_pread64",
		"syscalls:sys_exit_readv",
		"syscalls:sys_exit_preadv",
		"syscalls:sys_exit_write",
		"syscalls:sys_exit_pwrite64",
		"syscalls:sys_exit_writev",
		"syscalls:sys_exit_pwritev",
		"syscalls:sys_exit_sync",
		"syscalls:sys_exit_sync_file_range",
		"syscalls:sys_exit_fsync",
		"syscalls:sys_exit_msync",
	};
	unsigned int disk_events_nr = ARRAY_SIZE(disk_events);

	const char * const net_events[] = {
		"syscalls:sys_enter_recvfrom",
		"syscalls:sys_enter_recvmmsg",
		"syscalls:sys_enter_recvmsg",
		"syscalls:sys_enter_sendto",
		"syscalls:sys_enter_sendmsg",
		"syscalls:sys_enter_sendmmsg",

		"syscalls:sys_exit_recvfrom",
		"syscalls:sys_exit_recvmmsg",
		"syscalls:sys_exit_recvmsg",
		"syscalls:sys_exit_sendto",
		"syscalls:sys_exit_sendmsg",
		"syscalls:sys_exit_sendmmsg",
	};
	unsigned int net_events_nr = ARRAY_SIZE(net_events);

	const char * const poll_events[] = {
		"syscalls:sys_enter_epoll_pwait",
		"syscalls:sys_enter_epoll_wait",
		"syscalls:sys_enter_poll",
		"syscalls:sys_enter_ppoll",
		"syscalls:sys_enter_pselect6",
		"syscalls:sys_enter_select",

		"syscalls:sys_exit_epoll_pwait",
		"syscalls:sys_exit_epoll_wait",
		"syscalls:sys_exit_poll",
		"syscalls:sys_exit_ppoll",
		"syscalls:sys_exit_pselect6",
		"syscalls:sys_exit_select",
	};
	unsigned int poll_events_nr = ARRAY_SIZE(poll_events);

	rec_argc = common_args_nr +
		disk_events_nr * 4 +
		net_events_nr * 4 +
		poll_events_nr * 4 +
		argc;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	if (asprintf(&filter, "common_pid != %d", getpid()) < 0)
		return -ENOMEM;

	p = rec_argv;
	for (i = 0; i < common_args_nr; i++)
		*p++ = strdup(common_args[i]);

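	/* each valid tracepoint adds "-e <event> --filter <expr>": 4 argv slots */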
	for (i = 0; i < disk_events_nr; i++) {
		if (!is_valid_tracepoint(disk_events[i])) {
			rec_argc -= 4;
			continue;
		}

		*p++ = "-e";
		*p++ = strdup(disk_events[i]);
		*p++ = "--filter";
		*p++ = filter;
	}
	for (i = 0; i < net_events_nr; i++) {
		if (!is_valid_tracepoint(net_events[i])) {
			rec_argc -= 4;
			continue;
		}

		*p++ = "-e";
		*p++ = strdup(net_events[i]);
		*p++ = "--filter";
		*p++ = filter;
	}
	for (i = 0; i < poll_events_nr; i++) {
		if (!is_valid_tracepoint(poll_events[i])) {
			rec_argc -= 4;
			continue;
		}

		*p++ = "-e";
		*p++ = strdup(poll_events[i]);
		*p++ = "--filter";
		*p++ = filter;
	}

	for (i = 0; i < (unsigned int)argc; i++)
		*p++ = argv[i];

	return cmd_record(rec_argc, rec_argv, NULL);
}


static int timechart__record(struct timechart *tchart, int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char **p;
	unsigned int record_elems;

	const char * const common_args[] = {
		"record", "-a", "-R", "-c", "1",
	};
	unsigned int common_args_nr = ARRAY_SIZE(common_args);

	const char * const backtrace_args[] = {
		"-g",
	};
	unsigned int backtrace_args_no = ARRAY_SIZE(backtrace_args);

	const char * const power_args[] = {
		"-e", "power:cpu_frequency",
		"-e", "power:cpu_idle",
	};
	unsigned int power_args_nr = ARRAY_SIZE(power_args);

	const char * const old_power_args[] = {
#ifdef SUPPORT_OLD_POWER_EVENTS
		"-e", "power:power_start",
		"-e", "power:power_end",
		"-e", "power:power_frequency",
#endif
	};
	unsigned int old_power_args_nr = ARRAY_SIZE(old_power_args);

	const char * const tasks_args[] = {
		"-e", "sched:sched_wakeup",
		"-e", "sched:sched_switch",
	};
	unsigned int tasks_args_nr = ARRAY_SIZE(tasks_args);

#ifdef SUPPORT_OLD_POWER_EVENTS
	if (!is_valid_tracepoint("power:cpu_idle") &&
	    is_valid_tracepoint("power:power_start")) {
		use_old_power_events = 1;
		power_args_nr = 0;
	} else {
		old_power_args_nr = 0;
	}
#endif

	if (tchart->power_only)
		tasks_args_nr = 0;

	if (tchart->tasks_only) {
		power_args_nr = 0;
		old_power_args_nr = 0;
	}

	if (!tchart->with_backtrace)
		backtrace_args_no = 0;

	record_elems = common_args_nr + tasks_args_nr +
		power_args_nr + old_power_args_nr + backtrace_args_no;

	rec_argc = record_elems + argc;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	p = rec_argv;
	for (i = 0; i < common_args_nr; i++)
		*p++ = strdup(common_args[i]);

	for (i = 0; i < backtrace_args_no; i++)
		*p++ = strdup(backtrace_args[i]);

	for (i = 0; i < tasks_args_nr; i++)
		*p++ = strdup(tasks_args[i]);

	for (i = 0; i < power_args_nr; i++)
		*p++ = strdup(power_args[i]);

	for (i = 0; i < old_power_args_nr; i++)
		*p++ = strdup(old_power_args[i]);

	for (j = 0; j < (unsigned int)argc; j++)
		*p++ = argv[j];

	return cmd_record(rec_argc, rec_argv, NULL);
}

static int
parse_process(const struct option *opt __maybe_unused, const char *arg,
	      int __maybe_unused unset)
{
	if (arg)
		add_process_filter(arg);
	return 0;
}

static int
parse_highlight(const struct option *opt __maybe_unused, const char *arg,
		int __maybe_unused unset)
{
	unsigned long duration = strtoul(arg, NULL, 0);

	if (svg_highlight || svg_highlight_name)
		return -1;

	if (duration)
		svg_highlight = duration;
	else
		svg_highlight_name = strdup(arg);

	return 0;
}

static int
parse_time(const struct option *opt, const char *arg, int __maybe_unused unset)
{
	char unit = 'n';
	u64 *value = opt->value;

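	/* accept a plain nanosecond count or a value suffixed with ms/us/ns */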
	if (sscanf(arg, "%" PRIu64 "%cs", value, &unit) > 0) {
		switch (unit) {
		case 'm':
			*value *= 1000000;
			break;
		case 'u':
			*value *= 1000;
			break;
		case 'n':
			break;
		default:
			return -1;
		}
	}

	return 0;
}

int cmd_timechart(int argc, const char **argv,
		  const char *prefix __maybe_unused)
{
	struct timechart tchart = {
		.tool = {
			.comm		 = process_comm_event,
			.fork		 = process_fork_event,
			.exit		 = process_exit_event,
			.sample		 = process_sample_event,
			.ordered_events	 = true,
		},
		.proc_num = 15,
		.min_time = 1000000,
		.merge_dist = 1000,
	};
	const char *output_name = "output.svg";
	const struct option timechart_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_INTEGER('w', "width", &svg_page_width, "page width"),
	OPT_CALLBACK(0, "highlight", NULL, "duration or task name",
		      "highlight tasks. Pass duration in ns or process name.",
		       parse_highlight),
	OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
	OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only,
		    "output processes data only"),
	OPT_CALLBACK('p', "process", NULL, "process",
		      "process selector. Pass a pid or process name.",
		       parse_process),
	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
		    "Look for files with symbols relative to this directory"),
	OPT_INTEGER('n', "proc-num", &tchart.proc_num,
		    "min. number of tasks to print"),
	OPT_BOOLEAN('t', "topology", &tchart.topology,
		    "sort CPUs according to topology"),
	OPT_BOOLEAN(0, "io-skip-eagain", &tchart.skip_eagain,
		    "skip EAGAIN errors"),
	OPT_CALLBACK(0, "io-min-time", &tchart.min_time, "time",
		     "all IO faster than min-time will visually appear longer",
		     parse_time),
	OPT_CALLBACK(0, "io-merge-dist", &tchart.merge_dist, "time",
		     "merge events that are merge-dist us apart",
		     parse_time),
	OPT_BOOLEAN('f', "force", &tchart.force, "don't complain, do it"),
	OPT_END()
	};
	const char * const timechart_subcommands[] = { "record", NULL };
	const char *timechart_usage[] = {
		"perf timechart [<options>] {record}",
		NULL
	};

	const struct option timechart_record_options[] = {
	OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
	OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only,
		    "output processes data only"),
	OPT_BOOLEAN('I', "io-only", &tchart.io_only,
		    "record only IO data"),
	OPT_BOOLEAN('g', "callchain", &tchart.with_backtrace, "record callchain"),
	OPT_END()
	};
	const char * const timechart_record_usage[] = {
		"perf timechart record [<options>]",
		NULL
	};
	argc = parse_options_subcommand(argc, argv, timechart_options, timechart_subcommands,
			timechart_usage, PARSE_OPT_STOP_AT_NON_OPTION);

	if (tchart.power_only && tchart.tasks_only) {
		pr_err("-P and -T options cannot be used at the same time.\n");
		return -1;
	}

	if (argc && !strncmp(argv[0], "rec", 3)) {
		argc = parse_options(argc, argv, timechart_record_options,
				     timechart_record_usage,
				     PARSE_OPT_STOP_AT_NON_OPTION);

		if (tchart.power_only && tchart.tasks_only) {
			pr_err("-P and -T options cannot be used at the same time.\n");
			return -1;
		}

		if (tchart.io_only)
			return timechart__io_record(argc, argv);
		else
			return timechart__record(&tchart, argc, argv);
	} else if (argc)
		usage_with_options(timechart_usage, timechart_options);

	setup_pager();

	return __cmd_timechart(&tchart, output_name);
}