/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

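/*
 * Entry types that the selftests below are expected to leave in the
 * ring buffer. Anything else found while draining the buffer is
 * treated as corruption.
 */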
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

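/*
 * Drain every event queued on @cpu and verify that each one has a
 * known entry type. Returns 0 on success; on corruption, tracing is
 * disabled and -1 is returned.
 */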
static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries.
		 * If we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&buf->tr->max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&buf->tr->max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

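/*
 * Hit counters bumped by the test callbacks below. trace_selftest_ops()
 * checks them after each call into the traced test functions.
 */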
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func			= trace_selftest_test_probe1_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
	.func			= trace_selftest_test_probe2_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
	.func			= trace_selftest_test_probe3_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

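/*
 * Exercise several ftrace_ops with different filters: probe1 traces
 * DYN_FTRACE_TEST_NAME, probe2 traces DYN_FTRACE_TEST_NAME2, probe3
 * traces both, and (when cnt > 1) the trace_array's own ops traces
 * everything. A dynamically allocated ops is added part way through,
 * and the hit counters are checked after each call to the test
 * functions.
 */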
static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	/* First time we are running with main function */
	if (cnt > 1) {
		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
		register_ftrace_function(tr->ops);
	}

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out;
	}

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	if (cnt > 1)
		unregister_ftrace_function(tr->ops);
	ftrace_reset_array_ops(tr);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
						  struct trace_array *tr,
						  int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it out */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	ftrace_enabled = 1;
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(tr, 1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(tr, 2);

	return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct pt_regs *pt_regs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct pt_regs *pt_regs)
{
	/*
	 * We said we would provide our own recursion protection. By
	 * calling this function again, we should recurse back into this
	 * function and count again. But this only happens if the arch
	 * supports all of the ftrace features and nothing else is using
	 * the function tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
	.func			= trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
	.func			= trace_selftest_test_recursion_safe_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

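/*
 * Register the two recursion probes on DYN_FTRACE_TEST_NAME and check
 * that the callback without RECURSION_SAFE is called exactly once
 * (ftrace itself blocks the recursion), while the recursion-safe
 * callback is allowed to recurse exactly once.
 */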
static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 1) {
		pr_cont("*callback not called once (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

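/* Records whether the regs test callback received a valid pt_regs */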
static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct pt_regs *pt_regs)
{
	if (pt_regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
	.func		= trace_selftest_test_regs_func,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};

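/*
 * Verify FTRACE_OPS_FL_SAVE_REGS: with arch support the callback must
 * see a non-NULL pt_regs; without it, registration must fail unless
 * SAVE_REGS_IF_SUPPORTED is also set.
 */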
static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	ftrace_enabled = 1;
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer selftest,
 * from which this one has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
					struct trace_array *tr)
{
	int ret;
	unsigned long count;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
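/*
 * Run the irqsoff tracer across a 100us irqs-off window and check that
 * both the live buffer and the max (snapshot) buffer have entries.
 */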
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer before tracing_stop() to avoid a warning
	 * from a failed buffer swap: tracing_stop() disables the tr
	 * and max buffers, which makes swapping impossible if a max
	 * irqs-off latency is recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
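/*
 * Same idea as the irqsoff test, but the latency window is created by
 * disabling preemption instead of interrupts.
 */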
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer before tracing_stop() to avoid a warning
	 * from a failed buffer swap: tracing_stop() disables the tr
	 * and max buffers, which makes swapping impossible if a max
	 * preempt-off latency is recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
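/*
 * Combined test: create overlapping preempt-off and irqs-off windows,
 * in both orders, and check the trace and max buffers after each pass.
 */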
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer before tracing_stop() to avoid a warning
	 * from a failed buffer swap: tracing_stop() disables the tr
	 * and max buffers, which makes swapping impossible if a max
	 * irqs/preempt-off latency is recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tr->max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER

struct wakeup_test_data {
	struct completion	is_ready;
	int			go;
};

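/*
 * Helper thread for the wakeup tracer test: it switches itself to
 * SCHED_DEADLINE, signals that it is ready, then sleeps until the test
 * wakes it up so the wakeup latency can be measured.
 */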
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a -deadline thread */
	static const struct sched_attr attr = {
		.sched_policy = SCHED_DEADLINE,
		.sched_runtime = 100000ULL,
		.sched_deadline = 10000000ULL,
		.sched_period = 10000000ULL
	};
	struct wakeup_test_data *x = data;

	sched_setattr(current, &attr);

	/* Let the test know we are now running with the new policy */
	complete(&x->is_ready);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!x->go) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	complete(&x->is_ready);

	set_current_state(TASK_INTERRUPTIBLE);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}
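
/*
 * Wakeup latency selftest: spawn the -deadline helper above, wake it
 * while tracing, and expect the max buffer to have recorded the wakeup.
 */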
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	struct task_struct *p;
	struct wakeup_test_data data;
	unsigned long count;
	int ret;

	memset(&data, 0, sizeof(data));

	init_completion(&data.is_ready);

	/* create a -deadline thread */
	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at -deadline policy */
	wait_for_completion(&data.is_ready);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	while (p->on_rq) {
		/*
		 * Sleep to make sure the -deadline thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&data.is_ready);

	data.go = 1;
	/* memory barrier is in the wake_up_process() */

	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&data.is_ready);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);

	trace->reset(tr);
	tracing_start();

	tr->max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
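/* Sanity check for the context switch tracer: just look for any entries */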
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
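/* Sanity check for the branch tracer: just look for any entries */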
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */