/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

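/* Nonzero while a critical section is being timed on this CPU */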
static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
        TRACER_IRQS_OFF         = (1 << 1),
        TRACER_PREEMPT_OFF      = (1 << 2),
};

static int trace_type __read_mostly;

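/*
 * Snapshot of tr->trace_flags taken at init time; irqsoff_tracer_reset()
 * consults it to restore the latency-format and overwrite bits.
 */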
static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
        return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 * incremented.
 * 0 if the trace is to be ignored, and data->disabled
 * is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
                           struct trace_array_cpu **data,
                           unsigned long *flags)
{
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return 0;

        local_save_flags(*flags);
        /*
         * Slight chance to get a false positive on tracing_cpu,
         * although I'm starting to think there isn't a chance.
         * Leave this for now just to be paranoid.
         */
        if (!irqs_disabled_flags(*flags) && !preempt_count())
                return 0;

        *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&(*data)->disabled);

        if (likely(disabled == 1))
                return 1;

        atomic_dec(&(*data)->disabled);

        return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        trace_function(tr, ip, parent_ip, flags, preempt_count());

        atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
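/* Switch between function and function-graph output, restarting the tracer */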
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
        int cpu;

        if (!(is_graph(tr) ^ set))
                return 0;

        stop_irqsoff_tracer(irqsoff_trace, !set);

        for_each_possible_cpu(cpu)
                per_cpu(tracing_cpu, cpu) = 0;

        tr->max_latency = 0;
        tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

        return start_irqsoff_tracer(irqsoff_trace, set);
}

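/*
 * Graph tracer entry/return callbacks: record an event only while a
 * critical section is being timed on this CPU (see func_prolog_dec()).
 */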
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int ret;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return 0;

        pc = preempt_count();
        ret = __trace_graph_entry(tr, trace, flags, pc);
        atomic_dec(&data->disabled);

        return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        pc = preempt_count();
        __trace_graph_return(tr, trace, flags, pc);
        atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
        if (is_graph(iter->tr))
                graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
        if (iter->private)
                graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
                            TRACE_GRAPH_PRINT_PROC | \
                            TRACE_GRAPH_PRINT_ABS_TIME | \
                            TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        /*
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
        if (is_graph(iter->tr))
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
        struct trace_array *tr = irqsoff_trace;

        if (is_graph(tr))
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned long flags, int pc)
{
        if (is_graph(tr))
                trace_graph_function(tr, ip, parent_ip, flags, pc);
        else
                trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

#ifdef CONFIG_FUNCTION_TRACER
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        return -1;
}
#endif

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
        trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
        trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, cycle_t delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return false;
        } else {
                if (delta <= tr->max_latency)
                        return false;
        }
        return true;
}

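/*
 * Called when a critical section ends: compute the delta since the
 * section began and, if it is a new maximum, record the snapshot.
 */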
static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        cycle_t T0, T1, delta;
        unsigned long flags;
        int pc;

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1 - T0;

        local_save_flags(flags);

        pc = preempt_count();

        if (!report_latency(tr, delta))
                goto out;

        raw_spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(tr, delta))
                goto out_unlock;

        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
        /* Skip 5 functions to get to the irq/preempt enable function */
        __trace_stack(tr, flags, 5, pc);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        data->critical_end = parent_ip;

        if (likely(!is_tracing_stopped())) {
                tr->max_latency = delta;
                update_max_tr_single(tr, current, cpu);
        }

        max_sequence++;

out_unlock:
        raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

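/*
 * Mark the beginning of a critical section on this CPU: stamp the start
 * time and address, and set the per-cpu tracing_cpu flag.
 */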
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = per_cpu_ptr(tr->trace_buffer.data, cpu);

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;

        local_save_flags(flags);

        __trace_function(tr, ip, parent_ip, flags, preempt_count());

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}

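/*
 * Mark the end of a critical section on this CPU: clear the tracing_cpu
 * flag and hand off to check_critical_timing() to evaluate the latency.
 */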
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        data = per_cpu_ptr(tr->trace_buffer.data, cpu);

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        local_save_flags(flags);
        __trace_function(tr, ip, parent_ip, flags, preempt_count());
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to suspend the timing over known quiet sections (e.g. idle) */
void start_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
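/*
 * With lockdep (PROVE_LOCKING) enabled, lockdep provides the
 * trace_hardirqs_*() entry points and calls back into these timing hooks.
 */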
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

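/*
 * Preemption on/off hooks: when irqs are also being traced and are off,
 * the irq events take precedence, so only pure preempt-off sections are
 * timed here.
 */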
#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
        if (preempt_trace() && !irq_trace())
                stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
        if (preempt_trace() && !irq_trace())
                start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        int ret;

        /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
        if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
                return 0;

        if (graph)
                ret = register_ftrace_graph(&irqsoff_graph_return,
                                            &irqsoff_graph_entry);
        else
                ret = register_ftrace_function(tr->ops);

        if (!ret)
                function_enabled = true;

        return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
        if (!function_enabled)
                return;

        if (graph)
                unregister_ftrace_graph();
        else
                unregister_ftrace_function(tr->ops);

        function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
        if (!(mask & TRACE_ITER_FUNCTION))
                return 0;

        if (set)
                register_irqsoff_function(tr, is_graph(tr), 1);
        else
                unregister_irqsoff_function(tr, is_graph(tr));
        return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
        return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

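/* React to runtime changes of the function, display-graph and overwrite flags */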
static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
        struct tracer *tracer = tr->current_trace;

        if (irqsoff_function_set(tr, mask, set))
                return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (mask & TRACE_ITER_DISPLAY_GRAPH)
                return irqsoff_display_graph(tr, set);
#endif

        return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
        int ret;

        ret = register_irqsoff_function(tr, graph, 0);

        if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;

        return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
        tracer_enabled = 0;

        unregister_irqsoff_function(tr, graph);
}

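/* Only one trace instance may use the irqsoff tracers at a time */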
static bool irqsoff_busy;

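/*
 * Common init for the irqsoff, preemptoff and preemptirqsoff tracers;
 * the caller sets trace_type before calling this.
 */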
static int __irqsoff_tracer_init(struct trace_array *tr)
{
        if (irqsoff_busy)
                return -EBUSY;

        save_flags = tr->trace_flags;

        /* non overwrite screws up the latency tracers */
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

        tr->max_latency = 0;
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();
        tracing_reset_online_cpus(&tr->trace_buffer);

        ftrace_init_array_ops(tr, irqsoff_tracer_call);

        /* Only toplevel instance supports graph tracing */
        if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
                                      is_graph(tr))))
                printk(KERN_ERR "failed to start irqsoff tracer\n");

        irqsoff_busy = true;
        return 0;
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
        int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

        stop_irqsoff_tracer(tr, is_graph(tr));

        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
        ftrace_reset_array_ops(tr);

        irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
        tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        return __irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
        .name           = "irqsoff",
        .init           = irqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_irqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name           = "preemptoff",
        .init           = preemptoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_preemptoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
        defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name           = "preemptirqsoff",
        .init           = preemptirqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_preemptirqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
        register_irqsoff(irqsoff_tracer);
        register_preemptoff(preemptoff_tracer);
        register_preemptirqsoff(preemptirqsoff_tracer);

        return 0;
}
core_initcall(init_irqsoff_tracer);