root/kernel/trace/trace_irqsoff.c


DEFINITIONS

This source file includes the following definitions:
  1. preempt_trace
  2. irq_trace
  3. irqsoff_display_graph
  4. func_prolog_dec
  5. irqsoff_tracer_call
  6. irqsoff_display_graph
  7. irqsoff_graph_entry
  8. irqsoff_graph_return
  9. irqsoff_trace_open
  10. irqsoff_trace_close
  11. irqsoff_print_line
  12. irqsoff_print_header
  13. __trace_function
  14. irqsoff_print_line
  15. irqsoff_trace_open
  16. irqsoff_trace_close
  17. irqsoff_print_header
  18. irqsoff_print_header
  19. report_latency
  20. check_critical_timing
  21. start_critical_timing
  22. stop_critical_timing
  23. start_critical_timings
  24. stop_critical_timings
  25. register_irqsoff_function
  26. unregister_irqsoff_function
  27. irqsoff_function_set
  28. register_irqsoff_function
  29. unregister_irqsoff_function
  30. irqsoff_function_set
  31. irqsoff_flag_changed
  32. start_irqsoff_tracer
  33. stop_irqsoff_tracer
  34. __irqsoff_tracer_init
  35. __irqsoff_tracer_reset
  36. irqsoff_tracer_start
  37. irqsoff_tracer_stop
  38. tracer_hardirqs_on
  39. tracer_hardirqs_off
  40. irqsoff_tracer_init
  41. irqsoff_tracer_reset
  42. tracer_preempt_on
  43. tracer_preempt_off
  44. preemptoff_tracer_init
  45. preemptoff_tracer_reset
  46. preemptirqsoff_tracer_init
  47. preemptirqsoff_tracer_reset
  48. init_irqsoff_tracer
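
For orientation, a minimal user-space sketch (not part of this file) of how the tracers defined below are typically exercised through tracefs. The tracefs mount point is assumed to be /sys/kernel/tracing (older setups use /sys/kernel/debug/tracing), and tracefs_write() is a hypothetical helper:

    #include <stdio.h>

    /* Hypothetical helper: write a value into a tracefs control file. */
    static int tracefs_write(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");

            if (!f)
                    return -1;
            fputs(val, f);
            return fclose(f);
    }

    int main(void)
    {
            char buf[64];
            FILE *f;

            /* Select the "irqsoff" tracer registered at the bottom of this file. */
            tracefs_write("/sys/kernel/tracing/current_tracer", "irqsoff");

            /* ... run the workload of interest ... */

            /* Worst-case irqs-off latency observed so far, in microseconds. */
            f = fopen("/sys/kernel/tracing/tracing_max_latency", "r");
            if (f && fgets(buf, sizeof(buf), f))
                    printf("max latency: %s", buf);
            if (f)
                    fclose(f);
            return 0;
    }

The same flow applies to the "preemptoff" and "preemptirqsoff" tracers; only the name written to current_tracer changes.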

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * trace irqs off critical timings
   4  *
   5  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
   6  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
   7  *
   8  * From code in the latency_tracer, that is:
   9  *
  10  *  Copyright (C) 2004-2006 Ingo Molnar
  11  *  Copyright (C) 2004 Nadia Yvette Chambers
  12  */
  13 #include <linux/kallsyms.h>
  14 #include <linux/uaccess.h>
  15 #include <linux/module.h>
  16 #include <linux/ftrace.h>
  17 #include <linux/kprobes.h>
  18 
  19 #include "trace.h"
  20 
  21 #include <trace/events/preemptirq.h>
  22 
  23 #if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
  24 static struct trace_array               *irqsoff_trace __read_mostly;
  25 static int                              tracer_enabled __read_mostly;
  26 
  27 static DEFINE_PER_CPU(int, tracing_cpu);
  28 
  29 static DEFINE_RAW_SPINLOCK(max_trace_lock);
  30 
  31 enum {
  32         TRACER_IRQS_OFF         = (1 << 1),
  33         TRACER_PREEMPT_OFF      = (1 << 2),
  34 };
  35 
  36 static int trace_type __read_mostly;
  37 
  38 static int save_flags;
  39 
  40 static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
  41 static int start_irqsoff_tracer(struct trace_array *tr, int graph);
  42 
  43 #ifdef CONFIG_PREEMPT_TRACER
  44 static inline int
  45 preempt_trace(int pc)
  46 {
  47         return ((trace_type & TRACER_PREEMPT_OFF) && pc);
  48 }
  49 #else
  50 # define preempt_trace(pc) (0)
  51 #endif
  52 
  53 #ifdef CONFIG_IRQSOFF_TRACER
  54 static inline int
  55 irq_trace(void)
  56 {
  57         return ((trace_type & TRACER_IRQS_OFF) &&
  58                 irqs_disabled());
  59 }
  60 #else
  61 # define irq_trace() (0)
  62 #endif
  63 
  64 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  65 static int irqsoff_display_graph(struct trace_array *tr, int set);
  66 # define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
  67 #else
  68 static inline int irqsoff_display_graph(struct trace_array *tr, int set)
  69 {
  70         return -EINVAL;
  71 }
  72 # define is_graph(tr) false
  73 #endif
  74 
  75 /*
  76  * Sequence count - we record it when starting a measurement and
  77  * skip the latency if the sequence has changed - some other section
  78  * did a maximum and could disturb our measurement with serial console
  79  * printouts, etc. Truly coinciding maximum latencies should be rare
  80  * and what happens together happens separately as well, so this doesn't
  81  * decrease the validity of the maximum found:
  82  */
  83 static __cacheline_aligned_in_smp       unsigned long max_sequence;
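/*
 * Illustration of the scheme above (not in the original source): CPU 0
 * starts a measurement while max_sequence is, say, 5 and records
 * critical_sequence = 5.  CPU 1 then records a new maximum, bumping
 * max_sequence to 6 (and possibly printing to a serial console, inflating
 * CPU 0's section).  When CPU 0 finishes, 5 != 6, so its latency is
 * skipped rather than reported as a new maximum.
 */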
  84 
  85 #ifdef CONFIG_FUNCTION_TRACER
  86 /*
  87  * Prologue for the preempt and irqs off function tracers.
  88  *
  89  * Returns 1 if it is OK to continue, and data->disabled is
  90  *            incremented.
  91  *         0 if the trace is to be ignored, and data->disabled
  92  *            is kept the same.
  93  *
  94  * Note, this function is also used outside this ifdef but
  95  *  inside the #ifdef of the function graph tracer below.
  96  *  This is OK, since the function graph tracer is
  97  *  dependent on the function tracer.
  98  */
  99 static int func_prolog_dec(struct trace_array *tr,
 100                            struct trace_array_cpu **data,
 101                            unsigned long *flags)
 102 {
 103         long disabled;
 104         int cpu;
 105 
 106         /*
 107          * Does not matter if we preempt. We test the flags
 108          * afterward, to see if irqs are disabled or not.
 109          * If we preempt and get a false positive, the flags
 110          * test will fail.
 111          */
 112         cpu = raw_smp_processor_id();
 113         if (likely(!per_cpu(tracing_cpu, cpu)))
 114                 return 0;
 115 
 116         local_save_flags(*flags);
 117         /*
 118          * Slight chance to get a false positive on tracing_cpu,
 119          * although I'm starting to think there isn't a chance.
 120          * Leave this for now just to be paranoid.
 121          */
 122         if (!irqs_disabled_flags(*flags) && !preempt_count())
 123                 return 0;
 124 
 125         *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 126         disabled = atomic_inc_return(&(*data)->disabled);
 127 
 128         if (likely(disabled == 1))
 129                 return 1;
 130 
 131         atomic_dec(&(*data)->disabled);
 132 
 133         return 0;
 134 }
 135 
 136 /*
 137  * irqsoff uses its own tracer function to keep the overhead down:
 138  */
 139 static void
 140 irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
 141                     struct ftrace_ops *op, struct pt_regs *pt_regs)
 142 {
 143         struct trace_array *tr = irqsoff_trace;
 144         struct trace_array_cpu *data;
 145         unsigned long flags;
 146 
 147         if (!func_prolog_dec(tr, &data, &flags))
 148                 return;
 149 
 150         trace_function(tr, ip, parent_ip, flags, preempt_count());
 151 
 152         atomic_dec(&data->disabled);
 153 }
 154 #endif /* CONFIG_FUNCTION_TRACER */
 155 
 156 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 157 static int irqsoff_display_graph(struct trace_array *tr, int set)
 158 {
 159         int cpu;
 160 
 161         if (!(is_graph(tr) ^ set))
 162                 return 0;
 163 
 164         stop_irqsoff_tracer(irqsoff_trace, !set);
 165 
 166         for_each_possible_cpu(cpu)
 167                 per_cpu(tracing_cpu, cpu) = 0;
 168 
 169         tr->max_latency = 0;
 170         tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);
 171 
 172         return start_irqsoff_tracer(irqsoff_trace, set);
 173 }
 174 
 175 static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
 176 {
 177         struct trace_array *tr = irqsoff_trace;
 178         struct trace_array_cpu *data;
 179         unsigned long flags;
 180         int ret;
 181         int pc;
 182 
 183         if (ftrace_graph_ignore_func(trace))
 184                 return 0;
 185         /*
 186          * Do not trace a function if it's filtered by set_graph_notrace.
 187          * Make the index of ret stack negative to indicate that it should
 188          * ignore further functions.  But it needs its own ret stack entry
 189          * to recover the original index in order to continue tracing after
 190          * returning from the function.
 191          */
 192         if (ftrace_graph_notrace_addr(trace->func))
 193                 return 1;
 194 
 195         if (!func_prolog_dec(tr, &data, &flags))
 196                 return 0;
 197 
 198         pc = preempt_count();
 199         ret = __trace_graph_entry(tr, trace, flags, pc);
 200         atomic_dec(&data->disabled);
 201 
 202         return ret;
 203 }
 204 
 205 static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
 206 {
 207         struct trace_array *tr = irqsoff_trace;
 208         struct trace_array_cpu *data;
 209         unsigned long flags;
 210         int pc;
 211 
 212         ftrace_graph_addr_finish(trace);
 213 
 214         if (!func_prolog_dec(tr, &data, &flags))
 215                 return;
 216 
 217         pc = preempt_count();
 218         __trace_graph_return(tr, trace, flags, pc);
 219         atomic_dec(&data->disabled);
 220 }
 221 
 222 static struct fgraph_ops fgraph_ops = {
 223         .entryfunc              = &irqsoff_graph_entry,
 224         .retfunc                = &irqsoff_graph_return,
 225 };
 226 
 227 static void irqsoff_trace_open(struct trace_iterator *iter)
 228 {
 229         if (is_graph(iter->tr))
 230                 graph_trace_open(iter);
 231 
 232 }
 233 
 234 static void irqsoff_trace_close(struct trace_iterator *iter)
 235 {
 236         if (iter->private)
 237                 graph_trace_close(iter);
 238 }
 239 
 240 #define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
 241                             TRACE_GRAPH_PRINT_PROC | \
 242                             TRACE_GRAPH_PRINT_REL_TIME | \
 243                             TRACE_GRAPH_PRINT_DURATION)
 244 
 245 static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
 246 {
 247         /*
 248          * In graph mode call the graph tracer output function,
 249          * otherwise go with the TRACE_FN event handler
 250          */
 251         if (is_graph(iter->tr))
 252                 return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
 253 
 254         return TRACE_TYPE_UNHANDLED;
 255 }
 256 
 257 static void irqsoff_print_header(struct seq_file *s)
 258 {
 259         struct trace_array *tr = irqsoff_trace;
 260 
 261         if (is_graph(tr))
 262                 print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
 263         else
 264                 trace_default_header(s);
 265 }
 266 
 267 static void
 268 __trace_function(struct trace_array *tr,
 269                  unsigned long ip, unsigned long parent_ip,
 270                  unsigned long flags, int pc)
 271 {
 272         if (is_graph(tr))
 273                 trace_graph_function(tr, ip, parent_ip, flags, pc);
 274         else
 275                 trace_function(tr, ip, parent_ip, flags, pc);
 276 }
 277 
 278 #else
 279 #define __trace_function trace_function
 280 
 281 static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
 282 {
 283         return TRACE_TYPE_UNHANDLED;
 284 }
 285 
 286 static void irqsoff_trace_open(struct trace_iterator *iter) { }
 287 static void irqsoff_trace_close(struct trace_iterator *iter) { }
 288 
 289 #ifdef CONFIG_FUNCTION_TRACER
 290 static void irqsoff_print_header(struct seq_file *s)
 291 {
 292         trace_default_header(s);
 293 }
 294 #else
 295 static void irqsoff_print_header(struct seq_file *s)
 296 {
 297         trace_latency_header(s);
 298 }
 299 #endif /* CONFIG_FUNCTION_TRACER */
 300 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 301 
 302 /*
 303  * Should this new latency be reported/recorded?
 304  */
 305 static bool report_latency(struct trace_array *tr, u64 delta)
 306 {
 307         if (tracing_thresh) {
 308                 if (delta < tracing_thresh)
 309                         return false;
 310         } else {
 311                 if (delta <= tr->max_latency)
 312                         return false;
 313         }
 314         return true;
 315 }
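/*
 * Example (illustrative note): with a nonzero tracing_thresh (set via the
 * tracing_thresh file), every critical section at least that long is
 * recorded, independent of the running maximum; with tracing_thresh == 0,
 * only a section longer than the current tr->max_latency is recorded.
 */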
 316 
 317 static void
 318 check_critical_timing(struct trace_array *tr,
 319                       struct trace_array_cpu *data,
 320                       unsigned long parent_ip,
 321                       int cpu)
 322 {
 323         u64 T0, T1, delta;
 324         unsigned long flags;
 325         int pc;
 326 
 327         T0 = data->preempt_timestamp;
 328         T1 = ftrace_now(cpu);
 329         delta = T1-T0;
 330 
 331         local_save_flags(flags);
 332 
 333         pc = preempt_count();
 334 
 335         if (!report_latency(tr, delta))
 336                 goto out;
 337 
 338         raw_spin_lock_irqsave(&max_trace_lock, flags);
 339 
 340         /* check if we are still the max latency */
 341         if (!report_latency(tr, delta))
 342                 goto out_unlock;
 343 
 344         __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
 345         /* Skip 5 functions to get to the irq/preempt enable function */
 346         __trace_stack(tr, flags, 5, pc);
 347 
 348         if (data->critical_sequence != max_sequence)
 349                 goto out_unlock;
 350 
 351         data->critical_end = parent_ip;
 352 
 353         if (likely(!is_tracing_stopped())) {
 354                 tr->max_latency = delta;
 355                 update_max_tr_single(tr, current, cpu);
 356         }
 357 
 358         max_sequence++;
 359 
 360 out_unlock:
 361         raw_spin_unlock_irqrestore(&max_trace_lock, flags);
 362 
 363 out:
 364         data->critical_sequence = max_sequence;
 365         data->preempt_timestamp = ftrace_now(cpu);
 366         __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
 367 }
 368 
 369 static nokprobe_inline void
 370 start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
 371 {
 372         int cpu;
 373         struct trace_array *tr = irqsoff_trace;
 374         struct trace_array_cpu *data;
 375         unsigned long flags;
 376 
 377         if (!tracer_enabled || !tracing_is_enabled())
 378                 return;
 379 
 380         cpu = raw_smp_processor_id();
 381 
 382         if (per_cpu(tracing_cpu, cpu))
 383                 return;
 384 
 385         data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 386 
 387         if (unlikely(!data) || atomic_read(&data->disabled))
 388                 return;
 389 
 390         atomic_inc(&data->disabled);
 391 
 392         data->critical_sequence = max_sequence;
 393         data->preempt_timestamp = ftrace_now(cpu);
 394         data->critical_start = parent_ip ? : ip;
 395 
 396         local_save_flags(flags);
 397 
 398         __trace_function(tr, ip, parent_ip, flags, pc);
 399 
 400         per_cpu(tracing_cpu, cpu) = 1;
 401 
 402         atomic_dec(&data->disabled);
 403 }
 404 
 405 static nokprobe_inline void
 406 stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
 407 {
 408         int cpu;
 409         struct trace_array *tr = irqsoff_trace;
 410         struct trace_array_cpu *data;
 411         unsigned long flags;
 412 
 413         cpu = raw_smp_processor_id();
 414         /* Always clear the tracing cpu on stopping the trace */
 415         if (unlikely(per_cpu(tracing_cpu, cpu)))
 416                 per_cpu(tracing_cpu, cpu) = 0;
 417         else
 418                 return;
 419 
 420         if (!tracer_enabled || !tracing_is_enabled())
 421                 return;
 422 
 423         data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 424 
 425         if (unlikely(!data) ||
 426             !data->critical_start || atomic_read(&data->disabled))
 427                 return;
 428 
 429         atomic_inc(&data->disabled);
 430 
 431         local_save_flags(flags);
 432         __trace_function(tr, ip, parent_ip, flags, pc);
 433         check_critical_timing(tr, data, parent_ip ? : ip, cpu);
 434         data->critical_start = 0;
 435         atomic_dec(&data->disabled);
 436 }
 437 
  438 /* start and stop critical timings, used to bracket known stoppage (in idle) */
 439 void start_critical_timings(void)
 440 {
 441         int pc = preempt_count();
 442 
 443         if (preempt_trace(pc) || irq_trace())
 444                 start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
 445 }
 446 EXPORT_SYMBOL_GPL(start_critical_timings);
 447 NOKPROBE_SYMBOL(start_critical_timings);
 448 
 449 void stop_critical_timings(void)
 450 {
 451         int pc = preempt_count();
 452 
 453         if (preempt_trace(pc) || irq_trace())
 454                 stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
 455 }
 456 EXPORT_SYMBOL_GPL(stop_critical_timings);
 457 NOKPROBE_SYMBOL(stop_critical_timings);
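/*
 * Illustration (the actual callers live outside this file): the idle path
 * brackets a known-idle, irqs-off section so that time spent idling is not
 * reported as an irqs-off latency, roughly:
 *
 *	stop_critical_timings();
 *	arch_cpu_idle();	- irqs remain disabled while idling
 *	start_critical_timings();
 */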
 458 
 459 #ifdef CONFIG_FUNCTION_TRACER
 460 static bool function_enabled;
 461 
 462 static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
 463 {
 464         int ret;
 465 
 466         /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
 467         if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
 468                 return 0;
 469 
 470         if (graph)
 471                 ret = register_ftrace_graph(&fgraph_ops);
 472         else
 473                 ret = register_ftrace_function(tr->ops);
 474 
 475         if (!ret)
 476                 function_enabled = true;
 477 
 478         return ret;
 479 }
 480 
 481 static void unregister_irqsoff_function(struct trace_array *tr, int graph)
 482 {
 483         if (!function_enabled)
 484                 return;
 485 
 486         if (graph)
 487                 unregister_ftrace_graph(&fgraph_ops);
 488         else
 489                 unregister_ftrace_function(tr->ops);
 490 
 491         function_enabled = false;
 492 }
 493 
 494 static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
 495 {
 496         if (!(mask & TRACE_ITER_FUNCTION))
 497                 return 0;
 498 
 499         if (set)
 500                 register_irqsoff_function(tr, is_graph(tr), 1);
 501         else
 502                 unregister_irqsoff_function(tr, is_graph(tr));
 503         return 1;
 504 }
 505 #else
 506 static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
 507 {
 508         return 0;
 509 }
 510 static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
 511 static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
 512 {
 513         return 0;
 514 }
 515 #endif /* CONFIG_FUNCTION_TRACER */
 516 
 517 static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
 518 {
 519         struct tracer *tracer = tr->current_trace;
 520 
 521         if (irqsoff_function_set(tr, mask, set))
 522                 return 0;
 523 
 524 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 525         if (mask & TRACE_ITER_DISPLAY_GRAPH)
 526                 return irqsoff_display_graph(tr, set);
 527 #endif
 528 
 529         return trace_keep_overwrite(tracer, mask, set);
 530 }
 531 
 532 static int start_irqsoff_tracer(struct trace_array *tr, int graph)
 533 {
 534         int ret;
 535 
 536         ret = register_irqsoff_function(tr, graph, 0);
 537 
 538         if (!ret && tracing_is_enabled())
 539                 tracer_enabled = 1;
 540         else
 541                 tracer_enabled = 0;
 542 
 543         return ret;
 544 }
 545 
 546 static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
 547 {
 548         tracer_enabled = 0;
 549 
 550         unregister_irqsoff_function(tr, graph);
 551 }
 552 
 553 static bool irqsoff_busy;
 554 
 555 static int __irqsoff_tracer_init(struct trace_array *tr)
 556 {
 557         if (irqsoff_busy)
 558                 return -EBUSY;
 559 
 560         save_flags = tr->trace_flags;
 561 
 562         /* non overwrite screws up the latency tracers */
 563         set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
 564         set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
 565 
 566         tr->max_latency = 0;
 567         irqsoff_trace = tr;
 568         /* make sure that the tracer is visible */
 569         smp_wmb();
 570 
 571         ftrace_init_array_ops(tr, irqsoff_tracer_call);
 572 
 573         /* Only toplevel instance supports graph tracing */
 574         if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
 575                                       is_graph(tr))))
 576                 printk(KERN_ERR "failed to start irqsoff tracer\n");
 577 
 578         irqsoff_busy = true;
 579         return 0;
 580 }
 581 
 582 static void __irqsoff_tracer_reset(struct trace_array *tr)
 583 {
 584         int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
 585         int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
 586 
 587         stop_irqsoff_tracer(tr, is_graph(tr));
 588 
 589         set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
 590         set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
 591         ftrace_reset_array_ops(tr);
 592 
 593         irqsoff_busy = false;
 594 }
 595 
 596 static void irqsoff_tracer_start(struct trace_array *tr)
 597 {
 598         tracer_enabled = 1;
 599 }
 600 
 601 static void irqsoff_tracer_stop(struct trace_array *tr)
 602 {
 603         tracer_enabled = 0;
 604 }
 605 
 606 #ifdef CONFIG_IRQSOFF_TRACER
 607 /*
 608  * We are only interested in hardirq on/off events:
 609  */
 610 void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
 611 {
 612         unsigned int pc = preempt_count();
 613 
 614         if (!preempt_trace(pc) && irq_trace())
 615                 stop_critical_timing(a0, a1, pc);
 616 }
 617 NOKPROBE_SYMBOL(tracer_hardirqs_on);
 618 
 619 void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
 620 {
 621         unsigned int pc = preempt_count();
 622 
 623         if (!preempt_trace(pc) && irq_trace())
 624                 start_critical_timing(a0, a1, pc);
 625 }
 626 NOKPROBE_SYMBOL(tracer_hardirqs_off);
 627 
 628 static int irqsoff_tracer_init(struct trace_array *tr)
 629 {
 630         trace_type = TRACER_IRQS_OFF;
 631 
 632         return __irqsoff_tracer_init(tr);
 633 }
 634 
 635 static void irqsoff_tracer_reset(struct trace_array *tr)
 636 {
 637         __irqsoff_tracer_reset(tr);
 638 }
 639 
 640 static struct tracer irqsoff_tracer __read_mostly =
 641 {
 642         .name           = "irqsoff",
 643         .init           = irqsoff_tracer_init,
 644         .reset          = irqsoff_tracer_reset,
 645         .start          = irqsoff_tracer_start,
 646         .stop           = irqsoff_tracer_stop,
 647         .print_max      = true,
 648         .print_header   = irqsoff_print_header,
 649         .print_line     = irqsoff_print_line,
 650         .flag_changed   = irqsoff_flag_changed,
 651 #ifdef CONFIG_FTRACE_SELFTEST
 652         .selftest    = trace_selftest_startup_irqsoff,
 653 #endif
 654         .open           = irqsoff_trace_open,
 655         .close          = irqsoff_trace_close,
 656         .allow_instances = true,
 657         .use_max_tr     = true,
 658 };
 659 #endif /*  CONFIG_IRQSOFF_TRACER */
 660 
 661 #ifdef CONFIG_PREEMPT_TRACER
 662 void tracer_preempt_on(unsigned long a0, unsigned long a1)
 663 {
 664         int pc = preempt_count();
 665 
 666         if (preempt_trace(pc) && !irq_trace())
 667                 stop_critical_timing(a0, a1, pc);
 668 }
 669 
 670 void tracer_preempt_off(unsigned long a0, unsigned long a1)
 671 {
 672         int pc = preempt_count();
 673 
 674         if (preempt_trace(pc) && !irq_trace())
 675                 start_critical_timing(a0, a1, pc);
 676 }
 677 
 678 static int preemptoff_tracer_init(struct trace_array *tr)
 679 {
 680         trace_type = TRACER_PREEMPT_OFF;
 681 
 682         return __irqsoff_tracer_init(tr);
 683 }
 684 
 685 static void preemptoff_tracer_reset(struct trace_array *tr)
 686 {
 687         __irqsoff_tracer_reset(tr);
 688 }
 689 
 690 static struct tracer preemptoff_tracer __read_mostly =
 691 {
 692         .name           = "preemptoff",
 693         .init           = preemptoff_tracer_init,
 694         .reset          = preemptoff_tracer_reset,
 695         .start          = irqsoff_tracer_start,
 696         .stop           = irqsoff_tracer_stop,
 697         .print_max      = true,
 698         .print_header   = irqsoff_print_header,
 699         .print_line     = irqsoff_print_line,
 700         .flag_changed   = irqsoff_flag_changed,
 701 #ifdef CONFIG_FTRACE_SELFTEST
 702         .selftest    = trace_selftest_startup_preemptoff,
 703 #endif
 704         .open           = irqsoff_trace_open,
 705         .close          = irqsoff_trace_close,
 706         .allow_instances = true,
 707         .use_max_tr     = true,
 708 };
 709 #endif /* CONFIG_PREEMPT_TRACER */
 710 
 711 #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
 712 
 713 static int preemptirqsoff_tracer_init(struct trace_array *tr)
 714 {
 715         trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;
 716 
 717         return __irqsoff_tracer_init(tr);
 718 }
 719 
 720 static void preemptirqsoff_tracer_reset(struct trace_array *tr)
 721 {
 722         __irqsoff_tracer_reset(tr);
 723 }
 724 
 725 static struct tracer preemptirqsoff_tracer __read_mostly =
 726 {
 727         .name           = "preemptirqsoff",
 728         .init           = preemptirqsoff_tracer_init,
 729         .reset          = preemptirqsoff_tracer_reset,
 730         .start          = irqsoff_tracer_start,
 731         .stop           = irqsoff_tracer_stop,
 732         .print_max      = true,
 733         .print_header   = irqsoff_print_header,
 734         .print_line     = irqsoff_print_line,
 735         .flag_changed   = irqsoff_flag_changed,
 736 #ifdef CONFIG_FTRACE_SELFTEST
 737         .selftest    = trace_selftest_startup_preemptirqsoff,
 738 #endif
 739         .open           = irqsoff_trace_open,
 740         .close          = irqsoff_trace_close,
 741         .allow_instances = true,
 742         .use_max_tr     = true,
 743 };
 744 #endif
 745 
 746 __init static int init_irqsoff_tracer(void)
 747 {
 748 #ifdef CONFIG_IRQSOFF_TRACER
 749         register_tracer(&irqsoff_tracer);
 750 #endif
 751 #ifdef CONFIG_PREEMPT_TRACER
 752         register_tracer(&preemptoff_tracer);
 753 #endif
 754 #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
 755         register_tracer(&preemptirqsoff_tracer);
 756 #endif
 757 
 758         return 0;
 759 }
 760 core_initcall(init_irqsoff_tracer);
  761 #endif /* CONFIG_IRQSOFF_TRACER || CONFIG_PREEMPT_TRACER */
