root/kernel/trace/trace_clock.c


DEFINITIONS

This source file includes the following definitions:
  1. trace_clock_local
  2. trace_clock
  3. trace_clock_jiffies
  4. trace_clock_global
  5. trace_clock_counter

// SPDX-License-Identifier: GPL-2.0
/*
 * tracing clocks
 *
 *  Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  -   local: CPU-local trace clock
 *  -  medium: scalable global clock with some jitter
 *  -  global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

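/*
 * Illustrative sketch (not part of the original file): from user space, one
 * of these clocks is selected for the ftrace ring buffer by writing its name
 * to the tracefs "trace_clock" file.  The path below assumes tracefs is
 * mounted at /sys/kernel/tracing; the helper name is made up.
 */
#include <stdio.h>

static int select_trace_clock(const char *name)
{
        FILE *f = fopen("/sys/kernel/tracing/trace_clock", "w");

        if (!f)
                return -1;
        /* e.g. name = "local", "global" or "counter" */
        fprintf(f, "%s\n", name);
        return fclose(f);
}
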
/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
        u64 clock;

        /*
         * sched_clock() is an architecture implemented, fast, scalable,
         * lockless clock. It is not guaranteed to be coherent across
         * CPUs, nor across CPU idle events.
         */
        preempt_disable_notrace();
        clock = sched_clock();
        preempt_enable_notrace();

        return clock;
}
EXPORT_SYMBOL_GPL(trace_clock_local);

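/*
 * Illustrative sketch (not part of the original file): trace_clock_local()
 * is exported, so a module can use it to timestamp a short, CPU-local code
 * section.  The timed work and the pr_info() report are placeholders.
 */
#include <linux/printk.h>
#include <linux/trace_clock.h>

static void example_time_section(void)
{
        u64 t0, t1;

        t0 = trace_clock_local();
        /* ... CPU-local work being timed ... */
        t1 = trace_clock_local();

        /* sched_clock() based, so the delta is (roughly) in nanoseconds */
        pr_info("section took %llu ns\n", (unsigned long long)(t1 - t0));
}
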
/*
 * trace_clock(): 'between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on local_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
        return local_clock();
}
EXPORT_SYMBOL_GPL(trace_clock);

/*
 * trace_clock_jiffies(): Simply use jiffies as a clock counter.
 * Note that this use of jiffies_64 is not completely safe on
 * 32-bit systems. But the window is tiny, and if we are affected
 * the worst effect is an obviously bogus timestamp on a trace
 * event - i.e. not life threatening.
 */
u64 notrace trace_clock_jiffies(void)
{
        return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
}
EXPORT_SYMBOL_GPL(trace_clock_jiffies);

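/*
 * Worked example (illustrative): jiffies_64_to_clock_t() scales jiffies to
 * USER_HZ ticks.  With HZ=1000 and USER_HZ=100 the net scale factor is 1/10,
 * so 250 jiffies since boot read back as 25 clock_t ticks.
 */
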
/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
        u64 prev_time;
        arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
        {
                .lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
        };

u64 notrace trace_clock_global(void)
{
        unsigned long flags;
        int this_cpu;
        u64 now;

        raw_local_irq_save(flags);

        this_cpu = raw_smp_processor_id();
        now = sched_clock_cpu(this_cpu);
        /*
         * If in an NMI context then don't risk lockups and return the
         * cpu_clock() time:
         */
        if (unlikely(in_nmi()))
                goto out;

        arch_spin_lock(&trace_clock_struct.lock);

        /*
         * TODO: if this happens often then maybe we should reset
         * my_scd->clock to prev_time+1, to make sure
         * we start ticking with the local clock from now on?
         */
        if ((s64)(now - trace_clock_struct.prev_time) < 0)
                now = trace_clock_struct.prev_time + 1;

        trace_clock_struct.prev_time = now;

        arch_spin_unlock(&trace_clock_struct.lock);

 out:
        raw_local_irq_restore(flags);

        return now;
}
EXPORT_SYMBOL_GPL(trace_clock_global);

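/*
 * Illustrative user-space analogue (not part of the original file) of the
 * monotonicity step above: per-thread clock readings are serialized under a
 * lock and clamped so the merged timeline never moves backwards.  A pthread
 * mutex stands in for the arch spinlock; all names here are made up.
 */
#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t merge_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t merge_prev_time;

static uint64_t merge_monotonic(uint64_t now)
{
        pthread_mutex_lock(&merge_lock);
        /* A reading behind the last published time is nudged just past it. */
        if ((int64_t)(now - merge_prev_time) < 0)
                now = merge_prev_time + 1;
        merge_prev_time = now;
        pthread_mutex_unlock(&merge_lock);
        return now;
}
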
static atomic64_t trace_counter;

/*
 * trace_clock_counter(): simply an atomic counter.
 * Use the trace_counter "counter" for cases where you do not care
 * about timings, but are interested in strict ordering.
 */
u64 notrace trace_clock_counter(void)
{
        return atomic64_add_return(1, &trace_counter);
}
