/*
 * tracing clocks
 *
 * Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  -  local: CPU-local trace clock
 *  - medium: scalable global clock with some jitter
 *  - global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
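/*
 * Usage note: these clocks typically back the entries of tracefs'
 * trace_clock file, so a tracer's clock can be switched at runtime,
 * e.g.:
 *
 *	echo global > /sys/kernel/tracing/trace_clock
 */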
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;

	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	preempt_disable_notrace();
	clock = sched_clock();
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(trace_clock_local);
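
/*
 * Usage sketch (illustrative; "my_time_section_ns" is a hypothetical
 * helper, not a kernel API): since trace_clock_local() is only
 * coherent on a single CPU, a duration must be measured with both
 * samples taken on the same CPU, e.g. with preemption disabled.
 */
static inline u64 my_time_section_ns(void (*section)(void))
{
	u64 t0, t1;

	preempt_disable();
	t0 = trace_clock_local();
	section();		/* must not sleep: preemption is off */
	t1 = trace_clock_local();
	preempt_enable();

	return t1 - t0;
}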

/*
 * trace_clock(): 'between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return local_clock();
}
EXPORT_SYMBOL_GPL(trace_clock);

/*
 * trace_clock_jiffies(): Simply use jiffies as a clock counter.
 * Note that this use of jiffies_64 is not completely safe on
 * 32-bit systems. But the window is tiny, and the effect if
 * we are affected is that we will have an obviously bogus
 * timestamp on a trace event - i.e. not life threatening.
 */
u64 notrace trace_clock_jiffies(void)
{
	return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
}
EXPORT_SYMBOL_GPL(trace_clock_jiffies);
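
/*
 * Worked example (assuming HZ=1000 and USER_HZ=100): 2500 jiffies
 * since boot convert to 2500 * 100 / 1000 = 250 clock_t ticks, so
 * the returned value only advances in 10 ms steps - coarse, but
 * cheap.
 */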

/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
	u64 prev_time;
	arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
	};
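
/*
 * Design note (inferred rationale): because prev_time and the lock
 * share one cacheline, the CPU that wins the trylock below already
 * owns the line exclusively when it updates prev_time, avoiding a
 * second cacheline transfer.
 */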

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now, prev_time;

	local_irq_save(flags);

	this_cpu = raw_smp_processor_id();

	/*
	 * The global clock "guarantees" that the events are ordered
	 * between CPUs. But if two events on two different CPUs call
	 * trace_clock_global at roughly the same time, it really does
	 * not matter which one gets the earlier time. Just make sure
	 * that the same CPU will always show a monotonic clock.
	 *
	 * Use a read memory barrier to get the latest written
	 * time that was recorded.
	 */
	smp_rmb();
	prev_time = READ_ONCE(trace_clock_struct.prev_time);
	now = sched_clock_cpu(this_cpu);

	/* Make sure that now is always greater than or equal to prev_time */
	if ((s64)(now - prev_time) < 0)
		now = prev_time;
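
	/*
	 * Worked example (illustrative): with prev_time = 1000 and
	 * now = 990, now - prev_time is -10 as s64, so now is clamped
	 * to 1000. The signed test also handles u64 wraparound: with
	 * prev_time = U64_MAX - 5 and now = 5 the u64 difference is
	 * 11, positive as s64, so "now" reads as 11 ns after prev_time
	 * rather than as time going backwards.
	 */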

	/*
	 * If in an NMI context then don't risk lockups and simply return
	 * the current time.
	 */
	if (unlikely(in_nmi()))
		goto out;

	/* Tracing can cause strange recursion, always use a try lock */
	if (arch_spin_trylock(&trace_clock_struct.lock)) {
		/* Reread prev_time in case it was already updated */
		prev_time = READ_ONCE(trace_clock_struct.prev_time);
		if ((s64)(now - prev_time) < 0)
			now = prev_time;

		trace_clock_struct.prev_time = now;

		/* The unlock acts as the wmb for the above rmb */
		arch_spin_unlock(&trace_clock_struct.lock);
	}
 out:
	local_irq_restore(flags);

	return now;
}
EXPORT_SYMBOL_GPL(trace_clock_global);

static atomic64_t trace_counter;

/*
 * trace_clock_counter(): simply an atomic counter.
 * Use the trace_counter "counter" for cases where you do not care
 * about timings, but are interested in strict ordering.
 */
u64 notrace trace_clock_counter(void)
{
	return atomic64_add_return(1, &trace_counter);
}
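
/*
 * Usage sketch ("my_ordered_event" and "my_stamp_event" are
 * hypothetical, not kernel APIs): atomic64_add_return() hands every
 * caller a unique value, so two events stamped this way are strictly
 * ordered even if they fire in the same nanosecond on different CPUs.
 */
struct my_ordered_event {
	u64 seq;	/* from trace_clock_counter() */
};

static void my_stamp_event(struct my_ordered_event *ev)
{
	ev->seq = trace_clock_counter();
}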