// SPDX-License-Identifier: GPL-2.0
/*
 * tracing clocks
 *
 * Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 * - local: CPU-local trace clock
 * - medium: scalable global clock with some jitter
 * - global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
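/*
 * Usage sketch: these clocks are typically selected at run time through
 * the tracefs "trace_clock" file (the path below assumes tracefs is
 * mounted at /sys/kernel/tracing):
 *
 *	# cat /sys/kernel/tracing/trace_clock
 *	[local] global counter ...
 *	# echo global > /sys/kernel/tracing/trace_clock
 */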
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross CPU boundaries and does not
 * span idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;

	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	preempt_disable_notrace();
	clock = sched_clock();
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(trace_clock_local);
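
/*
 * Usage sketch: since trace_clock_local() is only coherent on a single
 * CPU, it is best suited to deltas taken on one CPU (do_something() is
 * a placeholder):
 *
 *	u64 t0 = trace_clock_local();
 *	do_something();
 *	u64 delta = trace_clock_local() - t0;
 *
 * The delta is only meaningful if both reads ran on the same CPU.
 */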

/*
 * trace_clock(): 'between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on local_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return local_clock();
}
EXPORT_SYMBOL_GPL(trace_clock);
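
/*
 * Jitter sketch, assuming HZ=1000: two events logged "simultaneously"
 * on different CPUs may carry timestamps up to ~1 jiffy (~1 ms) apart,
 * so closely spaced cross-CPU events can appear reordered in a trace.
 */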

/*
 * trace_clock_jiffies(): Simply use jiffies as a clock counter.
 * Note that this use of jiffies_64 is not completely safe on
 * 32-bit systems. But the window is tiny, and the effect if
 * we are affected is that we will have an obviously bogus
 * timestamp on a trace event - i.e. not life threatening.
 */
u64 notrace trace_clock_jiffies(void)
{
	return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
}
EXPORT_SYMBOL_GPL(trace_clock_jiffies);
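
/*
 * Unit sketch: jiffies_64_to_clock_t() converts jiffies (HZ ticks)
 * into clock_t units (USER_HZ ticks, typically 100 per second).
 * Assuming HZ=1000 and USER_HZ=100, an uptime of 250 jiffies (250 ms)
 * is reported as 25.
 */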

/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
	u64 prev_time;
	arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
	};
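
/*
 * Design note (sketch of the rationale): the lock holder writes
 * prev_time immediately after taking the lock, so keeping both fields
 * in one cacheline lets a single cache line transfer cover both the
 * lock acquisition and the timestamp update.
 */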

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now, prev_time;

	raw_local_irq_save(flags);

	this_cpu = raw_smp_processor_id();

	/*
	 * The global clock "guarantees" that the events are ordered
	 * between CPUs. But if two events on two different CPUs call
	 * trace_clock_global at roughly the same time, it really does
	 * not matter which one gets the earlier time. Just make sure
	 * that the same CPU will always show a monotonic clock.
	 *
	 * Use a read memory barrier to get the latest written
	 * time that was recorded.
	 */
	smp_rmb();
	prev_time = READ_ONCE(trace_clock_struct.prev_time);
	now = sched_clock_cpu(this_cpu);

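	/*
	 * Sketch of why the check below is wraparound-safe: the (s64)
	 * cast turns a backwards step into a negative number even
	 * though the operands are unsigned, e.g. now = 2, prev_time = 6
	 * yields now - prev_time = 0xffff...fffc, which is -4 as an s64.
	 */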
	/* Make sure that now is always greater than or equal to prev_time */
	if ((s64)(now - prev_time) < 0)
		now = prev_time;

	/*
	 * If in an NMI context then don't risk lockups and simply return
	 * the current time.
	 */
	if (unlikely(in_nmi()))
		goto out;

	/* Tracing can cause strange recursion, always use a try lock */
	if (arch_spin_trylock(&trace_clock_struct.lock)) {
		/* Reread prev_time in case it was already updated */
		prev_time = READ_ONCE(trace_clock_struct.prev_time);
		if ((s64)(now - prev_time) < 0)
			now = prev_time;

		trace_clock_struct.prev_time = now;

		/* The unlock acts as the wmb for the above rmb */
		arch_spin_unlock(&trace_clock_struct.lock);
	}
 out:
	raw_local_irq_restore(flags);

	return now;
}
EXPORT_SYMBOL_GPL(trace_clock_global);

static atomic64_t trace_counter;

/*
 * trace_clock_counter(): simply an atomic counter.
 * Use the trace_counter "counter" for cases where you do not care
 * about timings, but are interested in strict ordering.
 */
u64 notrace trace_clock_counter(void)
{
	return atomic64_add_return(1, &trace_counter);
}

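/*
 * Usage sketch: each call returns a unique, strictly increasing value,
 * so two events can always be ordered even when their real timestamps
 * would be identical:
 *
 *	u64 a = trace_clock_counter();
 *	u64 b = trace_clock_counter();
 *
 * Here b > a is guaranteed, regardless of which CPUs ran the calls.
 */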