/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_CLOCK_H
#define _LINUX_SCHED_CLOCK_H

#include <linux/smp.h>

/*
 * Do not use outside of architecture code which knows its limitations.
 *
 * sched_clock() has no promise of monotonicity or bounded drift between
 * CPUs; using it (which you should not) requires disabling IRQs.
 *
 * Please use one of the three interfaces below.
 */
extern unsigned long long notrace sched_clock(void);

/*
 * See the comment in kernel/sched/clock.c
 */
extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);

extern void sched_clock_init(void);

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
{
}

static inline void clear_sched_clock_stable(void)
{
}

static inline void sched_clock_idle_sleep_event(void)
{
}

static inline void sched_clock_idle_wakeup_event(void)
{
}

static inline u64 cpu_clock(int cpu)
{
	return sched_clock();
}

static inline u64 local_clock(void)
{
	return sched_clock();
}
#else
extern int sched_clock_stable(void);
extern void clear_sched_clock_stable(void);

/*
 * When sched_clock_stable(), __sched_clock_offset provides the offset
 * between local_clock() and sched_clock().
 */
extern u64 __sched_clock_offset;
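
/*
 * Illustrative note, not an additional interface: in the stable case
 * kernel/sched/clock.c returns, roughly,
 *
 *	local_clock() ~= sched_clock() + __sched_clock_offset
 *
 * i.e. the offset is what lines the raw sched_clock() value up with
 * local_clock().
 */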

extern void sched_clock_tick(void);
extern void sched_clock_tick_stable(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(void);

/*
 * As outlined in clock.c, provides a fast, high-resolution, nanosecond
 * time source that is monotonic for a given cpu argument and has bounded
 * drift between CPUs.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
static inline u64 cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}

static inline u64 local_clock(void)
{
	return sched_clock_cpu(raw_smp_processor_id());
}
#endif
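
/*
 * Illustrative sketch, not part of this header: a typical way to time a
 * short section of code with local_clock() (do_something() is a
 * hypothetical callee):
 *
 *	u64 start, delta_ns;
 *
 *	start = local_clock();
 *	do_something();
 *	delta_ns = local_clock() - start;
 *
 * Values obtained on different CPUs must not be compared this way; see
 * the BIG FAT WARNING above.
 */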

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * An interface for runtime opt-in to irq time accounting based on
 * sched_clock. The opt-in is explicit so that systems with slow
 * sched_clocks do not pay the accounting overhead.
 */
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
#else
static inline void enable_sched_clock_irqtime(void) {}
static inline void disable_sched_clock_irqtime(void) {}
#endif
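
/*
 * Illustrative sketch, not part of this header: an architecture with a
 * cheap sched_clock() would typically opt in from its own clock setup
 * code (my_arch_clock_init() is a hypothetical function):
 *
 *	static void __init my_arch_clock_init(void)
 *	{
 *		... set up the fast clocksource ...
 *		enable_sched_clock_irqtime();
 *	}
 *
 * Architectures that never call it keep sched_clock-based irq time
 * accounting disabled and avoid the overhead.
 */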

#endif /* _LINUX_SCHED_CLOCK_H */