// SPDX-License-Identifier: GPL-2.0
/*
 * hrtimers - High-resolution kernel timers
 *
 * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
 *
 * data type definitions, declarations, prototypes
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 */
#ifndef _LINUX_HRTIMER_H
#define _LINUX_HRTIMER_H

#include <linux/rbtree.h>
#include <linux/ktime.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/timer.h>
#include <linux/timerqueue.h>

struct hrtimer_clock_base;
struct hrtimer_cpu_base;

/*
 * Mode arguments of xxx_hrtimer functions:
 *
 * HRTIMER_MODE_ABS	- Time value is absolute
 * HRTIMER_MODE_REL	- Time value is relative to now
 * HRTIMER_MODE_PINNED	- Timer is bound to CPU (is only considered
 *			  when starting the timer)
 * HRTIMER_MODE_SOFT	- Timer callback function will be executed in
 *			  soft irq context
 */
enum hrtimer_mode {
	HRTIMER_MODE_ABS	= 0x00,
	HRTIMER_MODE_REL	= 0x01,
	HRTIMER_MODE_PINNED	= 0x02,
	HRTIMER_MODE_SOFT	= 0x04,

	HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
	HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,

	HRTIMER_MODE_ABS_SOFT	= HRTIMER_MODE_ABS | HRTIMER_MODE_SOFT,
	HRTIMER_MODE_REL_SOFT	= HRTIMER_MODE_REL | HRTIMER_MODE_SOFT,

	HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT,
	HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT,

};
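
/*
 * Illustrative sketch, not part of the original header: the mode values are
 * plain bit flags, so the combined enumerators are equivalent to OR-ing the
 * basic ones when a timer is started ("timer" is a hypothetical hrtimer):
 *
 *	hrtimer_start(&timer, ms_to_ktime(10),
 *		      HRTIMER_MODE_REL | HRTIMER_MODE_PINNED);
 *
 * which is the same as passing HRTIMER_MODE_REL_PINNED.
 */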

/*
 * Return values for the callback function
 */
enum hrtimer_restart {
	HRTIMER_NORESTART,	/* Timer is not restarted */
	HRTIMER_RESTART,	/* Timer must be restarted */
};
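
/*
 * Illustrative sketch only ("my_oneshot_expired" is a hypothetical callback,
 * not part of this header): a one-shot callback does its work and returns
 * HRTIMER_NORESTART; see hrtimer_forward_now() below for the periodic case.
 *
 *	static enum hrtimer_restart my_oneshot_expired(struct hrtimer *t)
 *	{
 *		// handle the expiry ...
 *		return HRTIMER_NORESTART;
 *	}
 */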

/*
 * Values to track state of the timer
 *
 * Possible states:
 *
 * 0x00		inactive
 * 0x01		enqueued into rbtree
 *
 * The callback state is not part of the timer->state because clearing it would
 * mean touching the timer after the callback; this makes it impossible to free
 * the timer from the callback function.
 *
 * Therefore we track the callback state in:
 *
 *	timer->base->running == timer
 *
 * On SMP it is possible to have a "callback function running and enqueued"
 * status. It happens for example when a posix timer expired and the callback
 * queued a signal. Between dropping the lock which protects the posix timer
 * and reacquiring the base lock of the hrtimer, another CPU can deliver the
 * signal and rearm the timer.
 *
 * All state transitions are protected by cpu_base->lock.
 */
#define HRTIMER_STATE_INACTIVE	0x00
#define HRTIMER_STATE_ENQUEUED	0x01
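
/*
 * Illustrative sketch only (pseudo-assignments, not real declarations) of how
 * the helpers defined later in this file relate to the state above:
 *
 *	queued  = timer->state & HRTIMER_STATE_ENQUEUED;  // hrtimer_is_queued()
 *	running = timer->base->running == timer;	  // hrtimer_callback_running()
 *	active  = queued || running;			  // roughly hrtimer_active(),
 *							  // which also guards against
 *							  // concurrent expiry via base->seq
 */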

/**
 * struct hrtimer - the basic hrtimer structure
 * @node:	timerqueue node, which also manages node.expires,
 *		the absolute expiry time in the hrtimer's internal
 *		representation. The time is related to the clock on
 *		which the timer is based. It is set up by adding
 *		slack to the _softexpires value. For non-range timers
 *		it is identical to _softexpires.
 * @_softexpires: the absolute earliest expiry time of the hrtimer.
 *		The time which was given as expiry time when the timer
 *		was armed.
 * @function:	timer expiry callback function
 * @base:	pointer to the timer base (per cpu and per clock)
 * @state:	state information (See bit values above)
 * @is_rel:	Set if the timer was armed relative
 * @is_soft:	Set if the hrtimer expires in soft interrupt context.
 *
 * The hrtimer structure must be initialized by hrtimer_init()
 */
struct hrtimer {
	struct timerqueue_node		node;
	ktime_t				_softexpires;
	enum hrtimer_restart		(*function)(struct hrtimer *);
	struct hrtimer_clock_base	*base;
	u8				state;
	u8				is_rel;
	u8				is_soft;
};
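
/*
 * Illustrative usage sketch ("my_dev" and "my_timer_fn" are hypothetical and
 * not part of this header): embed the hrtimer in your own structure, then
 * initialize it with hrtimer_init() and set the callback before starting it:
 *
 *	hrtimer_init(&my_dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	my_dev->timer.function = my_timer_fn;
 */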

/**
 * struct hrtimer_sleeper - simple sleeper structure
 * @timer:	embedded timer structure
 * @task:	task to wake up
 *
 * task is set to NULL when the timer expires.
 */
struct hrtimer_sleeper {
	struct hrtimer timer;
	struct task_struct *task;
};
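
/*
 * Rough sketch of the usual nanosleep-style waiter pattern, assuming process
 * context; "expires" is a hypothetical absolute ktime_t and the scheduler
 * helpers come from other headers. Not a definitive implementation:
 *
 *	struct hrtimer_sleeper to;
 *
 *	hrtimer_init_on_stack(&to.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 *	hrtimer_init_sleeper(&to, current);
 *	hrtimer_set_expires(&to.timer, expires);
 *
 *	do {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		hrtimer_start_expires(&to.timer, HRTIMER_MODE_ABS);
 *		if (to.task)
 *			schedule();
 *		hrtimer_cancel(&to.timer);
 *	} while (to.task && !signal_pending(current));
 *
 *	__set_current_state(TASK_RUNNING);
 *	destroy_hrtimer_on_stack(&to.timer);
 */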

#ifdef CONFIG_64BIT
# define __hrtimer_clock_base_align	____cacheline_aligned
#else
# define __hrtimer_clock_base_align
#endif

/**
 * struct hrtimer_clock_base - the timer base for a specific clock
 * @cpu_base:		per cpu clock base
 * @index:		clock type index for per_cpu support when moving a
 *			timer to a base on another cpu.
 * @clockid:		clock id for per_cpu support
 * @seq:		seqcount around __run_hrtimer
 * @running:		pointer to the currently running hrtimer
 * @active:		red black tree root node for the active timers
 * @get_time:		function to retrieve the current time of the clock
 * @offset:		offset of this clock to the monotonic base
 */
struct hrtimer_clock_base {
	struct hrtimer_cpu_base	*cpu_base;
	unsigned int		index;
	clockid_t		clockid;
	seqcount_t		seq;
	struct hrtimer		*running;
	struct timerqueue_head	active;
	ktime_t			(*get_time)(void);
	ktime_t			offset;
} __hrtimer_clock_base_align;

enum hrtimer_base_type {
	HRTIMER_BASE_MONOTONIC,
	HRTIMER_BASE_REALTIME,
	HRTIMER_BASE_BOOTTIME,
	HRTIMER_BASE_TAI,
	HRTIMER_BASE_MONOTONIC_SOFT,
	HRTIMER_BASE_REALTIME_SOFT,
	HRTIMER_BASE_BOOTTIME_SOFT,
	HRTIMER_BASE_TAI_SOFT,
	HRTIMER_MAX_CLOCK_BASES,
};

/**
 * struct hrtimer_cpu_base - the per cpu clock bases
 * @lock:		lock protecting the base and associated clock bases
 *			and timers
 * @cpu:		cpu number
 * @active_bases:	Bitfield to mark bases with active timers
 * @clock_was_set_seq:	Sequence counter of clock was set events
 * @hres_active:	State of high resolution mode
 * @in_hrtirq:		hrtimer_interrupt() is currently executing
 * @hang_detected:	The last hrtimer interrupt detected a hang
 * @softirq_activated:	Set if the timer softirq has been raised; the softirq
 *			related settings do not need to be updated in that case.
 * @nr_events:		Total number of hrtimer interrupt events
 * @nr_retries:		Total number of hrtimer interrupt retries
 * @nr_hangs:		Total number of hrtimer interrupt hangs
 * @max_hang_time:	Maximum time spent in hrtimer_interrupt
 * @expires_next:	absolute time of the next event, required for remote
 *			hrtimer enqueue; it is the earliest expiry time of all
 *			bases (hard and soft hrtimers are taken into account)
 * @next_timer:		Pointer to the first expiring timer
 * @softirq_expires_next: Time at which the soft hrtimer queues need to be
 *			checked for expiry
 * @softirq_next_timer: Pointer to the first expiring softirq based timer
 * @clock_base:		array of clock bases for this cpu
 *
 * Note: next_timer is just an optimization for __remove_hrtimer().
 *	 Do not dereference the pointer because it is not reliable on
 *	 cross cpu removals.
 */
struct hrtimer_cpu_base {
	raw_spinlock_t			lock;
	unsigned int			cpu;
	unsigned int			active_bases;
	unsigned int			clock_was_set_seq;
	unsigned int			hres_active	: 1,
					in_hrtirq	: 1,
					hang_detected	: 1,
					softirq_activated : 1;
#ifdef CONFIG_HIGH_RES_TIMERS
	unsigned int			nr_events;
	unsigned short			nr_retries;
	unsigned short			nr_hangs;
	unsigned int			max_hang_time;
#endif
	ktime_t				expires_next;
	struct hrtimer			*next_timer;
	ktime_t				softirq_expires_next;
	struct hrtimer			*softirq_next_timer;
	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
} ____cacheline_aligned;

static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
	timer->node.expires = time;
	timer->_softexpires = time;
}

static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta)
{
	timer->_softexpires = time;
	timer->node.expires = ktime_add_safe(time, delta);
}

static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, u64 delta)
{
	timer->_softexpires = time;
	timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
}

static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
{
	timer->node.expires = tv64;
	timer->_softexpires = tv64;
}

static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
{
	timer->node.expires = ktime_add_safe(timer->node.expires, time);
	timer->_softexpires = ktime_add_safe(timer->_softexpires, time);
}

static inline void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns)
{
	timer->node.expires = ktime_add_ns(timer->node.expires, ns);
	timer->_softexpires = ktime_add_ns(timer->_softexpires, ns);
}

static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer)
{
	return timer->node.expires;
}

static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
{
	return timer->_softexpires;
}

static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
{
	return timer->node.expires;
}
static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
{
	return timer->_softexpires;
}

static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
{
	return ktime_to_ns(timer->node.expires);
}

static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
{
	return ktime_sub(timer->node.expires, timer->base->get_time());
}

static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
{
	return timer->base->get_time();
}

static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
	return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
		timer->base->cpu_base->hres_active : 0;
}

#ifdef CONFIG_HIGH_RES_TIMERS
struct clock_event_device;

extern void hrtimer_interrupt(struct clock_event_device *dev);

/*
 * The resolution of the clocks. The resolution value is returned in
 * the clock_getres() system call to give application programmers an
 * idea of the (in)accuracy of timers. Timer values are rounded up to
 * this resolution value.
 */
# define HIGH_RES_NSEC		1
# define KTIME_HIGH_RES		(HIGH_RES_NSEC)
# define MONOTONIC_RES_NSEC	HIGH_RES_NSEC
# define KTIME_MONOTONIC_RES	KTIME_HIGH_RES
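
/*
 * For example (illustrative, not from the original source): with high
 * resolution timers enabled, clock_getres(CLOCK_MONOTONIC, &res) reports
 * res.tv_sec == 0, res.tv_nsec == 1.
 */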

extern void clock_was_set_delayed(void);

extern unsigned int hrtimer_resolution;

#else

# define MONOTONIC_RES_NSEC	LOW_RES_NSEC
# define KTIME_MONOTONIC_RES	KTIME_LOW_RES

#define hrtimer_resolution	(unsigned int)LOW_RES_NSEC

static inline void clock_was_set_delayed(void) { }

#endif

static inline ktime_t
__hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
{
	ktime_t rem = ktime_sub(timer->node.expires, now);

	/*
	 * Adjust relative timers for the extra we added in
	 * hrtimer_start_range_ns() to prevent short timeouts.
	 */
	if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel)
		rem -= hrtimer_resolution;
	return rem;
}

static inline ktime_t
hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
{
	return __hrtimer_expires_remaining_adjusted(timer,
						    timer->base->get_time());
}

extern void clock_was_set(void);
#ifdef CONFIG_TIMERFD
extern void timerfd_clock_was_set(void);
#else
static inline void timerfd_clock_was_set(void) { }
#endif
extern void hrtimers_resume(void);

DECLARE_PER_CPU(struct tick_device, tick_cpu_device);


/* Exported timer functions: */

/* Initialize timers: */
extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
			 enum hrtimer_mode mode);

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock,
				  enum hrtimer_mode mode);

extern void destroy_hrtimer_on_stack(struct hrtimer *timer);
#else
static inline void hrtimer_init_on_stack(struct hrtimer *timer,
					 clockid_t which_clock,
					 enum hrtimer_mode mode)
{
	hrtimer_init(timer, which_clock, mode);
}
static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
#endif

/* Basic timer operations: */
extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
				   u64 range_ns, const enum hrtimer_mode mode);

/**
 * hrtimer_start - (re)start an hrtimer
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	timer mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
 *		softirq based mode is considered for debug purposes only!
 */
static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
				 const enum hrtimer_mode mode)
{
	hrtimer_start_range_ns(timer, tim, 0, mode);
}

extern int hrtimer_cancel(struct hrtimer *timer);
extern int hrtimer_try_to_cancel(struct hrtimer *timer);

static inline void hrtimer_start_expires(struct hrtimer *timer,
					 enum hrtimer_mode mode)
{
	u64 delta;
	ktime_t soft, hard;
	soft = hrtimer_get_softexpires(timer);
	hard = hrtimer_get_expires(timer);
	delta = ktime_to_ns(ktime_sub(hard, soft));
	hrtimer_start_range_ns(timer, soft, delta, mode);
}

static inline void hrtimer_restart(struct hrtimer *timer)
{
	hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}

/* Query timers: */
extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);

static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	return __hrtimer_get_remaining(timer, false);
}

extern u64 hrtimer_get_next_event(void);
extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);

extern bool hrtimer_active(const struct hrtimer *timer);

/*
 * Helper function to check whether the timer is on one of the queues
 */
static inline int hrtimer_is_queued(struct hrtimer *timer)
{
	return timer->state & HRTIMER_STATE_ENQUEUED;
}

/*
 * Helper function to check whether the timer is running the callback
 * function
 */
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
	return timer->base->running == timer;
}

/* Forward a hrtimer so it expires after now: */
extern u64
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);

/**
 * hrtimer_forward_now - forward the timer expiry so it expires after now
 * @timer:	hrtimer to forward
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire after the current time
 * of the hrtimer clock base. Returns the number of overruns.
 *
 * Can be safely called from the callback function of @timer. If
 * called from other contexts @timer must neither be enqueued nor
 * running the callback and the caller needs to take care of
 * serialization.
 *
 * Note: This only updates the timer expiry value and does not requeue
 * the timer.
 */
static inline u64 hrtimer_forward_now(struct hrtimer *timer,
				      ktime_t interval)
{
	return hrtimer_forward(timer, timer->base->get_time(), interval);
}
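
/*
 * Illustrative sketch only ("my_periodic_fn" and "interval" are hypothetical,
 * not part of this header): a periodic callback forwards its own expiry and
 * asks to be restarted:
 *
 *	static enum hrtimer_restart my_periodic_fn(struct hrtimer *t)
 *	{
 *		// do the periodic work ...
 *		hrtimer_forward_now(t, interval);
 *		return HRTIMER_RESTART;
 *	}
 */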

/* Precise sleep: */

extern int nanosleep_copyout(struct restart_block *, struct timespec64 *);
extern long hrtimer_nanosleep(const struct timespec64 *rqtp,
			      const enum hrtimer_mode mode,
			      const clockid_t clockid);

extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
				 struct task_struct *tsk);

extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
				    const enum hrtimer_mode mode);
extern int schedule_hrtimeout_range_clock(ktime_t *expires,
					  u64 delta,
					  const enum hrtimer_mode mode,
					  clockid_t clock_id);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
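
/*
 * Illustrative usage sketch (assumption: as with schedule(), the caller sets
 * the task state first; "timeout" is a hypothetical ktime_t, "ret" an int):
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_REL);
 *	// ret == 0 when the timeout expired, -EINTR when woken up early
 */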

/* Soft interrupt function to run the hrtimer queues: */
extern void hrtimer_run_queues(void);

/* Bootup initialization: */
extern void __init hrtimers_init(void);

/* Show pending timers: */
extern void sysrq_timer_list_show(void);

int hrtimers_prepare_cpu(unsigned int cpu);
#ifdef CONFIG_HOTPLUG_CPU
int hrtimers_dead_cpu(unsigned int cpu);
#else
#define hrtimers_dead_cpu	NULL
#endif

#endif