// SPDX-License-Identifier: GPL-2.0
/*
 * hrtimers - High-resolution kernel timers
 *
 * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
 *
 * data type definitions, declarations, prototypes
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 */
#ifndef _LINUX_HRTIMER_H
#define _LINUX_HRTIMER_H

#include <linux/hrtimer_defs.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/seqlock.h>
#include <linux/timer.h>
#include <linux/timerqueue.h>

struct hrtimer_clock_base;
struct hrtimer_cpu_base;
/*
 * Mode arguments of xxx_hrtimer functions:
 *
 * HRTIMER_MODE_ABS		- Time value is absolute
 * HRTIMER_MODE_REL		- Time value is relative to now
 * HRTIMER_MODE_PINNED		- Timer is bound to CPU (is only considered
 *				  when starting the timer)
 * HRTIMER_MODE_SOFT		- Timer callback function will be executed in
 *				  soft irq context
 * HRTIMER_MODE_HARD		- Timer callback function will be executed in
 *				  hard irq context even on PREEMPT_RT.
 */
enum hrtimer_mode {
	HRTIMER_MODE_ABS	= 0x00,
	HRTIMER_MODE_REL	= 0x01,
	HRTIMER_MODE_PINNED	= 0x02,
	HRTIMER_MODE_SOFT	= 0x04,
	HRTIMER_MODE_HARD	= 0x08,

	HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
	HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,

	HRTIMER_MODE_ABS_SOFT	= HRTIMER_MODE_ABS | HRTIMER_MODE_SOFT,
	HRTIMER_MODE_REL_SOFT	= HRTIMER_MODE_REL | HRTIMER_MODE_SOFT,

	HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT,
	HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT,

	HRTIMER_MODE_ABS_HARD	= HRTIMER_MODE_ABS | HRTIMER_MODE_HARD,
	HRTIMER_MODE_REL_HARD	= HRTIMER_MODE_REL | HRTIMER_MODE_HARD,

	HRTIMER_MODE_ABS_PINNED_HARD = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_HARD,
	HRTIMER_MODE_REL_PINNED_HARD = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_HARD,
};
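
/*
 * Example: the composed values above are plain bitwise ORs of the basic
 * mode bits. A timer that is armed relative to now, stays pinned to the
 * current CPU and expires in hard irq context even on PREEMPT_RT would
 * be started with HRTIMER_MODE_REL_PINNED_HARD. A minimal sketch (the
 * timer "t" and the 10ms expiry are hypothetical):
 *
 *	hrtimer_init(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED_HARD);
 *	hrtimer_start(&t, ms_to_ktime(10), HRTIMER_MODE_REL_PINNED_HARD);
 */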

/*
 * Return values for the callback function
 */
enum hrtimer_restart {
	HRTIMER_NORESTART,	/* Timer is not restarted */
	HRTIMER_RESTART,	/* Timer must be restarted */
};
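
/*
 * Example: a one-shot expiry function does its work and returns
 * HRTIMER_NORESTART; a periodic one re-arms the timer first (see
 * hrtimer_forward_now() below) and returns HRTIMER_RESTART. A minimal
 * one-shot sketch ("my_expiry" is a hypothetical callback):
 *
 *	static enum hrtimer_restart my_expiry(struct hrtimer *t)
 *	{
 *		pr_debug("timer expired\n");
 *		return HRTIMER_NORESTART;
 *	}
 */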

/*
 * Values to track state of the timer
 *
 * Possible states:
 *
 * 0x00		inactive
 * 0x01		enqueued into rbtree
 *
 * The callback state is not part of the timer->state because clearing it
 * would mean touching the timer after the callback returned, which would
 * make it impossible to free the timer from the callback function.
 *
 * Therefore we track the callback state in:
 *
 *	timer->base->cpu_base->running == timer
 *
 * On SMP it is possible to have a "callback function running and enqueued"
 * status. It happens for example when a posix timer expired and the
 * callback queued a signal. Between dropping the lock which protects the
 * posix timer and reacquiring the base lock of the hrtimer, another CPU can
 * deliver the signal and rearm the timer.
 *
 * All state transitions are protected by cpu_base->lock.
 */
#define HRTIMER_STATE_INACTIVE	0x00
#define HRTIMER_STATE_ENQUEUED	0x01

/**
 * struct hrtimer - the basic hrtimer structure
 * @node:	timerqueue node, which also manages node.expires,
 *		the absolute expiry time in the hrtimers internal
 *		representation. The time is related to the clock on
 *		which the timer is based. It is set up by adding
 *		slack to the _softexpires value; for non-range timers
 *		it is identical to _softexpires.
 * @_softexpires: the absolute earliest expiry time of the hrtimer.
 *		The time which was given as expiry time when the timer
 *		was armed.
 * @function:	timer expiry callback function
 * @base:	pointer to the timer base (per cpu and per clock)
 * @state:	state information (See bit values above)
 * @is_rel:	Set if the timer was armed relative
 * @is_soft:	Set if hrtimer will be expired in soft interrupt context.
 * @is_hard:	Set if hrtimer will be expired in hard interrupt context
 *		even on RT.
 *
 * The hrtimer structure must be initialized by hrtimer_init()
 */
struct hrtimer {
	struct timerqueue_node		node;
	ktime_t				_softexpires;
	enum hrtimer_restart		(*function)(struct hrtimer *);
	struct hrtimer_clock_base	*base;
	u8				state;
	u8				is_rel;
	u8				is_soft;
	u8				is_hard;
};
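
/*
 * The timer is typically embedded in a larger per-device or per-task
 * structure; the expiry function can then recover its container via
 * container_of(). A minimal sketch ("struct my_dev" and "my_timer_fn"
 * are hypothetical):
 *
 *	struct my_dev {
 *		struct hrtimer	timer;
 *		bool		pending;
 *	};
 *
 *	static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
 *	{
 *		struct my_dev *dev = container_of(t, struct my_dev, timer);
 *
 *		dev->pending = false;
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	At init time:
 *
 *	hrtimer_init(&dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	dev->timer.function = my_timer_fn;
 */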

/**
 * struct hrtimer_sleeper - simple sleeper structure
 * @timer:	embedded timer structure
 * @task:	task to wake up
 *
 * task is set to NULL when the timer expires.
 */
struct hrtimer_sleeper {
	struct hrtimer timer;
	struct task_struct *task;
};
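
/*
 * Typical on-stack usage, as in the futex and poll code: arm the sleeper
 * and schedule until either the expiry function wakes the task (clearing
 * ->task) or some other wakeup arrives first. A minimal sketch, assuming
 * the caller provides the absolute-free "timeout" ktime_t:
 *
 *	struct hrtimer_sleeper to;
 *
 *	hrtimer_init_sleeper_on_stack(&to, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	hrtimer_set_expires(&to.timer, timeout);
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	hrtimer_sleeper_start_expires(&to, HRTIMER_MODE_REL);
 *	if (to.task)
 *		schedule();
 *	hrtimer_cancel(&to.timer);
 *	destroy_hrtimer_on_stack(&to.timer);
 */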

#ifdef CONFIG_64BIT
# define __hrtimer_clock_base_align	____cacheline_aligned
#else
# define __hrtimer_clock_base_align
#endif

/**
 * struct hrtimer_clock_base - the timer base for a specific clock
 * @cpu_base:		per cpu clock base
 * @index:		clock type index for per_cpu support when moving a
 *			timer to a base on another cpu.
 * @clockid:		clock id for per_cpu support
 * @seq:		seqcount around __run_hrtimer
 * @running:		pointer to the currently running hrtimer
 * @active:		red black tree root node for the active timers
 * @get_time:		function to retrieve the current time of the clock
 * @offset:		offset of this clock to the monotonic base
 */
struct hrtimer_clock_base {
	struct hrtimer_cpu_base	*cpu_base;
	unsigned int		index;
	clockid_t		clockid;
	seqcount_raw_spinlock_t	seq;
	struct hrtimer		*running;
	struct timerqueue_head	active;
	ktime_t			(*get_time)(void);
	ktime_t			offset;
} __hrtimer_clock_base_align;

enum hrtimer_base_type {
	HRTIMER_BASE_MONOTONIC,
	HRTIMER_BASE_REALTIME,
	HRTIMER_BASE_BOOTTIME,
	HRTIMER_BASE_TAI,
	HRTIMER_BASE_MONOTONIC_SOFT,
	HRTIMER_BASE_REALTIME_SOFT,
	HRTIMER_BASE_BOOTTIME_SOFT,
	HRTIMER_BASE_TAI_SOFT,
	HRTIMER_MAX_CLOCK_BASES,
};

/**
 * struct hrtimer_cpu_base - the per cpu clock bases
 * @lock:		lock protecting the base and associated clock bases
 *			and timers
 * @cpu:		cpu number
 * @active_bases:	Bitfield to mark bases with active timers
 * @clock_was_set_seq:	Sequence counter of clock was set events
 * @hres_active:	State of high resolution mode
 * @in_hrtirq:		hrtimer_interrupt() is currently executing
 * @hang_detected:	The last hrtimer interrupt detected a hang
 * @softirq_activated:	Set when the timer softirq has been raised; the
 *			softirq related settings need no update in that case.
 * @nr_events:		Total number of hrtimer interrupt events
 * @nr_retries:		Total number of hrtimer interrupt retries
 * @nr_hangs:		Total number of hrtimer interrupt hangs
 * @max_hang_time:	Maximum time spent in hrtimer_interrupt
 * @softirq_expiry_lock: Lock which is taken while softirq based hrtimers
 *			are expired
 * @timer_waiters:	A hrtimer_cancel() invocation waits for the timer
 *			callback to finish.
 * @expires_next:	absolute time of the next event, is required for remote
 *			hrtimer enqueue; it is the total first expiry time (hard
 *			and soft hrtimers are taken into account)
 * @next_timer:		Pointer to the first expiring timer
 * @softirq_expires_next: Time at which the soft queues need to be checked
 *			for expired timers as well
 * @softirq_next_timer: Pointer to the first expiring softirq based timer
 * @clock_base:		array of clock bases for this cpu
 *
 * Note: next_timer is just an optimization for __remove_hrtimer().
 *	 Do not dereference the pointer because it is not reliable on
 *	 cross cpu removals.
 */
struct hrtimer_cpu_base {
	raw_spinlock_t			lock;
	unsigned int			cpu;
	unsigned int			active_bases;
	unsigned int			clock_was_set_seq;
	unsigned int			hres_active	: 1,
					in_hrtirq	: 1,
					hang_detected	: 1,
					softirq_activated : 1;
#ifdef CONFIG_HIGH_RES_TIMERS
	unsigned int			nr_events;
	unsigned short			nr_retries;
	unsigned short			nr_hangs;
	unsigned int			max_hang_time;
#endif
#ifdef CONFIG_PREEMPT_RT
	spinlock_t			softirq_expiry_lock;
	atomic_t			timer_waiters;
#endif
	ktime_t				expires_next;
	struct hrtimer			*next_timer;
	ktime_t				softirq_expires_next;
	struct hrtimer			*softirq_next_timer;
	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
} ____cacheline_aligned;

static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
	timer->node.expires = time;
	timer->_softexpires = time;
}

static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta)
{
	timer->_softexpires = time;
	timer->node.expires = ktime_add_safe(time, delta);
}

static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, u64 delta)
{
	timer->_softexpires = time;
	timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
}

static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
{
	timer->node.expires = tv64;
	timer->_softexpires = tv64;
}

static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
{
	timer->node.expires = ktime_add_safe(timer->node.expires, time);
	timer->_softexpires = ktime_add_safe(timer->_softexpires, time);
}

static inline void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns)
{
	timer->node.expires = ktime_add_ns(timer->node.expires, ns);
	timer->_softexpires = ktime_add_ns(timer->_softexpires, ns);
}

static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer)
{
	return timer->node.expires;
}

static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
{
	return timer->_softexpires;
}

static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
{
	return timer->node.expires;
}

static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
{
	return timer->_softexpires;
}

static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
{
	return ktime_to_ns(timer->node.expires);
}

static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
{
	return ktime_sub(timer->node.expires, timer->base->get_time());
}

static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
{
	return timer->base->get_time();
}

static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
	return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
		timer->base->cpu_base->hres_active : 0;
}

#ifdef CONFIG_HIGH_RES_TIMERS
struct clock_event_device;

extern void hrtimer_interrupt(struct clock_event_device *dev);

extern unsigned int hrtimer_resolution;

#else

#define hrtimer_resolution	(unsigned int)LOW_RES_NSEC

#endif

static inline ktime_t
__hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
{
	ktime_t rem = ktime_sub(timer->node.expires, now);

	/*
	 * Adjust relative timers for the extra we added in
	 * hrtimer_start_range_ns() to prevent short timeouts.
	 */
	if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel)
		rem -= hrtimer_resolution;
	return rem;
}

static inline ktime_t
hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
{
	return __hrtimer_expires_remaining_adjusted(timer,
						    timer->base->get_time());
}

#ifdef CONFIG_TIMERFD
extern void timerfd_clock_was_set(void);
extern void timerfd_resume(void);
#else
static inline void timerfd_clock_was_set(void) { }
static inline void timerfd_resume(void) { }
#endif

DECLARE_PER_CPU(struct tick_device, tick_cpu_device);

#ifdef CONFIG_PREEMPT_RT
void hrtimer_cancel_wait_running(const struct hrtimer *timer);
#else
static inline void hrtimer_cancel_wait_running(struct hrtimer *timer)
{
	cpu_relax();
}
#endif

/* Exported timer functions: */

/* Initialize timers: */
extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
			 enum hrtimer_mode mode);
extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id,
				 enum hrtimer_mode mode);

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock,
				  enum hrtimer_mode mode);
extern void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
					  clockid_t clock_id,
					  enum hrtimer_mode mode);

extern void destroy_hrtimer_on_stack(struct hrtimer *timer);
#else
static inline void hrtimer_init_on_stack(struct hrtimer *timer,
					 clockid_t which_clock,
					 enum hrtimer_mode mode)
{
	hrtimer_init(timer, which_clock, mode);
}

static inline void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
						 clockid_t clock_id,
						 enum hrtimer_mode mode)
{
	hrtimer_init_sleeper(sl, clock_id, mode);
}

static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
#endif

/* Basic timer operations: */
extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
				   u64 range_ns, const enum hrtimer_mode mode);

/**
 * hrtimer_start - (re)start an hrtimer
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	timer mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
 *		softirq based mode is considered for debug purpose only!
 */
static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
				 const enum hrtimer_mode mode)
{
	hrtimer_start_range_ns(timer, tim, 0, mode);
}
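
/*
 * Example: arm a timer 5ms from now, or at an absolute time on the
 * timer's clock. A minimal sketch (the timer "t" is hypothetical and
 * assumed to be on CLOCK_MONOTONIC, which ktime_get() matches):
 *
 *	hrtimer_start(&t, ms_to_ktime(5), HRTIMER_MODE_REL);
 *
 *	hrtimer_start(&t, ktime_add(ktime_get(), ms_to_ktime(5)),
 *		      HRTIMER_MODE_ABS);
 */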

extern int hrtimer_cancel(struct hrtimer *timer);
extern int hrtimer_try_to_cancel(struct hrtimer *timer);
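
/*
 * Both return 0 if the timer was not active and 1 if it was;
 * hrtimer_try_to_cancel() additionally returns -1 when the callback is
 * currently running and therefore cannot be stopped, while
 * hrtimer_cancel() blocks until the callback has finished. Sketch of the
 * non-blocking pattern (the timer "t" is hypothetical):
 *
 *	if (hrtimer_try_to_cancel(&t) >= 0) {
 *		... timer is now guaranteed to be inactive ...
 *	}
 */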

static inline void hrtimer_start_expires(struct hrtimer *timer,
					 enum hrtimer_mode mode)
{
	u64 delta;
	ktime_t soft, hard;

	soft = hrtimer_get_softexpires(timer);
	hard = hrtimer_get_expires(timer);
	delta = ktime_to_ns(ktime_sub(hard, soft));
	hrtimer_start_range_ns(timer, soft, delta, mode);
}
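
/*
 * Example: pair hrtimer_set_expires_range_ns() with
 * hrtimer_start_expires() to give the timer a slack window in which the
 * kernel may coalesce it with other expiring timers. A minimal sketch,
 * assuming "expiry" is an absolute ktime_t on the timer's clock and "t"
 * is a hypothetical timer:
 *
 *	hrtimer_set_expires_range_ns(&t, expiry, 50 * NSEC_PER_USEC);
 *	hrtimer_start_expires(&t, HRTIMER_MODE_ABS);
 */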

void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl,
				   enum hrtimer_mode mode);

static inline void hrtimer_restart(struct hrtimer *timer)
{
	hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}

/* Query timers: */
extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);

/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 */
static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	return __hrtimer_get_remaining(timer, false);
}

extern u64 hrtimer_get_next_event(void);
extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);

extern bool hrtimer_active(const struct hrtimer *timer);

/**
 * hrtimer_is_queued - check whether the timer is on one of the queues
 * @timer:	Timer to check
 *
 * Returns: True if the timer is queued, false otherwise
 *
 * The function can be used lockless, but it gives only a current snapshot.
 */
static inline bool hrtimer_is_queued(struct hrtimer *timer)
{
	/* The READ_ONCE pairs with the update functions of timer->state */
	return !!(READ_ONCE(timer->state) & HRTIMER_STATE_ENQUEUED);
}

/*
 * Helper function to check whether the timer is running the callback
 * function
 */
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
	return timer->base->running == timer;
}

/* Forward a hrtimer so it expires after now: */
extern u64
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);

/**
 * hrtimer_forward_now - forward the timer expiry so it expires after now
 * @timer:	hrtimer to forward
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire after the current time
 * of the hrtimer clock base. Returns the number of overruns.
 *
 * Can be safely called from the callback function of @timer. If
 * called from other contexts @timer must neither be enqueued nor
 * running the callback and the caller needs to take care of
 * serialization.
 *
 * Note: This only updates the timer expiry value and does not requeue
 * the timer.
 */
static inline u64 hrtimer_forward_now(struct hrtimer *timer,
				      ktime_t interval)
{
	return hrtimer_forward(timer, timer->base->get_time(), interval);
}
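
/*
 * Example: a periodic expiry function re-arms itself by pushing the
 * expiry forward in multiples of the period and returning
 * HRTIMER_RESTART, so missed periods are accounted as overruns instead
 * of accumulating drift. A minimal sketch ("my_tick" and the 10ms
 * period are hypothetical):
 *
 *	static enum hrtimer_restart my_tick(struct hrtimer *t)
 *	{
 *		hrtimer_forward_now(t, ms_to_ktime(10));
 *		return HRTIMER_RESTART;
 *	}
 */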

/* Precise sleep: */

extern int nanosleep_copyout(struct restart_block *, struct timespec64 *);
extern long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
			      const clockid_t clockid);

extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
				    const enum hrtimer_mode mode);
extern int schedule_hrtimeout_range_clock(ktime_t *expires,
					  u64 delta,
					  const enum hrtimer_mode mode,
					  clockid_t clock_id);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
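
/*
 * Example: a high resolution sleep. The caller sets the task state
 * before the call; 0 is returned when the timeout expired, -EINTR when
 * the task was woken up earlier. A minimal sketch, sleeping for roughly
 * 2ms with 100us of slack (the values are hypothetical):
 *
 *	ktime_t to = ms_to_ktime(2);
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	schedule_hrtimeout_range(&to, 100 * NSEC_PER_USEC,
 *				 HRTIMER_MODE_REL);
 */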

/* Soft interrupt function to run the hrtimer queues: */
extern void hrtimer_run_queues(void);

/* Bootup initialization: */
extern void __init hrtimers_init(void);

/* Show pending timers: */
extern void sysrq_timer_list_show(void);

int hrtimers_prepare_cpu(unsigned int cpu);
#ifdef CONFIG_HOTPLUG_CPU
int hrtimers_dead_cpu(unsigned int cpu);
#else
#define hrtimers_dead_cpu	NULL
#endif

#endif