/* Helper code for POSIX timer implementation on NPTL.
   Copyright (C) 2000-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Kaz Kylheku <kaz@ashi.footprints.net>.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sysdep.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>

#include "posix-timer.h"
#include <timer_routines.h>

#ifndef DELAYTIMER_MAX
# define DELAYTIMER_MAX INT_MAX
#endif

/* Maximum number of threads used.  */
#define THREAD_MAXNODES 16

/* Array containing the descriptors for the used threads.  */
static struct thread_node thread_array[THREAD_MAXNODES];

/* Static array with the structures for all the timers.  */
struct timer_node __timer_array[TIMER_MAX];

/* Global lock to protect operation on the lists.  */
pthread_mutex_t __timer_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Variable to protect initialization.  */
pthread_once_t __timer_init_once_control = PTHREAD_ONCE_INIT;

/* Nonzero if initialization of timer implementation failed.  */
int __timer_init_failed;

/* Node for the thread used to deliver signals.  */
struct thread_node __timer_signal_thread_rclk;

/* Lists to keep free and used timers and threads.  */
static struct list_head timer_free_list;
static struct list_head thread_free_list;
static struct list_head thread_active_list;


#ifdef __NR_rt_sigqueueinfo
extern int __syscall_rt_sigqueueinfo (int, int, siginfo_t *);
#endif


/* List handling functions.  */
static inline void
list_append (struct list_head *list, struct list_head *newp)
{
  newp->prev = list->prev;
  newp->next = list;
  list->prev->next = newp;
  list->prev = newp;
}

static inline void
list_insbefore (struct list_head *list, struct list_head *newp)
{
  list_append (list, newp);
}

/*
 * Like list_unlink_ip, except that calling it on a node that
 * is already unlinked is disastrous rather than a noop.
 */

static inline void
list_unlink (struct list_head *list)
{
  struct list_head *lnext = list->next, *lprev = list->prev;

  lnext->prev = lprev;
  lprev->next = lnext;
}

static inline struct list_head *
list_first (struct list_head *list)
{
  return list->next;
}

static inline struct list_head *
list_null (struct list_head *list)
{
  return list;
}

static inline struct list_head *
list_next (struct list_head *list)
{
  return list->next;
}

static inline int
list_isempty (struct list_head *list)
{
  return list->next == list;
}
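
/* These primitives implement a circular doubly-linked list in which the
   head acts as its own sentinel: an empty list is one whose next pointer
   refers back to the head, which is exactly what list_isempty and
   list_null express.  A typical traversal therefore looks like

     for (iter = list_first (head); iter != list_null (head);
          iter = list_next (iter))
       ...

   as __timer_thread_queue_timer does below.  */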


/* Functions built on top of the list functions.  */
static inline struct thread_node *
thread_links2ptr (struct list_head *list)
{
  return (struct thread_node *) ((char *) list
                                 - offsetof (struct thread_node, links));
}

static inline struct timer_node *
timer_links2ptr (struct list_head *list)
{
  return (struct timer_node *) ((char *) list
                                - offsetof (struct timer_node, links));
}
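
/* Both converters recover the enclosing structure from its embedded
   list_head by subtracting the member offset, in the style of a
   container_of macro; for any node on a timer list the identity
   iter == &timer_links2ptr (iter)->links holds, so list nodes need no
   back pointer to their owning structure.  */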


/* Initialize a newly allocated thread structure.  */
static void
thread_init (struct thread_node *thread, const pthread_attr_t *attr, clockid_t clock_id)
{
  if (attr != NULL)
    thread->attr = *attr;
  else
    {
      pthread_attr_init (&thread->attr);
      pthread_attr_setdetachstate (&thread->attr, PTHREAD_CREATE_DETACHED);
    }

  thread->exists = 0;
  INIT_LIST_HEAD (&thread->timer_queue);
  pthread_cond_init (&thread->cond, 0);
  thread->current_timer = 0;
  thread->captured = pthread_self ();
  thread->clock_id = clock_id;
}
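
/* When the caller supplies no attributes, the worker thread is created
   detached; nothing in this file ever joins a timer thread, so a detached
   thread avoids leaking its resources when it exits.  */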


/* Initialize the global lists, and acquire global resources.  Error
   reporting is done by storing a non-zero value to the global variable
   __timer_init_failed.  */
static void
init_module (void)
{
  int i;

  INIT_LIST_HEAD (&timer_free_list);
  INIT_LIST_HEAD (&thread_free_list);
  INIT_LIST_HEAD (&thread_active_list);

  for (i = 0; i < TIMER_MAX; ++i)
    {
      list_append (&timer_free_list, &__timer_array[i].links);
      __timer_array[i].inuse = TIMER_FREE;
    }

  for (i = 0; i < THREAD_MAXNODES; ++i)
    list_append (&thread_free_list, &thread_array[i].links);

  thread_init (&__timer_signal_thread_rclk, 0, CLOCK_REALTIME);
}


/* This is a handler executed in a child process after a fork()
   occurs.  It reinitializes the module, resetting all of the data
   structures to their initial state.  The mutex is initialized in
   case it was locked in the parent process.  */
static void
reinit_after_fork (void)
{
  init_module ();
  pthread_mutex_init (&__timer_mutex, 0);
}


/* Called once from pthread_once in timer_init.  This initializes the
   module and ensures that reinit_after_fork will be executed in any
   child process.  */
void
__timer_init_once (void)
{
  init_module ();
  pthread_atfork (0, 0, reinit_after_fork);
}


/* Deinitialize a thread that is about to be deallocated.  */
static void
thread_deinit (struct thread_node *thread)
{
  assert (list_isempty (&thread->timer_queue));
  pthread_cond_destroy (&thread->cond);
}


/* Allocate a thread structure from the global free list.  Global
   mutex lock must be held by caller.  The thread is moved to
   the active list.  */
struct thread_node *
__timer_thread_alloc (const pthread_attr_t *desired_attr, clockid_t clock_id)
{
  struct list_head *node = list_first (&thread_free_list);

  if (node != list_null (&thread_free_list))
    {
      struct thread_node *thread = thread_links2ptr (node);
      list_unlink (node);
      thread_init (thread, desired_attr, clock_id);
      list_append (&thread_active_list, node);
      return thread;
    }

  return 0;
}


/* Return a thread structure to the global free list.  Global lock
   must be held by caller.  */
void
__timer_thread_dealloc (struct thread_node *thread)
{
  thread_deinit (thread);
  list_unlink (&thread->links);
  list_append (&thread_free_list, &thread->links);
}


/* Each of our threads which terminates executes this cleanup
   handler.  We never terminate threads ourselves; if a thread gets here
   it means that the evil application has killed it.  If the thread has
   timers, these require servicing and so we must hire a replacement
   thread right away.  We must also unblock another thread that may
   have been waiting for this thread to finish servicing a timer (see
   timer_delete()).  */

static void
thread_cleanup (void *val)
{
  if (val != NULL)
    {
      struct thread_node *thread = val;

      /* How did the signal thread get killed?  */
      assert (thread != &__timer_signal_thread_rclk);

      pthread_mutex_lock (&__timer_mutex);

      thread->exists = 0;

      /* We are no longer processing a timer event.  */
      thread->current_timer = 0;

      if (list_isempty (&thread->timer_queue))
        __timer_thread_dealloc (thread);
      else
        (void) __timer_thread_start (thread);

      pthread_mutex_unlock (&__timer_mutex);

      /* Unblock potentially blocked timer_delete().  */
      pthread_cond_broadcast (&thread->cond);
    }
}


/* Handle a timer which is supposed to go off now.  */
static void
thread_expire_timer (struct thread_node *self, struct timer_node *timer)
{
  self->current_timer = timer; /* Lets timer_delete know timer is running.  */

  pthread_mutex_unlock (&__timer_mutex);

  switch (__builtin_expect (timer->event.sigev_notify, SIGEV_SIGNAL))
    {
    case SIGEV_NONE:
      break;

    case SIGEV_SIGNAL:
#ifdef __NR_rt_sigqueueinfo
      {
        siginfo_t info;

        /* First, clear the siginfo_t structure, so that we don't pass our
           stack content to other tasks.  */
        memset (&info, 0, sizeof (siginfo_t));
        /* We must pass the information about the data in a siginfo_t
           value.  */
        info.si_signo = timer->event.sigev_signo;
        info.si_code = SI_TIMER;
        info.si_pid = timer->creator_pid;
        info.si_uid = getuid ();
        info.si_value = timer->event.sigev_value;

        INLINE_SYSCALL (rt_sigqueueinfo, 3, info.si_pid, info.si_signo, &info);
      }
#else
      if (pthread_kill (self->captured, timer->event.sigev_signo) != 0)
        {
          if (pthread_kill (self->id, timer->event.sigev_signo) != 0)
            abort ();
        }
#endif
      break;

    case SIGEV_THREAD:
      timer->event.sigev_notify_function (timer->event.sigev_value);
      break;

    default:
      assert (! "unknown event");
      break;
    }

  pthread_mutex_lock (&__timer_mutex);

  self->current_timer = 0;

  pthread_cond_broadcast (&self->cond);
}
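
/* Note that the global mutex is deliberately dropped around the
   notification above: queueing the signal or running a SIGEV_THREAD
   callback executes outside the lock, and only current_timer marks the
   timer as busy so that timer_delete can wait on self->cond for the
   expiry to finish before it tears the timer down.  */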


/* Thread function; executed by each timer thread.  The job of this
   function is to wait on the thread's timer queue and expire the
   timers in chronological order as close to their scheduled time as
   possible.  */
static void
__attribute__ ((noreturn))
thread_func (void *arg)
{
  struct thread_node *self = arg;

  /* Register cleanup handler, in case rogue application terminates
     this thread.  (This cannot happen to __timer_signal_thread, which
     doesn't invoke application callbacks).  */

  pthread_cleanup_push (thread_cleanup, self);

  pthread_mutex_lock (&__timer_mutex);

  while (1)
    {
      struct list_head *first;
      struct timer_node *timer = NULL;

      /* While the timer queue is not empty, inspect the first node.  */
      first = list_first (&self->timer_queue);
      if (first != list_null (&self->timer_queue))
        {
          struct timespec now;

          timer = timer_links2ptr (first);

          /* This assumes that the elements of the list of one thread
             are all for the same clock.  */
          __clock_gettime (timer->clock, &now);

          while (1)
            {
              /* If the timer is due or overdue, remove it from the queue.
                 If it's a periodic timer, re-compute its new time and
                 requeue it.  Either way, perform the timer expiry.  */
              if (timespec_compare (&now, &timer->expirytime) < 0)
                break;

              list_unlink_ip (first);

              if (__builtin_expect (timer->value.it_interval.tv_sec, 0) != 0
                  || timer->value.it_interval.tv_nsec != 0)
                {
                  timer->overrun_count = 0;
                  timespec_add (&timer->expirytime, &timer->expirytime,
                                &timer->value.it_interval);
                  while (timespec_compare (&timer->expirytime, &now) < 0)
                    {
                      timespec_add (&timer->expirytime, &timer->expirytime,
                                    &timer->value.it_interval);
                      if (timer->overrun_count < DELAYTIMER_MAX)
                        ++timer->overrun_count;
                    }
                  __timer_thread_queue_timer (self, timer);
                }

              thread_expire_timer (self, timer);

              first = list_first (&self->timer_queue);
              if (first == list_null (&self->timer_queue))
                break;

              timer = timer_links2ptr (first);
            }
        }

      /* If the queue is not empty, wait until the expiry time of the
         first node.  Otherwise wait indefinitely.  Insertions at the
         head of the queue must wake up the thread by broadcasting
         this condition variable.  */
      if (timer != NULL)
        pthread_cond_timedwait (&self->cond, &__timer_mutex,
                                &timer->expirytime);
      else
        pthread_cond_wait (&self->cond, &__timer_mutex);
    }
  /* This is never reached, since the while loop above never terminates,
     but it is required to match the pthread_cleanup_push above.  */
  pthread_cleanup_pop (1);
}
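
/* A note on the requeue logic above: when a periodic timer is behind
   schedule, its expiry time is advanced by it_interval until it lies in
   the future again, and each additional interval skipped increments
   overrun_count, clamped to DELAYTIMER_MAX so that timer_getoverrun
   never reports an unbounded value.  */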


/* Enqueue a timer in wakeup order in the thread's timer queue.
   Returns 1 if the timer was inserted at the head of the queue,
   causing the queue's next wakeup time to change.  */

int
__timer_thread_queue_timer (struct thread_node *thread,
                            struct timer_node *insert)
{
  struct list_head *iter;
  int athead = 1;

  for (iter = list_first (&thread->timer_queue);
       iter != list_null (&thread->timer_queue);
       iter = list_next (iter))
    {
      struct timer_node *timer = timer_links2ptr (iter);

      if (timespec_compare (&insert->expirytime, &timer->expirytime) < 0)
        break;
      athead = 0;
    }

  list_insbefore (iter, &insert->links);
  return athead;
}
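
/* The queue stays sorted by expirytime, so the servicing thread only ever
   has to examine the head.  A return value of 1 means the new timer is now
   the earliest one, and callers are expected to follow up with a wakeup,
   roughly like this (a sketch -- the real call sites live in the other
   timer_* routines, not in this file):

     if (__timer_thread_queue_timer (thread, timer))
       __timer_thread_wakeup (thread);
 */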


/* Start a thread and associate it with the given thread node.  Global
   lock must be held by caller.  */
int
__timer_thread_start (struct thread_node *thread)
{
  int retval = 1;
  sigset_t set, oset;

  assert (!thread->exists);
  thread->exists = 1;

  sigfillset (&set);
  pthread_sigmask (SIG_SETMASK, &set, &oset);

  if (pthread_create (&thread->id, &thread->attr,
                      (void *(*) (void *)) thread_func, thread) != 0)
    {
      thread->exists = 0;
      retval = -1;
    }

  pthread_sigmask (SIG_SETMASK, &oset, NULL);

  return retval;
}
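
/* The worker is created with every signal blocked: the mask is inherited
   across pthread_create, so signals queued on timer expiry are left for
   the application's threads rather than a timer worker.  The creator's
   own mask is restored immediately afterwards.  */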


void
__timer_thread_wakeup (struct thread_node *thread)
{
  pthread_cond_broadcast (&thread->cond);
}



/* Search the list of active threads and find one which has matching
   attributes.  Global mutex lock must be held by caller.  */
struct thread_node *
__timer_thread_find_matching (const pthread_attr_t *desired_attr,
                              clockid_t desired_clock_id)
{
  struct list_head *iter = list_first (&thread_active_list);

  while (iter != list_null (&thread_active_list))
    {
      struct thread_node *candidate = thread_links2ptr (iter);

      if (thread_attr_compare (desired_attr, &candidate->attr)
          && desired_clock_id == candidate->clock_id)
        return candidate;

      iter = list_next (iter);
    }

  return NULL;
}
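
/* This lets timers created with equivalent attributes and the same clock
   share a single worker thread rather than each starting its own, keeping
   the number of helper threads within THREAD_MAXNODES.  */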


/* Grab a free timer structure from the global free list.  The global
   lock must be held by the caller.  */
struct timer_node *
__timer_alloc (void)
{
  struct list_head *node = list_first (&timer_free_list);

  if (node != list_null (&timer_free_list))
    {
      struct timer_node *timer = timer_links2ptr (node);
      list_unlink_ip (node);
      timer->inuse = TIMER_INUSE;
      timer->refcount = 1;
      return timer;
    }

  return NULL;
}
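
/* A freshly allocated timer starts with refcount 1; __timer_dealloc below
   asserts that the count has dropped back to zero before the node is
   returned to the free list.  */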


/* Return a timer structure to the global free list.  The global lock
   must be held by the caller.  */
void
__timer_dealloc (struct timer_node *timer)
{
  assert (timer->refcount == 0);
  timer->thread = NULL;  /* Break association between timer and thread.  */
  timer->inuse = TIMER_FREE;
  list_append (&timer_free_list, &timer->links);
}


/* Thread cancellation handler which unlocks a mutex.  */
void
__timer_mutex_cancel_handler (void *arg)
{
  pthread_mutex_unlock (arg);
}
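
/* This handler is meant to be registered with pthread_cleanup_push by
   callers (presumably the other timer_* routines) that hold __timer_mutex
   across a cancellation point, so the lock cannot be leaked if the calling
   thread is cancelled.  */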