/* Copyright (C) 2002-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef _DESCR_H
#define _DESCR_H 1

#include <limits.h>
#include <sched.h>
#include <setjmp.h>
#include <stdbool.h>
#include <sys/types.h>
#include <hp-timing.h>
#include <list_t.h>
#include <lowlevellock.h>
#include <pthreaddef.h>
#include <dl-sysdep.h>
#include <thread_db.h>
#include <tls.h>
#include <unwind.h>
#include <bits/types/res_state.h>
#include <kernel-features.h>
#include <tls-internal-struct.h>
#include <sys/rseq.h>

#ifndef TCB_ALIGNMENT
# define TCB_ALIGNMENT 32
#elif TCB_ALIGNMENT < 32
# error TCB_ALIGNMENT must be at least 32
#endif


/* We keep thread-specific data in a special data structure, a two-level
   array.  The top-level array contains pointers to dynamically allocated
   arrays holding a certain number of data pointers, so we can implement a
   sparse array.  Each dynamic second-level array has
   PTHREAD_KEY_2NDLEVEL_SIZE entries.  This value shouldn't be too
   large.  */
#define PTHREAD_KEY_2NDLEVEL_SIZE 32

/* We need to address PTHREAD_KEYS_MAX keys with PTHREAD_KEY_2NDLEVEL_SIZE
   keys in each subarray.  */
#define PTHREAD_KEY_1STLEVEL_SIZE \
  ((PTHREAD_KEYS_MAX + PTHREAD_KEY_2NDLEVEL_SIZE - 1) \
   / PTHREAD_KEY_2NDLEVEL_SIZE)
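
/* Illustrative sketch (not part of the original header): how a key
   index maps onto the two-level array.  PD and KEY are hypothetical
   names for a thread descriptor and a pthread_key_t value.

     unsigned int idx1st = key / PTHREAD_KEY_2NDLEVEL_SIZE;
     unsigned int idx2nd = key % PTHREAD_KEY_2NDLEVEL_SIZE;
     struct pthread_key_data *level2 = pd->specific[idx1st];
     void *value = level2 == NULL ? NULL : level2[idx2nd].data;

   Second-level blocks are allocated only when first used, which is
   what makes the array sparse.  */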


/* Internal version of the buffer to store cancellation handler
   information.  */
struct pthread_unwind_buf
{
  struct
  {
    __jmp_buf jmp_buf;
    int mask_was_saved;
  } cancel_jmp_buf[1];

  union
  {
    /* This is the placeholder of the public version.  */
    void *pad[4];

    struct
    {
      /* Pointer to the previous cleanup buffer.  */
      struct pthread_unwind_buf *prev;

      /* Backward compatibility: state of the old-style cleanup
         handler at the time of the previous new-style cleanup handler
         installment.  */
      struct _pthread_cleanup_buffer *cleanup;

      /* Cancellation type before the push call.  */
      int canceltype;
    } data;
  } priv;
};
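
/* Illustrative sketch (an assumption, not necessarily how glibc's
   cleanup push is written): a new-style cleanup push links a buffer at
   the head of the calling thread's chain, and unwinding pops buffers
   in LIFO order by following priv.data.prev.

     void
     unwind_buf_push_sketch (struct pthread *self,
                             struct pthread_unwind_buf *buf)
     {
       buf->priv.data.prev = self->cleanup_jmp_buf;
       buf->priv.data.cleanup = self->cleanup;
       self->cleanup_jmp_buf = buf;
     }
*/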


/* Opcodes and data types for communication with the signal handler to
   change user/group IDs.  */
struct xid_command
{
  int syscall_no;
  /* Enforce zero-extension for the pointer argument in

       int setgroups (size_t size, const gid_t *list);

     The kernel XID arguments are unsigned and do not require sign
     extension.  */
  unsigned long int id[3];
  volatile int cntr;
  volatile int error; /* -1: no call yet, 0: success seen, >0: error seen.  */
};
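
/* Illustrative sketch (an assumption about why the field is unsigned
   long): storing a 32-bit unsigned gid_t into an id[] slot
   zero-extends it on 64-bit targets, so the kernel never sees a
   sign-extended value even when the high bit is set.

     struct xid_command cmd;
     gid_t rgid = 0x80000000u;
     cmd.id[0] = rgid;        // zero-extended, since gid_t is unsigned

   A signed intermediate type would sign-extend the high bit instead.  */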


/* Data structure used by the kernel to find robust futexes.  */
struct robust_list_head
{
  void *list;
  long int futex_offset;
  void *list_op_pending;
};
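
/* Illustrative sketch (an assumption about the kernel side): when a
   thread dies, the kernel walks LIST and recovers each mutex's futex
   word by adding FUTEX_OFFSET to the entry address, roughly:

     void *entry = head->list;
     uint32_t *futex_word
       = (uint32_t *) ((char *) entry + head->futex_offset);

   LIST_OP_PENDING covers the window in which an entry is being added
   or removed and is not yet (or no longer) linked into LIST.  */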


/* Data structure used to handle thread priority protection.  */
struct priority_protection_data
{
  int priomax;
  unsigned int priomap[];
};
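
/* Illustrative sketch (an assumption about how the fields cooperate):
   PRIOMAP counts, per priority level, how many PTHREAD_PRIO_PROTECT
   mutexes with that ceiling the thread currently holds, and PRIOMAX
   caches the highest ceiling in use.  MIN_PRIO is a hypothetical name
   for the lowest scheduler priority.

     tpp->priomap[ceiling - min_prio]++;
     if (ceiling > tpp->priomax)
       tpp->priomax = ceiling;
*/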


/* Thread descriptor data structure.  */
struct pthread
{
  union
  {
#if !TLS_DTV_AT_TP
    /* This overlaps the TCB as used for TLS without threads (see tls.h).  */
    tcbhead_t header;
#else
    struct
    {
      /* multiple_threads is enabled either when the process has spawned at
         least one thread or when a single-threaded process cancels itself.
         This enables additional code to introduce locking before doing some
         compare_and_exchange operations and also enables cancellation points.
         The concepts of multiple threads and cancellation points ideally
         should be separate, since it is not necessary for multiple threads
         to have been created for cancellation points to be enabled, as is
         the case when a single-threaded process cancels itself.

         Since enabling multiple_threads enables additional code in
         cancellation points and compare_and_exchange operations, there is a
         potential for an unneeded performance hit when it is enabled in a
         single-threaded, self-canceling process.  This is OK though, since a
         single-threaded process will enable async cancellation only when it
         looks to cancel itself and is hence going to end anyway.  */
      int multiple_threads;
      int gscope_flag;
    } header;
#endif

    /* This extra padding has no special purpose, and this structure layout
       is private and subject to change without affecting the official ABI.
       We just have it here in case it might be convenient for some
       implementation-specific instrumentation hack or suchlike.  */
    void *__padding[24];
  };

  /* This descriptor's link on the GL (dl_stack_used) or
     GL (dl_stack_user) list.  */
  list_t list;

  /* Thread ID - which is also an 'is this thread descriptor (and
     therefore stack) used' flag.  */
  pid_t tid;

  /* Unused.  */
  pid_t pid_ununsed;

  /* List of robust mutexes the thread is holding.  */
#if __PTHREAD_MUTEX_HAVE_PREV
  void *robust_prev;
  struct robust_list_head robust_head;

  /* The list above is strange.  It is basically a doubly-linked list
     but the pointer to the next/previous element of the list points
     in the middle of the object, the __next element.  Whenever
     casting to __pthread_list_t we need to adjust the pointer
     first.
     These operations are effectively concurrent code in that the thread
     can get killed at any point in time and the kernel takes over.  Thus,
     the __next elements are a kind of concurrent list and we need to
     enforce using compiler barriers that the individual operations happen
     in such a way that the kernel always sees a consistent list.  The
     backward links (i.e., the __prev elements) are not used by the kernel.
     FIXME We should use relaxed MO atomic operations here and signal fences
     because this kind of concurrency is similar to synchronizing with a
     signal handler.  */
# define QUEUE_PTR_ADJUST (offsetof (__pthread_list_t, __next))

# define ENQUEUE_MUTEX_BOTH(mutex, val) \
  do { \
    __pthread_list_t *next = (__pthread_list_t *) \
      ((((uintptr_t) THREAD_GETMEM (THREAD_SELF, robust_head.list)) & ~1ul) \
       - QUEUE_PTR_ADJUST); \
    next->__prev = (void *) &mutex->__data.__list.__next; \
    mutex->__data.__list.__next = THREAD_GETMEM (THREAD_SELF, \
                                                 robust_head.list); \
    mutex->__data.__list.__prev = (void *) &THREAD_SELF->robust_head; \
    /* Ensure that the new list entry is ready before we insert it.  */ \
    __asm ("" ::: "memory"); \
    THREAD_SETMEM (THREAD_SELF, robust_head.list, \
                   (void *) (((uintptr_t) &mutex->__data.__list.__next) \
                             | val)); \
  } while (0)
# define DEQUEUE_MUTEX(mutex) \
  do { \
    __pthread_list_t *next = (__pthread_list_t *) \
      ((char *) (((uintptr_t) mutex->__data.__list.__next) & ~1ul) \
       - QUEUE_PTR_ADJUST); \
    next->__prev = mutex->__data.__list.__prev; \
    __pthread_list_t *prev = (__pthread_list_t *) \
      ((char *) (((uintptr_t) mutex->__data.__list.__prev) & ~1ul) \
       - QUEUE_PTR_ADJUST); \
    prev->__next = mutex->__data.__list.__next; \
    /* Ensure that we remove the entry from the list before we change the \
       __next pointer of the entry, which is read by the kernel.  */ \
    __asm ("" ::: "memory"); \
    mutex->__data.__list.__prev = NULL; \
    mutex->__data.__list.__next = NULL; \
  } while (0)
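
/* Illustrative sketch (not part of the original header): the
   kernel-visible links point at the __next member rather than at the
   start of the node, so recovering a node from the head pointer
   subtracts QUEUE_PTR_ADJUST after masking off the PI tag bit:

     void *link = THREAD_GETMEM (THREAD_SELF, robust_head.list);
     __pthread_list_t *node = (__pthread_list_t *)
       (((uintptr_t) link & ~1ul) - QUEUE_PTR_ADJUST);
*/
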
#else
  union
  {
    __pthread_slist_t robust_list;
    struct robust_list_head robust_head;
  };

# define ENQUEUE_MUTEX_BOTH(mutex, val) \
  do { \
    mutex->__data.__list.__next \
      = THREAD_GETMEM (THREAD_SELF, robust_list.__next); \
    /* Ensure that the new list entry is ready before we insert it.  */ \
    __asm ("" ::: "memory"); \
    THREAD_SETMEM (THREAD_SELF, robust_list.__next, \
                   (void *) (((uintptr_t) &mutex->__data.__list) | val)); \
  } while (0)
# define DEQUEUE_MUTEX(mutex) \
  do { \
    __pthread_slist_t *runp = (__pthread_slist_t *) \
      (((uintptr_t) THREAD_GETMEM (THREAD_SELF, robust_list.__next)) & ~1ul); \
    if (runp == &mutex->__data.__list) \
      THREAD_SETMEM (THREAD_SELF, robust_list.__next, runp->__next); \
    else \
      { \
        __pthread_slist_t *next = (__pthread_slist_t *) \
          (((uintptr_t) runp->__next) & ~1ul); \
        while (next != &mutex->__data.__list) \
          { \
            runp = next; \
            next = (__pthread_slist_t *) (((uintptr_t) runp->__next) & ~1ul); \
          } \
 \
        runp->__next = next->__next; \
        /* Ensure that we remove the entry from the list before we change the \
           __next pointer of the entry, which is read by the kernel.  */ \
        __asm ("" ::: "memory"); \
        mutex->__data.__list.__next = NULL; \
      } \
  } while (0)
#endif
#define ENQUEUE_MUTEX(mutex) ENQUEUE_MUTEX_BOTH (mutex, 0)
#define ENQUEUE_MUTEX_PI(mutex) ENQUEUE_MUTEX_BOTH (mutex, 1)
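
/* Illustrative note (not part of the original header): VAL is OR-ed
   into bit 0 of the stored pointer to tag priority-inheritance
   mutexes.  List nodes are sufficiently aligned that bit 0 of a valid
   pointer is always zero, so it is free to carry the flag; readers
   mask it off with "& ~1ul" as the macros above do.

     ENQUEUE_MUTEX (mutex);      // ordinary robust mutex, tag bit 0
     ENQUEUE_MUTEX_PI (mutex);   // PI robust mutex, tag bit 1
*/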

  /* List of cleanup buffers.  */
  struct _pthread_cleanup_buffer *cleanup;

  /* Unwind information.  */
  struct pthread_unwind_buf *cleanup_jmp_buf;
#define HAVE_CLEANUP_JMP_BUF

  /* Flags determining processing of cancellation.  */
  int cancelhandling;
  /* Bit set if cancellation is disabled.  */
#define CANCELSTATE_BIT 0
#define CANCELSTATE_BITMASK (1 << CANCELSTATE_BIT)
  /* Bit set if asynchronous cancellation mode is selected.  */
#define CANCELTYPE_BIT 1
#define CANCELTYPE_BITMASK (1 << CANCELTYPE_BIT)
  /* Bit set if canceling has been initiated.  */
#define CANCELING_BIT 2
#define CANCELING_BITMASK (1 << CANCELING_BIT)
  /* Bit set if canceled.  */
#define CANCELED_BIT 3
#define CANCELED_BITMASK (1 << CANCELED_BIT)
  /* Bit set if thread is exiting.  */
#define EXITING_BIT 4
#define EXITING_BITMASK (1 << EXITING_BIT)
  /* Bit set if thread terminated and TCB is freed.  */
#define TERMINATED_BIT 5
#define TERMINATED_BITMASK (1 << TERMINATED_BIT)
  /* Bit set if thread is supposed to change XID.  */
#define SETXID_BIT 6
#define SETXID_BITMASK (1 << SETXID_BIT)
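
/* Illustrative sketch (an assumption about how these bits are used):
   updates to cancelhandling are done with atomic compare-and-exchange
   loops so concurrent cancellation, exit, and setxid transitions are
   not lost, roughly:

     int old = pd->cancelhandling, new;
     do
       new = old | CANCELING_BITMASK | CANCELED_BITMASK;
     while (!atomic_compare_exchange_weak_acquire (&pd->cancelhandling,
                                                   &old, new));
*/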

  /* Flags, including those copied from the thread attribute.  */
  int flags;

  /* We allocate one block of references here.  This should be enough
     to avoid allocating any memory dynamically for most applications.  */
  struct pthread_key_data
  {
    /* Sequence number.  We use uintptr_t to not require padding on
       32- and 64-bit machines.  On 64-bit machines it helps to avoid
       wrapping, too.  */
    uintptr_t seq;

    /* Data pointer.  */
    void *data;
  } specific_1stblock[PTHREAD_KEY_2NDLEVEL_SIZE];

  /* Two-level array for the thread-specific data.  */
  struct pthread_key_data *specific[PTHREAD_KEY_1STLEVEL_SIZE];

  /* Flag which is set when specific data is set.  */
  bool specific_used;

  /* True if events must be reported.  */
  bool report_events;

  /* True if the user provided the stack.  */
  bool user_stack;

  /* True if thread must stop at startup time.  */
  bool stopped_start;

  /* Indicates that a thread creation setup has failed (for instance the
     scheduler or affinity).  */
  int setup_failed;

  /* Lock to synchronize access to the descriptor.  */
  int lock;

  /* Lock for synchronizing setxid calls.  */
  unsigned int setxid_futex;

#if HP_TIMING_INLINE
  hp_timing_t cpuclock_offset_ununsed;
#endif

  /* If the thread waits to join another one, the ID of the latter is
     stored here.

     In case a thread is detached this field contains a pointer to the
     TCB of the thread itself.  This is something which cannot happen
     in normal operation.  */
  struct pthread *joinid;
  /* Check whether a thread is detached.  */
#define IS_DETACHED(pd) ((pd)->joinid == (pd))
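
/* Illustrative sketch (an assumption about how detach is recorded):
   pointing joinid at the thread's own descriptor is a value no real
   joiner can produce, which is why IS_DETACHED works:

     THREAD_SETMEM (pd, joinid, pd);    // mark detached
     if (IS_DETACHED (pd))              // a later join must fail
       return EINVAL;
*/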

  /* The result of the thread function.  */
  void *result;

  /* Scheduling parameters for the new thread.  */
  struct sched_param schedparam;
  int schedpolicy;

  /* Start position of the code to be executed and the argument passed
     to the function.  */
  void *(*start_routine) (void *);
  void *arg;

  /* Debug state.  */
  td_eventbuf_t eventbuf;
  /* Next descriptor with a pending event.  */
  struct pthread *nextevent;

  /* Machine-specific unwind info.  */
  struct _Unwind_Exception exc;

  /* If nonzero, pointer to the area allocated for the stack and guard.  */
  void *stackblock;
  /* Size of the stackblock area including the guard.  */
  size_t stackblock_size;
  /* Size of the included guard area.  */
  size_t guardsize;
  /* This is what the user specified and what we will report.  */
  size_t reported_guardsize;

  /* Thread Priority Protection data.  */
  struct priority_protection_data *tpp;

  /* Resolver state.  */
  struct __res_state res;

  /* Signal mask for the new thread.  Used during thread startup to
     restore the signal mask.  (Threads are launched with all signals
     masked.)  */
  sigset_t sigmask;

  /* Indicates whether this is a C11 thread created by thrd_create.  */
  bool c11;

  /* Used in __pthread_kill_internal to detect a thread that has
     exited or is about to exit.  exit_lock must only be acquired
     after blocking signals.  */
  bool exiting;
  int exit_lock; /* A low-level lock (for use with __libc_lock_init etc).  */

  /* Used by strsignal.  */
  struct tls_internal_t tls_state;

  /* rseq area registered with the kernel.  */
  struct rseq rseq_area;

  /* This member must be last.  */
  char end_padding[];

#define PTHREAD_STRUCT_END_PADDING \
  (sizeof (struct pthread) - offsetof (struct pthread, end_padding))
} __attribute ((aligned (TCB_ALIGNMENT)));
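
/* Illustrative sketch (an assumption, not a use found in this header):
   PTHREAD_STRUCT_END_PADDING makes it easy to clear everything that
   follows end_padding's offset when a cached descriptor is reused:

     memset (pd->end_padding, 0, PTHREAD_STRUCT_END_PADDING);
*/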

static inline bool
cancel_enabled_and_canceled (int value)
{
  return (value & (CANCELSTATE_BITMASK | CANCELED_BITMASK | EXITING_BITMASK
                   | TERMINATED_BITMASK))
    == CANCELED_BITMASK;
}

static inline bool
cancel_enabled_and_canceled_and_async (int value)
{
  return (value & (CANCELSTATE_BITMASK | CANCELTYPE_BITMASK | CANCELED_BITMASK
                   | EXITING_BITMASK | TERMINATED_BITMASK))
    == (CANCELTYPE_BITMASK | CANCELED_BITMASK);
}
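
/* Illustrative sketch (an assumption about a caller): a cancellation
   point might test the flags before acting on a pending request, with
   __do_cancel_sketch standing in for the internal cancellation entry
   point:

     int value = THREAD_GETMEM (self, cancelhandling);
     if (cancel_enabled_and_canceled (value))
       __do_cancel_sketch ();

   The EXITING and TERMINATED bits in the mask ensure that a thread
   already on its way out is not canceled a second time.  */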

/* This yields the pointer that TLS support code calls the thread pointer.  */
#if TLS_TCB_AT_TP
# define TLS_TPADJ(pd) (pd)
#elif TLS_DTV_AT_TP
# define TLS_TPADJ(pd) ((struct pthread *) ((char *) (pd) + TLS_PRE_TCB_SIZE))
#endif
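
/* Illustrative note (not part of the original header): the two TLS
   layouts place the descriptor on opposite sides of the thread
   pointer.  With TLS_TCB_AT_TP they coincide; with TLS_DTV_AT_TP the
   descriptor sits TLS_PRE_TCB_SIZE bytes below the thread pointer:

     struct pthread *pd = THREAD_SELF;
     void *tp = TLS_TPADJ (pd);
*/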

#endif /* descr.h */