//===-- tsan_platform_linux.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Linux- and BSD-specific code.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "tsan_flags.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"

#include <fcntl.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <sys/mman.h>
#if SANITIZER_LINUX
#include <sys/personality.h>
#include <setjmp.h>
#endif
#include <sys/syscall.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <unistd.h>
#include <sched.h>
#include <dlfcn.h>
#if SANITIZER_LINUX
#define __need_res_state
#include <resolv.h>
#endif

#ifdef sa_handler
# undef sa_handler
#endif

#ifdef sa_sigaction
# undef sa_sigaction
#endif

#if SANITIZER_FREEBSD
extern "C" void *__libc_stack_end;
void *__libc_stack_end = 0;
#endif

#if SANITIZER_LINUX && defined(__aarch64__) && !SANITIZER_GO
# define INIT_LONGJMP_XOR_KEY 1
#else
# define INIT_LONGJMP_XOR_KEY 0
#endif

#if INIT_LONGJMP_XOR_KEY
#include "interception/interception.h"
// Must be declared outside of other namespaces.
DECLARE_REAL(int, _setjmp, void *env)
#endif

namespace __tsan {

#if INIT_LONGJMP_XOR_KEY
static void InitializeLongjmpXorKey();
static uptr longjmp_xor_key;
#endif

#ifdef TSAN_RUNTIME_VMA
// Runtime detected VMA size.
uptr vmaSize;
#endif

enum {
  MemTotal = 0,
  MemShadow = 1,
  MemMeta = 2,
  MemFile = 3,
  MemMmap = 4,
  MemTrace = 5,
  MemHeap = 6,
  MemOther = 7,
  MemCount = 8,
};

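// Callback for __sanitizer::GetMemoryProfile: invoked for each mapping parsed
// from /proc/self/smaps with its start address and resident size, attributing
// the RSS to the matching TSan region class in mem[].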
void FillProfileCallback(uptr p, uptr rss, bool file,
                         uptr *mem, uptr stats_size) {
  mem[MemTotal] += rss;
  if (p >= ShadowBeg() && p < ShadowEnd())
    mem[MemShadow] += rss;
  else if (p >= MetaShadowBeg() && p < MetaShadowEnd())
    mem[MemMeta] += rss;
#if !SANITIZER_GO
  else if (p >= HeapMemBeg() && p < HeapMemEnd())
    mem[MemHeap] += rss;
  else if (p >= LoAppMemBeg() && p < LoAppMemEnd())
    mem[file ? MemFile : MemMmap] += rss;
  else if (p >= HiAppMemBeg() && p < HiAppMemEnd())
    mem[file ? MemFile : MemMmap] += rss;
#else
  else if (p >= AppMemBeg() && p < AppMemEnd())
    mem[file ? MemFile : MemMmap] += rss;
#endif
  else if (p >= TraceMemBeg() && p < TraceMemEnd())
    mem[MemTrace] += rss;
  else
    mem[MemOther] += rss;
}

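// Renders the counters collected above into one human-readable line. This is
// the hook used by the periodic memory profiler (the profile_memory flag), so
// the output stays on a single line per sample.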
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
  uptr mem[MemCount];
  internal_memset(mem, 0, sizeof(mem[0]) * MemCount);
  __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
  StackDepotStats *stacks = StackDepotGetStats();
  internal_snprintf(buf, buf_size,
      "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
      " trace:%zd heap:%zd other:%zd stacks=%zd[%zd] nthr=%zd/%zd\n",
      mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
      mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20,
      mem[MemHeap] >> 20, mem[MemOther] >> 20,
      stacks->allocated >> 20, stacks->n_uniq_ids,
      nlive, nthread);
}

#if SANITIZER_LINUX
void FlushShadowMemoryCallback(
    const SuspendedThreadsList &suspended_threads_list,
    void *argument) {
  ReleaseMemoryPagesToOS(ShadowBeg(), ShadowEnd());
}
#endif

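// The flush runs under StopTheWorld, presumably so that no thread writes
// shadow cells while the pages are released to the OS; such a racing write
// could be discarded mid-release and leave the shadow inconsistent.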
void FlushShadowMemory() {
#if SANITIZER_LINUX
  StopTheWorld(FlushShadowMemoryCallback, 0);
#endif
}

#if !SANITIZER_GO
// Mark shadow for .rodata sections with the special kShadowRodata marker.
// Accesses to .rodata can't race, so this saves time, memory and trace space.
static void MapRodata() {
  // First create temp file.
  const char *tmpdir = GetEnv("TMPDIR");
  if (tmpdir == 0)
    tmpdir = GetEnv("TEST_TMPDIR");
#ifdef P_tmpdir
  if (tmpdir == 0)
    tmpdir = P_tmpdir;
#endif
  if (tmpdir == 0)
    return;
  char name[256];
  internal_snprintf(name, sizeof(name), "%s/tsan.rodata.%d",
                    tmpdir, (int)internal_getpid());
  uptr openrv = internal_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
  if (internal_iserror(openrv))
    return;
  internal_unlink(name);  // Unlink it now, so that we can reuse the buffer.
  fd_t fd = openrv;
  // Fill the file with kShadowRodata.
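  // The marker buffer below is 512 KB (64K u64-sized shadow cells); the file
  // is later mapped over the shadow of .rodata in 512 KB strides.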
  const uptr kMarkerSize = 512 * 1024 / sizeof(u64);
  InternalMmapVector<u64> marker(kMarkerSize);
  // volatile to prevent insertion of memset
  for (volatile u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++)
    *p = kShadowRodata;
  internal_write(fd, marker.data(), marker.size() * sizeof(u64));
  // Map the file into memory. Note: MAP_ANONYMOUS requires fd == -1.
  uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (internal_iserror(page)) {
    internal_close(fd);
    return;
  }
  // Map the file into shadow of .rodata sections.
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  // Reusing the buffer 'name'.
  MemoryMappedSegment segment(name, ARRAY_SIZE(name));
  while (proc_maps.Next(&segment)) {
    if (segment.filename[0] != 0 && segment.filename[0] != '[' &&
        segment.IsReadable() && segment.IsExecutable() &&
        !segment.IsWritable() && IsAppMem(segment.start)) {
      // Assume it's .rodata
      char *shadow_start = (char *)MemToShadow(segment.start);
      char *shadow_end = (char *)MemToShadow(segment.end);
      for (char *p = shadow_start; p < shadow_end;
           p += marker.size() * sizeof(u64)) {
        internal_mmap(p, Min<uptr>(marker.size() * sizeof(u64), shadow_end - p),
                      PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
      }
    }
  }
  internal_close(fd);
}

void InitializeShadowMemoryPlatform() {
  MapRodata();
}

#endif  // #if !SANITIZER_GO

void InitializePlatformEarly() {
#ifdef TSAN_RUNTIME_VMA
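  // The current stack frame sits near the top of the usable userspace address
  // range, so the index of its most significant set bit (plus one) yields the
  // VMA size in bits.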
  vmaSize =
    (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
#if defined(__aarch64__)
# if !SANITIZER_GO
  if (vmaSize != 39 && vmaSize != 42 && vmaSize != 48) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 39, 42 and 48\n", vmaSize);
    Die();
  }
# else
  if (vmaSize != 48) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 48\n", vmaSize);
    Die();
  }
# endif
#elif defined(__powerpc64__)
# if !SANITIZER_GO
  if (vmaSize != 44 && vmaSize != 46 && vmaSize != 47) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 44, 46, and 47\n", vmaSize);
    Die();
  }
# else
  if (vmaSize != 46 && vmaSize != 47) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 46 and 47\n", vmaSize);
    Die();
  }
# endif
#elif defined(__mips64)
# if !SANITIZER_GO
  if (vmaSize != 40) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 40\n", vmaSize);
    Die();
  }
# else
  if (vmaSize != 47) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 47\n", vmaSize);
    Die();
  }
# endif
#endif
#endif
}

void InitializePlatform() {
  DisableCoreDumperIfNecessary();

  // Go maps shadow memory lazily and works fine with limited address space.
  // Unlimited stack is not a problem as well, because the executable
  // is not compiled with -pie.
#if !SANITIZER_GO
  {
    bool reexec = false;
    // TSan doesn't play well with unlimited stack size (as stack
    // overlaps with shadow memory). If we detect unlimited stack size,
    // we re-exec the program with limited stack size as a best effort.
    if (StackSizeIsUnlimited()) {
      const uptr kMaxStackSize = 32 * 1024 * 1024;
      VReport(1, "Program is run with unlimited stack size, which wouldn't "
                 "work with ThreadSanitizer.\n"
                 "Re-execing with stack size limited to %zd bytes.\n",
              kMaxStackSize);
      SetStackSizeLimitInBytes(kMaxStackSize);
      reexec = true;
    }

    if (!AddressSpaceIsUnlimited()) {
      Report("WARNING: Program is run with limited virtual address space,"
             " which wouldn't work with ThreadSanitizer.\n");
      Report("Re-execing with unlimited virtual address space.\n");
      SetAddressSpaceUnlimited();
      reexec = true;
    }
#if SANITIZER_LINUX && defined(__aarch64__)
    // After the patch "arm64: mm: support ARCH_MMAP_RND_BITS." was introduced
    // in the Linux kernel, the random gap between the stack and the mapped
    // area increased from 128M to 36G on 39-bit aarch64. As it is almost
    // impossible to cover such a big range, we disable randomized virtual
    // space on aarch64.
    int old_personality = personality(0xffffffff);
    if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) {
      VReport(1, "WARNING: Program is run with randomized virtual address "
                 "space, which wouldn't work with ThreadSanitizer.\n"
                 "Re-execing with fixed virtual address space.\n");
      CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
      reexec = true;
    }
    // Initialize the xor key used in {sig}{set,long}jmp.
    InitializeLongjmpXorKey();
#endif
    if (reexec)
      ReExec();
  }

  CheckAndProtect();
  InitTlsSize();
#endif  // !SANITIZER_GO
}

#if !SANITIZER_GO
// Extract file descriptors passed to glibc internal __res_iclose function.
// This is required to properly "close" the fds, because we do not see internal
// closes within glibc. The code is a pure hack.
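// A sketch of the intended use (the real caller is the __res_iclose
// interceptor; thr/pc and the fds capacity are illustrative):
//   int fds[64];
//   int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
//   for (int i = 0; i < cnt; i++)
//     FdClose(thr, pc, fds[i]);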
int ExtractResolvFDs(void *state, int *fds, int nfd) {
#if SANITIZER_LINUX && !SANITIZER_ANDROID
  int cnt = 0;
  struct __res_state *statp = (struct __res_state*)state;
  for (int i = 0; i < MAXNS && cnt < nfd; i++) {
    if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1)
      fds[cnt++] = statp->_u._ext.nssocks[i];
  }
  return cnt;
#else
  return 0;
#endif
}

// Extract file descriptors passed via UNIX domain sockets.
// This is required to properly handle "open" of these fds.
// See 'man recvmsg' and 'man 3 cmsg'.
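// A sketch of the intended use (the real caller is the recvmsg interceptor;
// thr/pc and nfd are illustrative):
//   int fds[nfd];
//   int cnt = ExtractRecvmsgFDs(msg, fds, nfd);
//   for (int i = 0; i < cnt; i++)
//     FdEventCreate(thr, pc, fds[i]);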
int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
  int res = 0;
  msghdr *msg = (msghdr*)msgp;
  struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
  for (; cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
    if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS)
      continue;
    int n = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(fds[0]);
    for (int i = 0; i < n; i++) {
      fds[res++] = ((int*)CMSG_DATA(cmsg))[i];
      if (res == nfd)
        return res;
    }
  }
  return res;
}

// Reverse operation of libc stack pointer mangling
static uptr UnmangleLongJmpSp(uptr mangled_sp) {
#if defined(__x86_64__)
# if SANITIZER_LINUX
  // Reverse of:
  //   xor %fs:0x30, %rsi
  //   rol $0x11, %rsi
  uptr sp;
  asm("ror $0x11, %0 \n"
      "xor %%fs:0x30, %0 \n"
      : "=r" (sp)
      : "0" (mangled_sp));
  return sp;
# else
  return mangled_sp;
# endif
#elif defined(__aarch64__)
# if SANITIZER_LINUX
  return mangled_sp ^ longjmp_xor_key;
# else
  return mangled_sp;
# endif
#elif defined(__powerpc64__)
  // Reverse of:
  //   ld r4, -28696(r13)
  //   xor r4, r3, r4
  uptr xor_key;
  asm("ld %0, -28696(%%r13)" : "=r" (xor_key));
  return mangled_sp ^ xor_key;
#elif defined(__mips__)
  return mangled_sp;
#else
#error "Unknown platform"
#endif
}

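// LONG_JMP_SP_ENV_SLOT is the index of the (possibly mangled) saved stack
// pointer within the platform's jmp_buf. The layout is libc-specific; e.g. on
// glibc/x86_64 the mangled RSP lives in __jmp_buf slot 6 (JB_RSP).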
#if SANITIZER_NETBSD
# ifdef __x86_64__
# define LONG_JMP_SP_ENV_SLOT 6
# else
# error unsupported
# endif
#elif defined(__powerpc__)
# define LONG_JMP_SP_ENV_SLOT 0
#elif SANITIZER_FREEBSD
# define LONG_JMP_SP_ENV_SLOT 2
#elif SANITIZER_LINUX
# ifdef __aarch64__
# define LONG_JMP_SP_ENV_SLOT 13
# elif defined(__mips64)
# define LONG_JMP_SP_ENV_SLOT 1
# else
# define LONG_JMP_SP_ENV_SLOT 6
# endif
#endif

uptr ExtractLongJmpSp(uptr *env) {
  uptr mangled_sp = env[LONG_JMP_SP_ENV_SLOT];
  return UnmangleLongJmpSp(mangled_sp);
}

#if INIT_LONGJMP_XOR_KEY
// GLIBC mangles the function pointers in jmp_buf (used in {set,long}*jmp
// functions) by XORing them with a random key. For AArch64 it is a global
// variable rather than a TCB one (as for x86_64/powerpc). We obtain the key by
// issuing a setjmp and XORing the SP pointer values to derive the key.
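// This recovers the key because glibc's AArch64 pointer mangling is a plain
// XOR with the guard value; there is no rotate step as on x86_64.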
static void InitializeLongjmpXorKey() {
  // 1. Call REAL(setjmp), which stores the mangled SP in env.
  jmp_buf env;
  REAL(_setjmp)(env);

  // 2. Retrieve vanilla/mangled SP.
  uptr sp;
  asm("mov %0, sp" : "=r" (sp));
  uptr mangled_sp = ((uptr *)&env)[LONG_JMP_SP_ENV_SLOT];

  // 3. xor SPs to obtain key.
  longjmp_xor_key = mangled_sp ^ sp;
}
#endif

void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
  // Check that the thr object is in tls;
  const uptr thr_beg = (uptr)thr;
  const uptr thr_end = (uptr)thr + sizeof(*thr);
  CHECK_GE(thr_beg, tls_addr);
  CHECK_LE(thr_beg, tls_addr + tls_size);
  CHECK_GE(thr_end, tls_addr);
  CHECK_LE(thr_end, tls_addr + tls_size);
  // Since the thr object is huge, skip it.
  MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr, thr_beg - tls_addr);
  MemoryRangeImitateWrite(thr, /*pc=*/2, thr_end,
                          tls_addr + tls_size - thr_end);
}

// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
int call_pthread_cancel_with_cleanup(int (*fn)(void *arg),
                                     void (*cleanup)(void *arg), void *arg) {
  // pthread_cleanup_push/pop are hardcore macros mess.
  // We can't intercept nor call them w/o including pthread.h.
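  // They typically expand to a matched '{'/'}' pair, so the push and pop
  // below must stay lexically paired within this function.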
  int res;
  pthread_cleanup_push(cleanup, arg);
  res = fn(arg);
  pthread_cleanup_pop(0);
  return res;
}
#endif  // !SANITIZER_GO

#if !SANITIZER_GO
void ReplaceSystemMalloc() { }
#endif

#if !SANITIZER_GO
#if SANITIZER_ANDROID
// On Android, one thread can call intercepted functions after
// DestroyThreadState(), so add a fake thread state for "dead" threads.
static ThreadState *dead_thread_state = nullptr;

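// Returns the ThreadState for the current thread, allocating it on first use.
// All signals are blocked during initialization so that a signal handler
// running on this thread cannot observe a half-initialized thread state.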
ThreadState *cur_thread() {
  ThreadState* thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
  if (thr == nullptr) {
    __sanitizer_sigset_t emptyset;
    internal_sigfillset(&emptyset);
    __sanitizer_sigset_t oldset;
    CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset));
    thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
    if (thr == nullptr) {
      thr = reinterpret_cast<ThreadState*>(MmapOrDie(sizeof(ThreadState),
                                                     "ThreadState"));
      *get_android_tls_ptr() = reinterpret_cast<uptr>(thr);
      if (dead_thread_state == nullptr) {
        dead_thread_state = reinterpret_cast<ThreadState*>(
            MmapOrDie(sizeof(ThreadState), "ThreadState"));
        dead_thread_state->fast_state.SetIgnoreBit();
        dead_thread_state->ignore_interceptors = 1;
        dead_thread_state->is_dead = true;
        *const_cast<u32*>(&dead_thread_state->tid) = -1;
        CHECK_EQ(0, internal_mprotect(dead_thread_state, sizeof(ThreadState),
                                      PROT_READ));
      }
    }
    CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
  }
  return thr;
}

void set_cur_thread(ThreadState *thr) {
  *get_android_tls_ptr() = reinterpret_cast<uptr>(thr);
}

void cur_thread_finalize() {
  __sanitizer_sigset_t emptyset;
  internal_sigfillset(&emptyset);
  __sanitizer_sigset_t oldset;
  CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset));
  ThreadState* thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
  if (thr != dead_thread_state) {
    *get_android_tls_ptr() = reinterpret_cast<uptr>(dead_thread_state);
    UnmapOrDie(thr, sizeof(ThreadState));
  }
  CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
}
#endif  // SANITIZER_ANDROID
#endif  // if !SANITIZER_GO

}  // namespace __tsan

#endif  // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD