//===-- tsan_mman.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_interface.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnMapSecondary(uptr p, uptr size, uptr user_begin,
                      uptr user_size) const {}
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
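    // kMetaShadowSize bytes of meta shadow describe kMetaShadowCell bytes of
    // app memory, so to release whole pages of meta shadow we trim the app
    // range below to multiples of kPageSize (page size scaled by kMetaRatio).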
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};

static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

struct GlobalProc {
  Mutex mtx;
  Processor *proc;
  // This mutex represents the internal allocator combined for
  // the purposes of deadlock detection. The internal allocator
  // uses multiple mutexes; they are locked only occasionally,
  // and they are spin mutexes which don't support deadlock detection.
  // So we use this fake mutex as a substitute for all of them.
  CheckedMutex internal_alloc_mtx;

  GlobalProc()
      : mtx(MutexTypeGlobalProc),
        proc(ProcCreate()),
        internal_alloc_mtx(MutexTypeInternalAlloc) {}
};

static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}

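// Locks and immediately unlocks the fake internal allocator mutex so that
// the deadlock detector sees every internal allocator access through a
// single mutex (see the comment on GlobalProc::internal_alloc_mtx).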
static void InternalAllocAccess() {
  global_proc()->internal_alloc_mtx.Lock();
  global_proc()->internal_alloc_mtx.Unlock();
}

ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __interceptor_free
  //   __nptl_deallocate_tsd
  //   start_thread
  //   clone
  // and:
  //   ResetRange
  //   __interceptor_munmap
  //   __deallocate_stack
  //   start_thread
  //   clone
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait it). Then we would not need the global proc.
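  // Note: gp->mtx is left locked here and is released in the destructor, so
  // at most one thread uses the global proc at any time.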
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}

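// AllocatorLock/AllocatorUnlock take both the fake internal allocator mutex
// and the real internal allocator locks, and release them in reverse order;
// they are used when all allocation must be quiesced (e.g. around fork).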
void AllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->internal_alloc_mtx.Lock();
  InternalAllocatorLock();
}

void AllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  InternalAllocatorUnlock();
  global_proc()->internal_alloc_mtx.Unlock();
}

void GlobalProcessorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->mtx.Lock();
}

void GlobalProcessorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->mtx.Unlock();
}

static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
static uptr max_user_defined_malloc_size;
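// The effective allocation size limit is the smaller of kMaxAllowedMallocSize
// (1 << 40 bytes) and the max_allocation_size_mb flag, which is converted to
// bytes in InitializeAllocator() below.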

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
  max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                     ? common_flags()->max_allocation_size_mb
                                           << 20
                                     : kMaxAllowedMallocSize;
}

void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !ShouldReport(thr, ReportTypeSignalUnsafe))
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}

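// Common backend for all user-facing allocation entry points; align and
// signal have default arguments in the declaration (see tsan_mman.h), so
// most callers pass only thr/pc/sz.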
void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                          bool signal) {
  if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize ||
      sz > max_user_defined_malloc_size) {
    if (AllocatorMayReturnNull())
      return nullptr;
    uptr malloc_limit =
        Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportAllocationSizeTooBig(sz, malloc_limit, &stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportRssLimitExceeded(&stack);
  }
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportOutOfMemory(sz, &stack);
  }
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportCallocOverflow(n, size, &stack);
  }
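  // CheckForCallocOverflow() above guarantees that n * size does not
  // overflow.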
  void *p = user_alloc_internal(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return SetErrnoOnNull(p);
}

void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportReallocArrayOverflow(size, n, &stack);
  }
  return user_realloc(thr, pc, p, size * n);
}

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = 0x%zx\n", thr->tid, sz, p);
  // Note: this can run before thread initialization/after finalization.
  // As a result this is not necessarily synchronized with DoReset,
  // which iterates over and resets all sync objects,
  // but it is fine to create new MBlocks in this context.
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  // If this runs before thread initialization/after finalization
  // and we don't have the trace initialized, we can't imitate writes.
  // In such a case, just reset the shadow range; this is fine since
  // it affects only a small fraction of special objects.
  if (write && thr->ignore_reads_and_writes == 0 &&
      atomic_load_relaxed(&thr->trace_pos))
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  if (!thr->slot) {
    // Very early/late in thread lifetime, or during fork.
    UNUSED uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, false);
    DPrintf("#%d: free(0x%zx, %zu) (no slot)\n", thr->tid, p, sz);
    return;
  }
  SlotLocker locker(thr);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, true);
  DPrintf("#%d: free(0x%zx, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (!p)
    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
  if (!sz) {
    user_free(thr, pc, p);
    return nullptr;
  }
  void *new_p = user_alloc_internal(thr, pc, sz);
  if (new_p) {
    uptr old_sz = user_alloc_usable_size(p);
    internal_memcpy(new_p, p, min(old_sz, sz));
    user_free(thr, pc, p);
  }
  return SetErrnoOnNull(new_p);
}

void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!IsPowerOfTwo(align))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAllocationAlignment(align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                        uptr sz) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidPosixMemalignAlignment(align, &stack);
  }
  void *ptr = user_alloc_internal(thr, pc, sz, align);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by user_alloc_internal.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, align));
  *memptr = ptr;
  return 0;
}

void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAlignedAllocAlignment(sz, align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}

void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportPvallocOverflow(sz, &stack);
  }
  // pvalloc(0) should allocate one page.
  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}

static const void *user_alloc_begin(const void *p) {
  if (p == nullptr || !IsAppMem((uptr)p))
    return nullptr;
  void *beg = allocator()->GetBlockBegin(p);
  if (!beg)
    return nullptr;

  MBlock *b = ctx->metamap.GetBlock((uptr)beg);
  if (!b)
    return nullptr;  // Not a valid pointer.

  return (const void *)beg;
}


uptr user_alloc_usable_size(const void *p) {
  if (p == 0 || !IsAppMem((uptr)p))
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

uptr user_alloc_usable_size_fast(const void *p) {
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  // Static objects may have malloc'd before tsan completes
  // initialization, and may believe returned ptrs to be valid.
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}


void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  RunFreeHooks(ptr);
}

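// Alloc/FreeImpl serve the runtime's own data structures via the internal
// allocator; these allocations are separate from the user heap handled above.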
void *Alloc(uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalAllocAccess();
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void FreeImpl(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalAllocAccess();
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

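// The following two statistics are not tracked by TSan; fixed placeholder
// values are returned.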
uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

const void *__sanitizer_get_allocated_begin(const void *p) {
  return user_alloc_begin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = user_alloc_usable_size_fast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

void __sanitizer_purge_allocator() {
  allocator()->ForceReleaseToOS();
}

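// Drains the calling thread's per-Processor allocator caches and lets the
// meta map release unused resources; intended to be called when the thread
// is going to be idle for a while.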
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"