//===-- asan_allocator.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"

#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __asan {

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocations, larger redzones are used.
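// For example, rz_log == 0 encodes a 16-byte redzone and rz_log == 7 the
// maximum 2048-byte one; RZLog2Size() and RZSize2Log() below invert each
// other over this range.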
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}

static AsanAllocator &get_allocator();

static void AtomicContextStore(volatile atomic_uint64_t *atomic_context,
                               u32 tid, u32 stack) {
  u64 context = tid;
  context <<= 32;
  context += stack;
  atomic_store(atomic_context, context, memory_order_relaxed);
}

static void AtomicContextLoad(const volatile atomic_uint64_t *atomic_context,
                              u32 &tid, u32 &stack) {
  u64 context = atomic_load(atomic_context, memory_order_relaxed);
  stack = context;
  context >>= 32;
  tid = context;
}
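
// Both helpers above pack the context as (tid << 32) | stack: e.g. tid 5 with
// stack depot id 7 is stored as 0x0000000500000007.
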
71
72// The memory chunk allocated from the underlying allocator looks like this:
73// L L L L L L H H U U U U U U R R
74// L -- left redzone words (0 or more bytes)
75// H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
76// U -- user memory.
77// R -- right redzone (0 or more bytes)
78// ChunkBase consists of ChunkHeader and other bytes that overlap with user
79// memory.
80
81// If the left redzone is greater than the ChunkHeader size we store a magic
82// value in the first uptr word of the memory block and store the address of
83// ChunkBase in the next uptr.
84// M B L L L L L L L L L H H U U U U U U
85// | ^
86// ---------------------|
87// M -- magic value kAllocBegMagic
88// B -- address of ChunkHeader pointing to the first 'H'
89
90class ChunkHeader {
91 public:
92 atomic_uint8_t chunk_state;
93 u8 alloc_type : 2;
94 u8 lsan_tag : 2;
95
96 // align < 8 -> 0
97 // else -> log2(min(align, 512)) - 2
98 u8 user_requested_alignment_log : 3;
99
100 private:
101 u16 user_requested_size_hi;
102 u32 user_requested_size_lo;
103 atomic_uint64_t alloc_context_id;
104
105 public:
106 uptr UsedSize() const {
107 static_assert(sizeof(user_requested_size_lo) == 4,
108 "Expression below requires this");
109 return FIRST_32_SECOND_64(0, ((uptr)user_requested_size_hi << 32)) +
110 user_requested_size_lo;
111 }
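  // The requested size is split across user_requested_size_lo and the 16-bit
  // user_requested_size_hi: on 32-bit targets it always fits in the low word,
  // while on 64-bit targets the extra bits extend the representable size to
  // 48 bits, covering kMaxAllowedMallocSize (1ULL << 40) below.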

  void SetUsedSize(uptr size) {
    user_requested_size_lo = size;
    static_assert(sizeof(user_requested_size_lo) == 4,
                  "Expression below requires this");
    user_requested_size_hi = FIRST_32_SECOND_64(0, size >> 32);
    CHECK_EQ(UsedSize(), size);
  }

  void SetAllocContext(u32 tid, u32 stack) {
    AtomicContextStore(&alloc_context_id, tid, stack);
  }

  void GetAllocContext(u32 &tid, u32 &stack) const {
    AtomicContextLoad(&alloc_context_id, tid, stack);
  }
};

class ChunkBase : public ChunkHeader {
  atomic_uint64_t free_context_id;

 public:
  void SetFreeContext(u32 tid, u32 stack) {
    AtomicContextStore(&free_context_id, tid, stack);
  }

  void GetFreeContext(u32 &tid, u32 &stack) const {
    AtomicContextLoad(&free_context_id, tid, stack);
  }
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

enum {
  // Either just allocated by the underlying allocator, but AsanChunk is not
  // yet ready, or almost returned to the underlying allocator and AsanChunk
  // is already meaningless.
  CHUNK_INVALID = 0,
  // The chunk is allocated and not yet freed.
  CHUNK_ALLOCATED = 2,
  // The chunk was freed and put into quarantine zone.
  CHUNK_QUARANTINE = 3,
};
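// Chunk lifetime, as implemented below: Allocate() publishes CHUNK_ALLOCATED
// with a release store, Deallocate() CASes it to CHUNK_QUARANTINE, and
// QuarantineCallback::Recycle() CASes primary-backed chunks back to
// CHUNK_INVALID before the block is returned to the underlying allocator.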

class AsanChunk : public ChunkBase {
 public:
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  bool AddrIsInside(uptr addr) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize());
  }
};

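// Note on ordering: Set() writes chunk_header before publishing the magic
// with a release store, and Get() loads the magic with acquire, so a reader
// that observes kAllocBegMagic is guaranteed to see a valid chunk_header.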
class LargeChunkHeader {
  static constexpr uptr kAllocBegMagic =
      FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
  atomic_uintptr_t magic;
  AsanChunk *chunk_header;

 public:
  AsanChunk *Get() const {
    return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
               ? chunk_header
               : nullptr;
  }

  void Set(AsanChunk *p) {
    if (p) {
      chunk_header = p;
      atomic_store(&magic, kAllocBegMagic, memory_order_release);
      return;
    }

    uptr old = kAllocBegMagic;
    if (!atomic_compare_exchange_strong(&magic, &old, 0,
                                        memory_order_release)) {
      CHECK_EQ(old, kAllocBegMagic);
    }
  }
};

static void FillChunk(AsanChunk *m) {
  // FIXME: Use ReleaseMemoryPagesToOS.
  Flags &fl = *flags();

  if (fl.max_free_fill_size > 0) {
    // We have to skip the chunk header; it contains free_context_id.
    uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
    if (m->UsedSize() >= kChunkHeader2Size) {  // Skip Header2 in user area.
      uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
      size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
      REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
    }
  }
}

struct QuarantineCallback {
  QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
      : cache_(cache),
        stack_(stack) {
  }

  void PreQuarantine(AsanChunk *m) const {
    FillChunk(m);
    // Poison the region.
    PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
                 kAsanHeapFreeMagic);
  }

  void Recycle(AsanChunk *m) const {
    void *p = get_allocator().GetBlockBegin(m);

    // The secondary will immediately unpoison and unmap the memory, so this
    // branch is unnecessary.
    if (get_allocator().FromPrimary(p)) {
      if (p != m) {
        // Clear the magic value, as allocator internals may overwrite the
        // contents of deallocated chunk, confusing GetAsanChunk lookup.
        reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
      }

      u8 old_chunk_state = CHUNK_QUARANTINE;
      if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                          CHUNK_INVALID,
                                          memory_order_acquire)) {
        CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
      }

      PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
                   kAsanHeapLeftRedzoneMagic);
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    get_allocator().Deallocate(cache_, p);
  }

  void RecyclePassThrough(AsanChunk *m) const {
    // Recycle for the secondary will immediately unpoison and unmap the
    // memory, so quarantine preparation is unnecessary.
    if (get_allocator().FromPrimary(m)) {
      // The primary allocation may need pattern fill if enabled.
      FillChunk(m);
    }
    Recycle(m);
  }

  void *Allocate(uptr size) const {
    void *res = get_allocator().Allocate(cache_, size, 1);
    // TODO(alekseys): Consider making quarantine OOM-friendly.
    if (UNLIKELY(!res))
      ReportOutOfMemory(size, stack_);
    return res;
  }

  void Deallocate(void *p) const { get_allocator().Deallocate(cache_, p); }

 private:
  AllocatorCache* const cache_;
  BufferedStackTrace* const stack_;
};

typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;

void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnMapSecondary(uptr p, uptr size, uptr user_begin,
                                          uptr user_size) const {
  uptr user_end = RoundDownTo(user_begin + user_size, ASAN_SHADOW_GRANULARITY);
  user_begin = RoundUpTo(user_begin, ASAN_SHADOW_GRANULARITY);
  // The secondary mapping will be returned to the user immediately, so there
  // is no value in poisoning it with non-zero just before Allocate()
  // unpoisons it. Just poison the head and tail that stay invisible to
  // Allocate().
  PoisonShadow(p, user_begin - p, kAsanHeapLeftRedzoneMagic);
  PoisonShadow(user_end, size - (user_end - p), kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}

void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}

// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
  quarantine_size_mb = f->quarantine_size_mb;
  thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
  min_redzone = f->redzone;
  max_redzone = f->max_redzone;
  may_return_null = cf->allocator_may_return_null;
  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
  release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}

void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
  f->quarantine_size_mb = quarantine_size_mb;
  f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
  f->redzone = min_redzone;
  f->max_redzone = max_redzone;
  cf->allocator_may_return_null = may_return_null;
  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
  cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}

struct Allocator {
  static const uptr kMaxAllowedMallocSize =
      FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);

  AsanAllocator allocator;
  AsanQuarantine quarantine;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;
  QuarantineCache fallback_quarantine_cache;

  uptr max_user_defined_malloc_size;

  // ------------------- Options --------------------------
  atomic_uint16_t min_redzone;
  atomic_uint16_t max_redzone;
  atomic_uint8_t alloc_dealloc_mismatch;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized)
      : quarantine(LINKER_INITIALIZED),
        fallback_quarantine_cache(LINKER_INITIALIZED) {}

  void CheckOptions(const AllocatorOptions &options) const {
    CHECK_GE(options.min_redzone, 16);
    CHECK_GE(options.max_redzone, options.min_redzone);
    CHECK_LE(options.max_redzone, 2048);
    CHECK(IsPowerOfTwo(options.min_redzone));
    CHECK(IsPowerOfTwo(options.max_redzone));
  }

  void SharedInitCode(const AllocatorOptions &options) {
    CheckOptions(options);
    quarantine.Init((uptr)options.quarantine_size_mb << 20,
                    (uptr)options.thread_local_quarantine_size_kb << 10);
    atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
                 memory_order_release);
    atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  }

  void InitLinkerInitialized(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
    SharedInitCode(options);
    max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                       ? common_flags()->max_allocation_size_mb
                                             << 20
                                       : kMaxAllowedMallocSize;
  }

  void RePoisonChunk(uptr chunk) {
    // This could be a user-facing chunk (with redzones), or some internal
    // housekeeping chunk, like TransferBatch. Start by assuming the former.
    AsanChunk *ac = GetAsanChunk((void *)chunk);
    uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)chunk);
    if (ac && atomic_load(&ac->chunk_state, memory_order_acquire) ==
                  CHUNK_ALLOCATED) {
      uptr beg = ac->Beg();
      uptr end = ac->Beg() + ac->UsedSize();
      uptr chunk_end = chunk + allocated_size;
      if (chunk < beg && beg < end && end <= chunk_end) {
        // Looks like a valid AsanChunk in use, poison redzones only.
        PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
        uptr end_aligned_down = RoundDownTo(end, ASAN_SHADOW_GRANULARITY);
        FastPoisonShadowPartialRightRedzone(
            end_aligned_down, end - end_aligned_down,
            chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
        return;
      }
    }

    // This is either not an AsanChunk, or a freed or quarantined AsanChunk.
    // In any case, poison everything.
    PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
  }

  void ReInitialize(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
    SharedInitCode(options);

    // Poison all existing allocations' redzones.
    if (CanPoisonMemory()) {
      allocator.ForceLock();
      allocator.ForEachChunk(
          [](uptr chunk, void *alloc) {
            ((Allocator *)alloc)->RePoisonChunk(chunk);
          },
          this);
      allocator.ForceUnlock();
    }
  }

  void GetOptions(AllocatorOptions *options) const {
    options->quarantine_size_mb = quarantine.GetMaxSize() >> 20;
    options->thread_local_quarantine_size_kb =
        quarantine.GetMaxCacheSize() >> 10;
    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    options->may_return_null = AllocatorMayReturnNull();
    options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
    options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
  }

  // -------------------- Helper methods. -------------------------
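  // ComputeRZLog() below maps a request size to a redzone size class. For
  // example, a 100-byte request falls into the "<= 512 - 64" bucket, giving
  // rz_log == 2 (a 64-byte redzone) before clamping to the configured
  // [min_redzone, max_redzone] range.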
  uptr ComputeRZLog(uptr user_requested_size) {
    u32 rz_log = user_requested_size <= 64 - 16            ? 0
                 : user_requested_size <= 128 - 32         ? 1
                 : user_requested_size <= 512 - 64         ? 2
                 : user_requested_size <= 4096 - 128       ? 3
                 : user_requested_size <= (1 << 14) - 256  ? 4
                 : user_requested_size <= (1 << 15) - 512  ? 5
                 : user_requested_size <= (1 << 16) - 1024 ? 6
                                                           : 7;
    u32 hdr_log = RZSize2Log(RoundUpToPowerOfTwo(sizeof(ChunkHeader)));
    u32 min_log = RZSize2Log(atomic_load(&min_redzone, memory_order_acquire));
    u32 max_log = RZSize2Log(atomic_load(&max_redzone, memory_order_acquire));
    return Min(Max(rz_log, Max(min_log, hdr_log)), Max(max_log, hdr_log));
  }

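  // The two helpers below are inverses for alignments in [8, 512]: e.g.
  // 8 -> 1 -> 8 and 512 -> 7 -> 512. Alignments below 8 are encoded as 0,
  // and larger ones are capped at 512, matching the 3-bit
  // user_requested_alignment_log field in ChunkHeader.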
  static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
    if (user_requested_alignment < 8)
      return 0;
    if (user_requested_alignment > 512)
      user_requested_alignment = 512;
    return Log2(user_requested_alignment) - 2;
  }

  static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
    if (user_requested_alignment_log == 0)
      return 0;
    return 1LL << (user_requested_alignment_log + 2);
  }

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
                         AsanChunk *right_chunk) {
    if (!left_chunk)
      return right_chunk;
    if (!right_chunk)
      return left_chunk;
    // Prefer an allocated chunk over a freed chunk, and a freed chunk over
    // an available chunk.
    u8 left_state =
        atomic_load(&left_chunk->chunk_state, memory_order_relaxed);
    u8 right_state =
        atomic_load(&right_chunk->chunk_state, memory_order_relaxed);
    if (left_state != right_state) {
      if (left_state == CHUNK_ALLOCATED)
        return left_chunk;
      if (right_state == CHUNK_ALLOCATED)
        return right_chunk;
      if (left_state == CHUNK_QUARANTINE)
        return left_chunk;
      if (right_state == CHUNK_QUARANTINE)
        return right_chunk;
    }
    // Same chunk_state: choose based on offset.
    sptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }

  bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) {
    AsanChunk *m = GetAsanChunkByAddr(addr);
    if (!m) return false;
    if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
      return false;
    if (m->Beg() != addr) return false;
    AsanThread *t = GetCurrentThread();
    m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
    return true;
  }

  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type, bool can_fill) {
    if (UNLIKELY(!AsanInited()))
      AsanInitFromRtl();
    if (UNLIKELY(IsRssLimitExceeded())) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportRssLimitExceeded(stack);
    }
    Flags &fl = *flags();
    CHECK(stack);
    const uptr min_alignment = ASAN_SHADOW_GRANULARITY;
    const uptr user_requested_alignment_log =
        ComputeUserRequestedAlignmentLog(alignment);
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rz_log = ComputeRZLog(size);
    uptr rz_size = RZLog2Size(rz_log);
    uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    uptr needed_size = rounded_size + rz_size;
    if (alignment > min_alignment)
      needed_size += alignment;
    bool from_primary = PrimaryAllocator::CanAllocate(needed_size, alignment);
    // If we are allocating from the secondary allocator, there will be no
    // automatic right redzone, so add the right redzone manually.
    if (!from_primary)
      needed_size += rz_size;
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
        size > max_user_defined_malloc_size) {
      if (AllocatorMayReturnNull()) {
        Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
               size);
        return nullptr;
      }
      uptr malloc_limit =
          Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
      ReportAllocationSizeTooBig(size, needed_size, malloc_limit, stack);
    }

    AsanThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated = allocator.Allocate(cache, needed_size, 8);
    }
    if (UNLIKELY(!allocated)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportOutOfMemory(size, stack);
    }

    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr user_beg = alloc_beg + rz_size;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
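    // Worked example, assuming the default 8-byte shadow granularity: a
    // 100-byte malloc with 8-byte alignment gets rz_log == 2, so
    // rz_size == 64, rounded_size == 104 and needed_size == 168; user_beg is
    // alloc_beg + 64, and the ChunkHeader occupies the 16 bytes just below
    // user_beg.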
    m->alloc_type = alloc_type;
    CHECK(size);
    m->SetUsedSize(size);
    m->user_requested_alignment_log = user_requested_alignment_log;

    m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));

    if (!from_primary || *(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0) {
      // The allocator provides an unpoisoned chunk. This is possible for the
      // secondary allocator, or if CanPoisonMemory() was false for some time,
      // for example, due to flags()->start_disabled. Anyway, poison left and
      // right of the block before using it for anything else.
      uptr tail_beg = RoundUpTo(user_end, ASAN_SHADOW_GRANULARITY);
      uptr tail_end = alloc_beg + allocator.GetActuallyAllocatedSize(allocated);
      PoisonShadow(alloc_beg, user_beg - alloc_beg, kAsanHeapLeftRedzoneMagic);
      PoisonShadow(tail_beg, tail_end - tail_beg, kAsanHeapLeftRedzoneMagic);
    }

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, ASAN_SHADOW_GRANULARITY);
    // Unpoison the bulk of the memory region.
    if (size_rounded_down_to_granularity)
      PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    // Deal with the end of the region if size is not aligned to granularity.
    if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
      u8 *shadow =
          (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
      *shadow = fl.poison_partial ? (size & (ASAN_SHADOW_GRANULARITY - 1)) : 0;
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_redzones += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    if (can_fill && fl.max_malloc_fill_size) {
      uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
      REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    }
#if CAN_SANITIZE_LEAKS
    m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                 : __lsan::kDirectlyLeaked;
#endif
    // Must be the last mutation of metadata in this function.
    atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release);
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
      reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
    }
    RunMallocHooks(res, size);
    return res;
  }

  // Set quarantine flag if chunk is allocated, issue an ASan error report on
  // available and quarantined chunks. Return true on success, false otherwise.
  bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
                                              BufferedStackTrace *stack) {
    u8 old_chunk_state = CHUNK_ALLOCATED;
    // Flip the chunk_state atomically to avoid a race on double-free.
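    // The acquire ordering pairs with the release store of CHUNK_ALLOCATED
    // in Allocate(), so the thread that wins this CAS also observes the
    // chunk's fully initialized metadata.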
    if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                        CHUNK_QUARANTINE,
                                        memory_order_acquire)) {
      ReportInvalidFree(ptr, old_chunk_state, stack);
      // It's not safe to push a chunk in quarantine on invalid free.
      return false;
    }
    CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
    // It was user data.
    m->SetFreeContext(kInvalidTid, 0);
    return true;
  }

  // Expects the chunk to already be marked as quarantined by using
  // AtomicallySetQuarantineFlagIfAllocated.
  void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
    CHECK_EQ(atomic_load(&m->chunk_state, memory_order_relaxed),
             CHUNK_QUARANTINE);
    AsanThread *t = GetCurrentThread();
    m->SetFreeContext(t ? t->tid() : 0, StackDepotPut(*stack));

    // Push into quarantine.
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      AllocatorCache *ac = GetAllocatorCache(ms);
      quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
                     m->UsedSize());
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *ac = &fallback_allocator_cache;
      quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
                     m, m->UsedSize());
    }
  }

  void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
                  BufferedStackTrace *stack, AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0) return;

    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
    // malloc. Don't report an invalid free in this case.
    if (SANITIZER_WINDOWS &&
        !get_allocator().PointerIsMine(ptr)) {
      if (!IsSystemHeapAddress(p))
        ReportFreeNotMalloced(p, stack);
      return;
    }

    RunFreeHooks(ptr);

    // Must mark the chunk as quarantined before any changes to its metadata.
    // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
    if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;

    if (m->alloc_type != alloc_type) {
      if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
        ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                                (AllocType)alloc_type);
      }
    } else {
      if (flags()->new_delete_type_mismatch &&
          (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
          ((delete_size && delete_size != m->UsedSize()) ||
           ComputeUserRequestedAlignmentLog(delete_alignment) !=
               m->user_requested_alignment_log)) {
        ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
      }
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += m->UsedSize();

    QuarantineChunk(m, ptr, stack);
  }

  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
    if (new_ptr) {
      u8 chunk_state = atomic_load(&m->chunk_state, memory_order_acquire);
      if (chunk_state != CHUNK_ALLOCATED)
        ReportInvalidFree(old_ptr, chunk_state, stack);
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      // If realloc() races with free(), we may start copying freed memory.
      // However, we will report a racy double-free later anyway.
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }

  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportCallocOverflow(nmemb, size, stack);
    }
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
    // If the memory comes from the secondary allocator, there is no need to
    // clear it, as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }

  void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
    if (chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }

  void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
    allocator.SwallowCache(ac);
  }

  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  // Returns nullptr if AsanChunk is not yet initialized just after
  // get_allocator().Allocate(), or is being destroyed just before
  // get_allocator().Deallocate().
  AsanChunk *GetAsanChunk(void *alloc_beg) {
    if (!alloc_beg)
      return nullptr;
    AsanChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
    if (!p) {
      if (!allocator.FromPrimary(alloc_beg))
        return nullptr;
      p = reinterpret_cast<AsanChunk *>(alloc_beg);
    }
    u8 state = atomic_load(&p->chunk_state, memory_order_relaxed);
    // This does not guarantee that the chunk is initialized, but for any
    // other value it definitely is not.
    if (state == CHUNK_ALLOCATED || state == CHUNK_QUARANTINE)
      return p;
    return nullptr;
  }

  AsanChunk *GetAsanChunkByAddr(uptr p) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  // Allocator must be locked when this function is called.
  AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
    void *alloc_beg =
        allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  uptr AllocationSize(uptr p) {
    AsanChunk *m = GetAsanChunkByAddr(p);
    if (!m) return 0;
    if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
      return 0;
    if (m->Beg() != p) return 0;
    return m->UsedSize();
  }

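  // Unlike AllocationSize(), this fast path assumes that p points at the
  // beginning of a live allocation; __sanitizer_get_allocated_size_fast()
  // DCHECKs exactly that before calling it.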
  uptr AllocationSizeFast(uptr p) {
    return reinterpret_cast<AsanChunk *>(p - kChunkHeaderSize)->UsedSize();
  }

  AsanChunkView FindHeapChunkByAddress(uptr addr) {
    AsanChunk *m1 = GetAsanChunkByAddr(addr);
    sptr offset = 0;
    if (!m1 || AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
      // The address is in the chunk's left redzone, so maybe it is actually
      // a right buffer overflow from the chunk before it.
      // Search a bit before to see if there is another chunk.
      AsanChunk *m2 = nullptr;
      for (uptr l = 1; l < GetPageSizeCached(); l++) {
        m2 = GetAsanChunkByAddr(addr - l);
        if (m2 == m1) continue;  // Still the same chunk.
        break;
      }
      if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
        m1 = ChooseChunk(addr, m2, m1);
    }
    return AsanChunkView(m1);
  }

  void Purge(BufferedStackTrace *stack) {
    AsanThread *t = GetCurrentThread();
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      quarantine.DrainAndRecycle(GetQuarantineCache(ms),
                                 QuarantineCallback(GetAllocatorCache(ms),
                                                    stack));
    }
    {
      SpinMutexLock l(&fallback_mutex);
      quarantine.DrainAndRecycle(&fallback_quarantine_cache,
                                 QuarantineCallback(&fallback_allocator_cache,
                                                    stack));
    }

    allocator.ForceReleaseToOS();
  }

  void PrintStats() {
    allocator.PrintStats();
    quarantine.PrintStats();
  }

  void ForceLock() SANITIZER_ACQUIRE(fallback_mutex) {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() SANITIZER_RELEASE(fallback_mutex) {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};

static Allocator instance(LINKER_INITIALIZED);

static AsanAllocator &get_allocator() {
  return instance.allocator;
}

bool AsanChunkView::IsValid() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) !=
                       CHUNK_INVALID;
}
bool AsanChunkView::IsAllocated() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
                       CHUNK_ALLOCATED;
}
bool AsanChunkView::IsQuarantined() const {
  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
                       CHUNK_QUARANTINE;
}
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
u32 AsanChunkView::UserRequestedAlignment() const {
  return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
}

uptr AsanChunkView::AllocTid() const {
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetAllocContext(tid, stack);
  return tid;
}

uptr AsanChunkView::FreeTid() const {
  if (!IsQuarantined())
    return kInvalidTid;
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetFreeContext(tid, stack);
  return tid;
}

AllocType AsanChunkView::GetAllocType() const {
  return (AllocType)chunk_->alloc_type;
}

u32 AsanChunkView::GetAllocStackId() const {
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetAllocContext(tid, stack);
  return stack;
}

u32 AsanChunkView::GetFreeStackId() const {
  if (!IsQuarantined())
    return 0;
  u32 tid = 0;
  u32 stack = 0;
  chunk_->GetFreeContext(tid, stack);
  return stack;
}

void InitializeAllocator(const AllocatorOptions &options) {
  instance.InitLinkerInitialized(options);
}

void ReInitializeAllocator(const AllocatorOptions &options) {
  instance.ReInitialize(options);
}

void GetAllocatorOptions(AllocatorOptions *options) {
  instance.GetOptions(options);
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  return instance.FindHeapChunkByAddress(addr);
}
AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
  return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void *>(addr)));
}

void AsanThreadLocalMallocStorage::CommitBack() {
  GET_STACK_TRACE_MALLOC;
  instance.CommitBack(this, &stack);
}

void PrintInternalAllocatorStats() {
  instance.PrintStats();
}

void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, 0, stack, alloc_type);
}

void asan_delete(void *ptr, uptr size, uptr alignment,
                 BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, size, alignment, stack, alloc_type);
}

void *asan_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
}

void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}

void *asan_reallocarray(void *p, uptr nmemb, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return asan_realloc(p, nmemb * size, stack);
}

void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
  if (size == 0) {
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we shouldn't free() on realloc to 0.
    size = 1;
  }
  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}

void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(
      instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
}

void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(
      instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
}

void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, alloc_type, true));
}

void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(
      instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  if (!ptr) return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  instance.ForceLock();
}

void asan_mz_force_unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  instance.ForceUnlock();
}

}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::get_allocator().ForceLock();
}

void UnlockAllocator() {
  __asan::get_allocator().ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::get_allocator();
  *end = *begin + sizeof(__asan::get_allocator());
}

uptr PointsIntoChunk(void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
  if (!m || atomic_load(&m->chunk_state, memory_order_acquire) !=
                __asan::CHUNK_ALLOCATED)
    return 0;
  uptr chunk = m->Beg();
  if (m->AddrIsInside(addr))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(), addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  // FIXME: All use cases provide the chunk address, so
  // GetAsanChunkByAddrFastLocked is not needed.
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
  return m ? m->Beg() : 0;
}

uptr GetUserAddr(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = chunk ? reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize)
                    : nullptr;
}

bool LsanMetadata::allocated() const {
  if (!metadata_)
    return false;
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return atomic_load(&m->chunk_state, memory_order_relaxed) ==
         __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize();
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  u32 tid = 0;
  u32 stack = 0;
  m->GetAllocContext(tid, stack);
  return stack;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::get_allocator().ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObject(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
  if (!m ||
      (atomic_load(&m->chunk_state, memory_order_acquire) !=
       __asan::CHUNK_ALLOCATED) ||
      !m->AddrIsInside(addr)) {
    return kIgnoreObjectInvalid;
  }
  if (m->lsan_tag == kIgnored)
    return kIgnoreObjectAlreadyIgnored;
  m->lsan_tag = __lsan::kIgnored;
  return kIgnoreObjectSuccess;
}

}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;

static const void *AllocationBegin(const void *p) {
  AsanChunk *m = __asan::instance.GetAsanChunkByAddr((uptr)p);
  if (!m)
    return nullptr;
  if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
    return nullptr;
  if (m->UsedSize() == 0)
    return nullptr;
  return (const void *)(m->Beg());
}

// The ASan allocator doesn't reserve extra bytes, so normally we would just
// return "size". We don't want to expose our redzone sizes, etc. here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return instance.AllocationSize(ptr) > 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  if (!p) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = instance.AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = instance.AllocationSizeFast(reinterpret_cast<uptr>(p));
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

const void *__sanitizer_get_allocated_begin(const void *p) {
  return AllocationBegin(p);
}

void __sanitizer_purge_allocator() {
  GET_STACK_TRACE_MALLOC;
  instance.Purge(&stack);
}

int __asan_update_allocation_context(void *addr) {
  GET_STACK_TRACE_MALLOC;
  return instance.UpdateAllocationStack((uptr)addr, &stack);
}
