//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H

#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif

namespace __sanitizer {

struct AddressInfo;
struct BufferedStackTrace;
struct SignalContext;
struct StackTrace;

// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;

const uptr kMaxPathLength = 4096;

const uptr kMaxThreadStackSize = 1 << 30;  // 1 GiB

const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/Java/etc.
// For such PC values __tsan_symbolize_external_ex() will be called.
const u64 kExternalPCBit = 1ULL << 60;

extern const char *SanitizerToolName;  // Can be changed by the tool.

extern atomic_uint32_t current_verbosity;
inline void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
inline int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}

#if SANITIZER_ANDROID
inline uptr GetPageSize() {
  // Android post-M sysconf(_SC_PAGESIZE) crashes if called from
  // .preinit_array.
  return 4096;
}
inline uptr GetPageSizeCached() {
  return 4096;
}
#else
uptr GetPageSize();
extern uptr PageSizeCached;
inline uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
#endif
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
uptr GetMaxUserVirtualAddress();
// Threads
tid_t GetTid();
int TgKill(pid_t pid, tid_t tid, int sig);
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);

// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
inline void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
// Behaves just like MmapOrDie, but tolerates an out-of-memory condition; in
// that case it returns nullptr.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
    WARN_UNUSED_RESULT;
bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size,
                             const char *name = nullptr) WARN_UNUSED_RESULT;
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
// Behaves just like MmapFixedOrDie, but tolerates an out-of-memory condition;
// in that case it returns nullptr.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size,
                                 const char *name = nullptr);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Maps an aligned chunk of address space; size and alignment are powers of
// two. Dies on all but out-of-memory errors; in the latter case returns
// nullptr.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type);
// Disallow access to a memory range. Use MmapFixedNoAccess to allocate
// inaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);

void MprotectMallocZones(void *addr, int prot);

#if SANITIZER_LINUX
// Unmap memory. Currently only used on Linux.
void UnmapFromTo(uptr from, uptr to);
#endif

// Maps shadow_size_bytes of shadow memory and returns the shadow address. It
// will be aligned to the mmap granularity * 2^shadow_scale, or to
// 2^min_shadow_base_alignment if that is larger. The returned address will
// have max(2^min_shadow_base_alignment, mmap granularity) bytes on the left,
// and shadow_size_bytes bytes on the right, which on Linux is mapped
// no-access. high_mem_end may be updated if the original shadow size doesn't
// fit.
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                      uptr min_shadow_base_alignment, uptr &high_mem_end);
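// An illustrative instance of the arithmetic above (the numbers are
// assumptions, not a guaranteed layout): with a 4096-byte mmap granularity,
// shadow_scale == 3 and min_shadow_base_alignment == 0, the returned shadow
// base is aligned to 4096 * 2^3 == 32768 bytes.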

// Let S = max(shadow_size, num_aliases * alias_size, ring_buffer_size).
// Reserves 2*S bytes of address space to the right of the returned address and
// ring_buffer_size bytes to the left. The returned address is aligned to 2*S.
// Also creates num_aliases regions of accessible memory starting at offset S
// from the returned address. Each region has size alias_size and is backed by
// the same physical memory.
uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
                                uptr num_aliases, uptr ring_buffer_size);

// Reserve memory range [beg, end]. If madvise_shadow is true then apply
// madvise (e.g. hugepages, core dumping) requested by options.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
                              bool madvise_shadow = true);

// Protect size bytes of memory starting at addr. Also try to protect
// several pages at the start of the address space as specified by
// zero_base_shadow_start, at most up to the size or zero_base_max_shadow_start.
void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
                uptr zero_base_max_shadow_start);

// Find an available address space.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found, uptr *max_occupied_addr);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
// Releases memory pages entirely within the [beg, end] address range. No-op if
// the provided range does not contain at least one entire page.
void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
void SetShadowRegionHugePageMode(uptr addr, uptr length);
bool DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
void RunMallocHooks(const void *ptr, uptr size);
void RunFreeHooks(const void *ptr);

class ReservedAddressRange {
 public:
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  uptr InitAligned(uptr size, uptr align, const char *name = nullptr);
  uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr);
  uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
  void Unmap(uptr addr, uptr size);
  void *base() const { return base_; }
  uptr size() const { return size_; }

 private:
  void *base_;
  uptr size_;
  const char *name_;
  uptr os_handle_;
};
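// Example usage (a minimal sketch; the size and name are illustrative):
//   ReservedAddressRange range;
//   uptr base = range.Init(/*size=*/1 << 20, "example-range");
//   range.MapOrDie(base, GetPageSizeCached());  // Commit the first page.
//   range.Unmap(base, GetPageSizeCached());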

typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
                               /*out*/ uptr *stats, uptr stats_size);

// Parse the contents of /proc/self/smaps and generate a memory profile.
// |cb| is a tool-specific callback that fills the |stats| array containing
// |stats_size| elements.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size);

// Simple low-level (mmap-based) allocator for internal use. Doesn't have a
// constructor, so all instances of LowLevelAllocator should be
// linker-initialized.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);

 private:
  char *allocated_end_;
  char *allocated_current_;
};
// Set the min alignment of LowLevelAllocator to at least alignment.
void SetLowLevelAllocateMinAlignment(uptr alignment);
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Allows registering a tool-specific callback for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);

// IO
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...)                                \
  do {                                                     \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
  } while (0)
#define VPrintf(level, ...)                                \
  do {                                                     \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
  } while (0)
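// Example (illustrative): report only when the user raises verbosity, e.g.
// via <TOOL>_OPTIONS=verbosity=1:
//   VReport(1, "Using mmap granularity 0x%zx\n", GetMmapGranularity());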

// Locks sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock();
  ~ScopedErrorReportLock();

  static void CheckLocked();
};

extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

bool IsAccessibleMemoryRange(uptr beg, uptr size);

// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);

// OS
uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/ char *buf, uptr buf_len);
uptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);

u32 GetUid();
void ReExec();
void CheckASLR();
void CheckMPROTECT();
char **GetArgv();
char **GetEnviron();
void PrintCmdline();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)());

void InitializeCoverage(bool enabled, const char *coverage_dir);

void InitTlsSize();
uptr GetTlsSize();

// Other
void SleepForSeconds(int seconds);
void SleepForMillis(int millis);
u64 NanoTime();
u64 MonotonicNanoTime();
int Atexit(void (*function)(void));
bool TemplateMatch(const char *templ, const char *str);

// Exit
void NORETURN Abort();
void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);

// Specific tools may override behavior of "Die" and "CheckFailed" functions
// to do a tool-specific job.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that would be run when "Die" is
// called. The callbacks will be run in reverse order of registration. Tools
// are strongly advised to set up all callbacks during initialization, when
// there is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);

typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
                                        u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);

// Callback will be called if soft_rss_limit_mb is given and the limit is
// exceeded (exceeded==true) or if RSS went back down below the limit
// (exceeded==false).
// The callback should be registered once at the tool init time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));

// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);

// Signal reporting.
// Each sanitizer uses a slightly different implementation of stack unwinding.
typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
                                              const void *callback_context,
                                              BufferedStackTrace *stack);
// Print deadly signal report and die.
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Part of HandleDeadlySignal, exposed for asan.
void StartReportDeadlySignal();
// Part of HandleDeadlySignal, exposed for asan.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();

// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
// If alt_tool_name is provided, it's used in place of SanitizerToolName.
void ReportErrorSummary(const char *error_message,
                        const char *alt_tool_name = nullptr);
// Same as above, but construct error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name = nullptr);
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
                        const char *alt_tool_name = nullptr);

void ReportMmapWriteExec(int prot);

// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);
#endif
}
#endif

inline uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}

inline uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}

inline bool IsPowerOfTwo(uptr x) {
  return (x & (x - 1)) == 0;
}

inline uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}

inline uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

inline uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

inline bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

inline uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}
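// Worked examples for the helpers above (boundary/alignment must be a power
// of two):
//   RoundUpTo(13, 8) == 16, RoundDownTo(13, 8) == 8, IsAligned(16, 8),
//   RoundUpToPowerOfTwo(13) == 16, Log2(16) == 4.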

// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template <class T>
constexpr T Min(T a, T b) {
  return a < b ? a : b;
}
template <class T>
constexpr T Max(T a, T b) {
  return a > b ? a : b;
}
template <class T>
void Swap(T &a, T &b) {
  T tmp = a;
  a = b;
  b = tmp;
}

// Char handling
inline bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') ||
         (c == '\f') || (c == '\r') || (c == '\v');
}
inline bool IsDigit(int c) {
  return (c >= '0') && (c <= '9');
}
inline int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}

// A low-level vector based on mmap. May incur a significant memory overhead
// for small vectors.
// WARNING: The current implementation supports only POD types.
template <typename T>
class InternalMmapVectorNoCtor {
 public:
  using value_type = T;
  void Initialize(uptr initial_capacity) {
    capacity_bytes_ = 0;
    size_ = 0;
    data_ = nullptr;
    reserve(initial_capacity);
  }
  void Destroy() { UnmapOrDie(data_, capacity_bytes_); }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  void push_back(const T &element) {
    CHECK_LE(size_, capacity());
    if (size_ == capacity()) {
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Realloc(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const {
    return size_;
  }
  const T *data() const {
    return data_;
  }
  T *data() {
    return data_;
  }
  uptr capacity() const { return capacity_bytes_ / sizeof(T); }
  void reserve(uptr new_size) {
    // Never downsize internal buffer.
    if (new_size > capacity())
      Realloc(new_size);
  }
  void resize(uptr new_size) {
    if (new_size > size_) {
      reserve(new_size);
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const {
    return data();
  }
  T *begin() {
    return data();
  }
  const T *end() const {
    return data() + size();
  }
  T *end() {
    return data() + size();
  }

  void swap(InternalMmapVectorNoCtor &other) {
    Swap(data_, other.data_);
    Swap(capacity_bytes_, other.capacity_bytes_);
    Swap(size_, other.size_);
  }

 private:
  void Realloc(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    uptr new_capacity_bytes =
        RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
    T *new_data = (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector");
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    UnmapOrDie(data_, capacity_bytes_);
    data_ = new_data;
    capacity_bytes_ = new_capacity_bytes;
  }

  T *data_;
  uptr capacity_bytes_;
  uptr size_;
};

template <typename T>
bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  if (lhs.size() != rhs.size()) return false;
  return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
}

template <typename T>
bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  return !(lhs == rhs);
}

template <typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(0); }
  explicit InternalMmapVector(uptr cnt) {
    InternalMmapVectorNoCtor<T>::Initialize(cnt);
    this->resize(cnt);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow copies and moves.
  InternalMmapVector(const InternalMmapVector &) = delete;
  InternalMmapVector &operator=(const InternalMmapVector &) = delete;
  InternalMmapVector(InternalMmapVector &&) = delete;
  InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};
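// Example (illustrative): a growable, mmap-backed scratch buffer. Storage is
// unmapped when the vector goes out of scope:
//   InternalMmapVector<uptr> addrs;
//   addrs.push_back(0x1000);
//   addrs.push_back(0x2000);
//   CHECK_EQ(addrs.size(), 2);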

class InternalScopedString {
 public:
  InternalScopedString() : buffer_(1) { buffer_[0] = '\0'; }

  uptr length() const { return buffer_.size() - 1; }
  void clear() {
    buffer_.resize(1);
    buffer_[0] = '\0';
  }
  void append(const char *format, ...);
  const char *data() const { return buffer_.data(); }
  char *data() { return buffer_.data(); }

 private:
  InternalMmapVector<char> buffer_;
};
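// Example (illustrative, assuming append() accepts the internal printf-style
// formats): the buffer stays NUL-terminated across appends:
//   InternalScopedString str;
//   str.append("%s: %d\n", "errors", 3);
//   RawWrite(str.data());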

template <class T>
struct CompareLess {
  bool operator()(const T &a, const T &b) const { return a < b; }
};

// HeapSort for arrays and InternalMmapVector.
template <class T, class Compare = CompareLess<T>>
void Sort(T *v, uptr size, Compare comp = {}) {
  if (size < 2)
    return;
  // Stage 1: insert elements to the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp(v[p], v[j]))
        Swap(v[j], v[p]);
      else
        break;
    }
  }
  // Stage 2: swap largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap(v[0], v[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp(v[max_ind], v[left]))
        max_ind = left;
      if (right < i && comp(v[max_ind], v[right]))
        max_ind = right;
      if (max_ind != j)
        Swap(v[j], v[max_ind]);
      else
        break;
    }
  }
}
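// Example (illustrative): ascending sort with the default comparator, or a
// custom (here descending) one:
//   uptr a[] = {3, 1, 2};
//   Sort(a, ARRAY_SIZE(a));                                       // 1 2 3
//   Sort(a, ARRAY_SIZE(a), [](uptr x, uptr y) { return x > y; }); // 3 2 1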

// Works like std::lower_bound: finds the first element that is not less
// than the val.
template <class Container,
          class Compare = CompareLess<typename Container::value_type>>
uptr InternalLowerBound(const Container &v,
                        const typename Container::value_type &val,
                        Compare comp = {}) {
  uptr first = 0;
  uptr last = v.size();
  while (last > first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else
      last = mid;
  }
  return first;
}
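// Example (illustrative): on a sorted container holding {10, 20, 30},
// InternalLowerBound(v, 20) == 1 and InternalLowerBound(v, 25) == 2; a result
// equal to v.size() means every element is less than val.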

enum ModuleArch {
  kModuleArchUnknown,
  kModuleArchI386,
  kModuleArchX86_64,
  kModuleArchX86_64H,
  kModuleArchARMV6,
  kModuleArchARMV7,
  kModuleArchARMV7S,
  kModuleArchARMV7K,
  kModuleArchARM64,
  kModuleArchRISCV64
};

// Sorts and removes duplicates from the container.
template <class Container,
          class Compare = CompareLess<typename Container::value_type>>
void SortAndDedup(Container &v, Compare comp = {}) {
  Sort(v.data(), v.size(), comp);
  uptr size = v.size();
  if (size < 2)
    return;
  uptr last = 0;
  for (uptr i = 1; i < size; ++i) {
    if (comp(v[last], v[i])) {
      ++last;
      if (last != i)
        v[last] = v[i];
    } else {
      CHECK(!comp(v[i], v[last]));
    }
  }
  v.resize(last + 1);
}
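// Example (illustrative): a container holding {3, 1, 3, 2} becomes {1, 2, 3};
// elements that compare equivalent are collapsed into one.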

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if file was successfully opened and read.
bool ReadFileToVector(const char *file_name,
                      InternalMmapVectorNoCtor<char> *buff,
                      uptr max_len = 1 << 26, error_t *errno_p = nullptr);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector as it may re-read
// the file multiple times to avoid using mmap during read attempts. It's used
// to read procmaps, where short reads with mmap in between can produce an
// inconsistent result.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = 1 << 26,
                      error_t *errno_p = nullptr);

// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
inline const char *ModuleArchToString(ModuleArch arch) {
  switch (arch) {
    case kModuleArchUnknown:
      return "";
    case kModuleArchI386:
      return "i386";
    case kModuleArchX86_64:
      return "x86_64";
    case kModuleArchX86_64H:
      return "x86_64h";
    case kModuleArchARMV6:
      return "armv6";
    case kModuleArchARMV7:
      return "armv7";
    case kModuleArchARMV7S:
      return "armv7s";
    case kModuleArchARMV7K:
      return "armv7k";
    case kModuleArchARM64:
      return "arm64";
    case kModuleArchRISCV64:
      return "riscv64";
  }
  CHECK(0 && "Invalid module arch");
  return "";
}

const uptr kModuleUUIDSize = 16;
const uptr kMaxSegName = 16;

// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_executable_address_(0),
        arch_(kModuleArchUnknown),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_executable_address() const { return max_executable_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  bool instrumented() const { return instrumented_; }

  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;
    bool writable;
    char name[kMaxSegName];

    AddressRange(uptr beg, uptr end, bool executable, bool writable,
                 const char *name)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {
      internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    }
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_executable_address_;
  ModuleArch arch_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};

// List of LoadedModules. OS-dependent implementation is responsible for
// filling this information.
class ListOfModules {
 public:
  ListOfModules() : initialized(false) {}
  ~ListOfModules() { clear(); }
  void init();
  void fallbackInit();  // Uses fallback init if available, otherwise clears.
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }
  void clearOrInit() {
    initialized ? clear() : modules_.Initialize(kInitialCapacity);
    initialized = true;
  }

  InternalMmapVectorNoCtor<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
  bool initialized;
};

// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};

void WriteToSyslog(const char *buffer);

#if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)
#define SANITIZER_WIN_TRACE 1
#else
#define SANITIZER_WIN_TRACE 0
#endif

#if SANITIZER_MAC || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
inline void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_MAC
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
inline void WriteOneLineToSyslog(const char *s) {}
inline void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
inline void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
inline void SetAbortMessage(const char *) {}
#endif

#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
inline void AndroidLogWrite(const char *buffer_unused) {}
inline void SanitizerInitializeUnwinder() {}
inline AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif

inline uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
  // Unused on Windows.
  return 0;
#endif
}

void *internal_start_thread(void *(*func)(void *), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();

// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}
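// Example usage (illustrative): keep a hand-written zeroing loop from being
// pattern-matched into a real memset call:
//   for (uptr i = 0; i < size; i++) p[i] = 0;
//   SanitizerBreakOptimization(p);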

struct SignalContext {
  void *siginfo;
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;
  enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;

  // In some cases the kernel cannot provide the true faulting address; `addr`
  // will be zero then. This field allows distinguishing between these cases
  // and dereferences of null.
  bool is_true_faulting_addr;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor.
  SignalContext() = default;

  // Creates signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // owning them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()),
        is_true_faulting_addr(IsTrueFaultingAddress()) {
    InitPcSpBp();
  }

  static void DumpAllRegisters(void *context);

  // Type of signal e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if signal is stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
  bool IsTrueFaultingAddress() const;
};

void InitializePlatformEarly();
void MaybeReexec();

template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};

// A simple scope guard. Usage:
// auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}

// Linux on 64-bit s390 had a nasty bug that crashes the whole machine
// if a process uses virtual memory over 4TB (as many sanitizers like
// to do). This function will abort the process if running on a kernel
// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
inline void AvoidCVE_2016_2143() {}
#endif

struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

// The default value for the allocator_release_to_os_interval_ms common flag,
// indicating that the sanitizer allocator should not attempt to release
// memory to the OS.
const s32 kReleaseToOSIntervalNever = -1;

void CheckNoDeepBind(const char *filename, int flag);

// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG. Defaults to blocking like the underlying syscall.
bool GetRandom(void *buffer, uptr length, bool blocking = true);

// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
extern u32 NumberOfCPUsCached;
inline u32 GetNumberOfCPUsCached() {
  if (!NumberOfCPUsCached)
    NumberOfCPUsCached = GetNumberOfCPUs();
  return NumberOfCPUsCached;
}

template <typename T>
class ArrayRef {
 public:
  ArrayRef() {}
  ArrayRef(T *begin, T *end) : begin_(begin), end_(end) {}

  T *begin() { return begin_; }
  T *end() { return end_; }

 private:
  T *begin_ = nullptr;
  T *end_ = nullptr;
};

}  // namespace __sanitizer

inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {  // NOLINT
  return alloc.Allocate(size);
}
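// Example (illustrative; the allocator instance and type are hypothetical):
//   static __sanitizer::LowLevelAllocator allocator;  // Linker-initialized.
//   auto *obj = new (allocator) MyType(args);  // Never freed or destroyed.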

#endif  // SANITIZER_COMMON_H