//===- SectionMemoryManager.cpp - Memory manager for MCJIT/RtDyld *- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the section-based memory manager used by the MCJIT
// execution engine and RuntimeDyld.
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/Config/config.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Process.h"

namespace llvm {

uint8_t *SectionMemoryManager::allocateDataSection(uintptr_t Size,
                                                   unsigned Alignment,
                                                   unsigned SectionID,
                                                   StringRef SectionName,
                                                   bool IsReadOnly) {
  if (IsReadOnly)
    return allocateSection(SectionMemoryManager::AllocationPurpose::ROData,
                           Size, Alignment);
  return allocateSection(SectionMemoryManager::AllocationPurpose::RWData, Size,
                         Alignment);
}

uint8_t *SectionMemoryManager::allocateCodeSection(uintptr_t Size,
                                                   unsigned Alignment,
                                                   unsigned SectionID,
                                                   StringRef SectionName) {
  return allocateSection(SectionMemoryManager::AllocationPurpose::Code, Size,
                         Alignment);
}

uint8_t *SectionMemoryManager::allocateSection(
    SectionMemoryManager::AllocationPurpose Purpose, uintptr_t Size,
    unsigned Alignment) {
  if (!Alignment)
    Alignment = 16;

  assert(!(Alignment & (Alignment - 1)) && "Alignment must be a power of two.");

  uintptr_t RequiredSize = Alignment * ((Size + Alignment - 1) / Alignment + 1);
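  // The request is rounded up to a whole number of alignment units, plus one
  // extra unit so that Size bytes still fit after the chosen base address is
  // itself rounded up to Alignment below. Illustrative arithmetic (example
  // values, not taken from any caller): Size = 100, Alignment = 16 gives
  // 16 * ((100 + 15) / 16 + 1) = 16 * 8 = 128 bytes.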
  uintptr_t Addr = 0;

  MemoryGroup &MemGroup = [&]() -> MemoryGroup & {
    switch (Purpose) {
    case AllocationPurpose::Code:
      return CodeMem;
    case AllocationPurpose::ROData:
      return RODataMem;
    case AllocationPurpose::RWData:
      return RWDataMem;
    }
    llvm_unreachable("Unknown SectionMemoryManager::AllocationPurpose");
  }();

  // Look in the list of free memory regions and use a block there if one
  // is available.
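  //
  // Each memory group keeps two bookkeeping lists: FreeMem holds the unused
  // tail ends of previously mapped regions, while PendingMem records the
  // ranges already handed out whose final page permissions have not yet been
  // applied by applyMemoryGroupPermissions().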
  for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
    if (FreeMB.Free.allocatedSize() >= RequiredSize) {
      Addr = (uintptr_t)FreeMB.Free.base();
      uintptr_t EndOfBlock = Addr + FreeMB.Free.allocatedSize();
      // Align the address.
      Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);

      if (FreeMB.PendingPrefixIndex == (unsigned)-1) {
        // The part of the block we're giving out to the user is now pending
        MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));

        // Remember this pending block, such that future allocations can just
        // modify it rather than creating a new one
        FreeMB.PendingPrefixIndex = MemGroup.PendingMem.size() - 1;
      } else {
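        // A pending block already covers the space carved off the front of
        // this free block, so grow that existing entry to also span the new
        // allocation instead of adding another PendingMem entry. Coalescing
        // here means later permission changes touch fewer, larger ranges.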
        sys::MemoryBlock &PendingMB =
            MemGroup.PendingMem[FreeMB.PendingPrefixIndex];
        PendingMB = sys::MemoryBlock(PendingMB.base(),
                                     Addr + Size - (uintptr_t)PendingMB.base());
      }

      // Remember how much free space is now left in this block
      FreeMB.Free =
          sys::MemoryBlock((void *)(Addr + Size), EndOfBlock - Addr - Size);
      return (uint8_t *)Addr;
    }
  }

  // No pre-allocated free block was large enough. Allocate a new memory region.
  // Note that all sections get allocated as read-write. The permissions will
  // be updated later based on memory group.
  //
  // FIXME: It would be useful to define a default allocation size (or add
  // it as a constructor parameter) to minimize the number of allocations.
  //
  // FIXME: Initialize the Near member for each memory group to avoid
  // interleaving.
  std::error_code ec;
  sys::MemoryBlock MB = MMapper->allocateMappedMemory(
      Purpose, RequiredSize, &MemGroup.Near,
      sys::Memory::MF_READ | sys::Memory::MF_WRITE, ec);
  if (ec) {
    // FIXME: Add error propagation to the interface.
    return nullptr;
  }

  // Save this address as the basis for our next request
  MemGroup.Near = MB;

  // Copy the address to all the other groups, if they have not
  // been initialized.
  if (CodeMem.Near.base() == nullptr)
    CodeMem.Near = MB;
  if (RODataMem.Near.base() == nullptr)
    RODataMem.Near = MB;
  if (RWDataMem.Near.base() == nullptr)
    RWDataMem.Near = MB;

  // Remember that we allocated this memory
  MemGroup.AllocatedMem.push_back(MB);
  Addr = (uintptr_t)MB.base();
  uintptr_t EndOfBlock = Addr + MB.allocatedSize();

  // Align the address.
  Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);

  // The part of the block we're giving out to the user is now pending
  MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));

  // The allocateMappedMemory may allocate much more memory than we need. In
  // this case, we store the unused memory as a free memory block.
  unsigned FreeSize = EndOfBlock - Addr - Size;
  if (FreeSize > 16) {
    FreeMemBlock FreeMB;
    FreeMB.Free = sys::MemoryBlock((void *)(Addr + Size), FreeSize);
    FreeMB.PendingPrefixIndex = (unsigned)-1;
    MemGroup.FreeMem.push_back(FreeMB);
  }

  // Return aligned address
  return (uint8_t *)Addr;
}

bool SectionMemoryManager::finalizeMemory(std::string *ErrMsg) {
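  // All sections were mapped read-write by allocateSection() above; once the
  // caller (typically RuntimeDyld) has copied section contents and resolved
  // relocations, this hook applies the final permissions: code becomes
  // read+execute and read-only data loses its write permission.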
  // FIXME: Should in-progress permissions be reverted if an error occurs?
  std::error_code ec;

  // Make code memory executable.
  ec = applyMemoryGroupPermissions(CodeMem,
                                   sys::Memory::MF_READ | sys::Memory::MF_EXEC);
  if (ec) {
    if (ErrMsg) {
      *ErrMsg = ec.message();
    }
    return true;
  }

  // Make read-only data memory read-only.
  ec = applyMemoryGroupPermissions(RODataMem, sys::Memory::MF_READ);
  if (ec) {
    if (ErrMsg) {
      *ErrMsg = ec.message();
    }
    return true;
  }

  // Read-write data memory already has the correct permissions

  // Some platforms with separate data cache and instruction cache require
  // explicit cache flush, otherwise JIT code manipulations (like resolved
  // relocations) will get to the data cache but not to the instruction cache.
  invalidateInstructionCache();

  return false;
}

static sys::MemoryBlock trimBlockToPageSize(sys::MemoryBlock M) {
  static const size_t PageSize = sys::Process::getPageSizeEstimate();
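
  // Shrink M to the largest page-aligned region fully contained in it:
  // StartOverlap skips ahead to the next page boundary, and TrimmedSize is
  // then rounded down to a whole number of pages. Worked example (numbers
  // chosen for illustration): with PageSize = 0x1000, a block at 0x1010 of
  // size 0x3000 trims to base 0x2000 with size 0x2000.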
  size_t StartOverlap =
      (PageSize - ((uintptr_t)M.base() % PageSize)) % PageSize;

  size_t TrimmedSize = M.allocatedSize();
  TrimmedSize -= StartOverlap;
  TrimmedSize -= TrimmedSize % PageSize;

  sys::MemoryBlock Trimmed((void *)((uintptr_t)M.base() + StartOverlap),
                           TrimmedSize);

  assert(((uintptr_t)Trimmed.base() % PageSize) == 0);
  assert((Trimmed.allocatedSize() % PageSize) == 0);
  assert(M.base() <= Trimmed.base() &&
         Trimmed.allocatedSize() <= M.allocatedSize());

  return Trimmed;
}

std::error_code
SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
                                                  unsigned Permissions) {
  for (sys::MemoryBlock &MB : MemGroup.PendingMem)
    if (std::error_code EC = MMapper->protectMappedMemory(MB, Permissions))
      return EC;

  MemGroup.PendingMem.clear();

  // Now go through free blocks and trim any of them that don't span the entire
  // page because one of the pending blocks may have overlapped it.
  for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
    FreeMB.Free = trimBlockToPageSize(FreeMB.Free);
    // We cleared the PendingMem list, so all these pointers are now invalid
    FreeMB.PendingPrefixIndex = (unsigned)-1;
  }

  // Remove all blocks which are now empty
  erase_if(MemGroup.FreeMem, [](FreeMemBlock &FreeMB) {
    return FreeMB.Free.allocatedSize() == 0;
  });

  return std::error_code();
}

void SectionMemoryManager::invalidateInstructionCache() {
  for (sys::MemoryBlock &Block : CodeMem.PendingMem)
    sys::Memory::InvalidateInstructionCache(Block.base(),
                                            Block.allocatedSize());
}

SectionMemoryManager::~SectionMemoryManager() {
  for (MemoryGroup *Group : {&CodeMem, &RWDataMem, &RODataMem}) {
    for (sys::MemoryBlock &Block : Group->AllocatedMem)
      MMapper->releaseMappedMemory(Block);
  }
}

SectionMemoryManager::MemoryMapper::~MemoryMapper() = default;

void SectionMemoryManager::anchor() {}

namespace {
// Trivial implementation of SectionMemoryManager::MemoryMapper that just calls
// into sys::Memory.
class DefaultMMapper final : public SectionMemoryManager::MemoryMapper {
public:
  sys::MemoryBlock
  allocateMappedMemory(SectionMemoryManager::AllocationPurpose Purpose,
                       size_t NumBytes, const sys::MemoryBlock *const NearBlock,
                       unsigned Flags, std::error_code &EC) override {
    return sys::Memory::allocateMappedMemory(NumBytes, NearBlock, Flags, EC);
  }

  std::error_code protectMappedMemory(const sys::MemoryBlock &Block,
                                      unsigned Flags) override {
    return sys::Memory::protectMappedMemory(Block, Flags);
  }

  std::error_code releaseMappedMemory(sys::MemoryBlock &M) override {
    return sys::Memory::releaseMappedMemory(M);
  }
};
} // namespace

SectionMemoryManager::SectionMemoryManager(MemoryMapper *UnownedMM)
    : MMapper(UnownedMM), OwnedMMapper(nullptr) {
  if (!MMapper) {
    OwnedMMapper = std::make_unique<DefaultMMapper>();
    MMapper = OwnedMMapper.get();
  }
}
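
// Usage sketch (illustrative only): constructing the manager without a mapper
// falls back to the DefaultMMapper above, while a caller-supplied MemoryMapper
// stays unowned and must outlive the manager. With a hypothetical CustomMapper
// subclass:
//
//   static CustomMapper Mapper;               // must outlive the manager
//   SectionMemoryManager ManagerWithMapper(&Mapper);
//   SectionMemoryManager DefaultManager;      // uses sys::Memory directly
//
// MCJIT clients typically hand a SectionMemoryManager to the engine via
// EngineBuilder::setMCJITMemoryManager().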

} // namespace llvm