1//===- MemoryMapper.cpp - Cross-process memory mapper ------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "llvm/ExecutionEngine/Orc/MemoryMapper.h"
10
11#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
12#include "llvm/Support/WindowsError.h"
13
14#include <algorithm>
15
16#if defined(LLVM_ON_UNIX) && !defined(__ANDROID__)
17#include <fcntl.h>
18#include <sys/mman.h>
19#include <unistd.h>
20#elif defined(_WIN32)
21#include <windows.h>
22#endif
23
24namespace llvm {
25namespace orc {
26
27MemoryMapper::~MemoryMapper() {}
28
// Constructs a mapper that allocates within the current process; PageSize is
// used by callers for alignment decisions (obtained via Create()).
InProcessMemoryMapper::InProcessMemoryMapper(size_t PageSize)
    : PageSize(PageSize) {}
31
32Expected<std::unique_ptr<InProcessMemoryMapper>>
33InProcessMemoryMapper::Create() {
34 auto PageSize = sys::Process::getPageSize();
35 if (!PageSize)
36 return PageSize.takeError();
37 return std::make_unique<InProcessMemoryMapper>(args&: *PageSize);
38}
39
40void InProcessMemoryMapper::reserve(size_t NumBytes,
41 OnReservedFunction OnReserved) {
42 std::error_code EC;
43 auto MB = sys::Memory::allocateMappedMemory(
44 NumBytes, NearBlock: nullptr, Flags: sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
45
46 if (EC)
47 return OnReserved(errorCodeToError(EC));
48
49 {
50 std::lock_guard<std::mutex> Lock(Mutex);
51 Reservations[MB.base()].Size = MB.allocatedSize();
52 }
53
54 OnReserved(
55 ExecutorAddrRange(ExecutorAddr::fromPtr(Ptr: MB.base()), MB.allocatedSize()));
56}
57
// In-process mapping: working memory and target memory are the same, so the
// target address is simply reinterpreted as a writable local pointer.
// ContentSize is unused here (no separate staging buffer is needed).
char *InProcessMemoryMapper::prepare(ExecutorAddr Addr, size_t ContentSize) {
  return Addr.toPtr<char *>();
}
61
62void InProcessMemoryMapper::initialize(MemoryMapper::AllocInfo &AI,
63 OnInitializedFunction OnInitialized) {
64 ExecutorAddr MinAddr(~0ULL);
65 ExecutorAddr MaxAddr(0);
66
67 // FIXME: Release finalize lifetime segments.
68 for (auto &Segment : AI.Segments) {
69 auto Base = AI.MappingBase + Segment.Offset;
70 auto Size = Segment.ContentSize + Segment.ZeroFillSize;
71
72 if (Base < MinAddr)
73 MinAddr = Base;
74
75 if (Base + Size > MaxAddr)
76 MaxAddr = Base + Size;
77
78 std::memset(s: (Base + Segment.ContentSize).toPtr<void *>(), c: 0,
79 n: Segment.ZeroFillSize);
80
81 if (auto EC = sys::Memory::protectMappedMemory(
82 Block: {Base.toPtr<void *>(), Size},
83 Flags: toSysMemoryProtectionFlags(MP: Segment.AG.getMemProt()))) {
84 return OnInitialized(errorCodeToError(EC));
85 }
86 if ((Segment.AG.getMemProt() & MemProt::Exec) == MemProt::Exec)
87 sys::Memory::InvalidateInstructionCache(Addr: Base.toPtr<void *>(), Len: Size);
88 }
89
90 auto DeinitializeActions = shared::runFinalizeActions(AAs&: AI.Actions);
91 if (!DeinitializeActions)
92 return OnInitialized(DeinitializeActions.takeError());
93
94 {
95 std::lock_guard<std::mutex> Lock(Mutex);
96
97 // This is the maximum range whose permission have been possibly modified
98 Allocations[MinAddr].Size = MaxAddr - MinAddr;
99 Allocations[MinAddr].DeinitializationActions =
100 std::move(*DeinitializeActions);
101 Reservations[AI.MappingBase.toPtr<void *>()].Allocations.push_back(x: MinAddr);
102 }
103
104 OnInitialized(MinAddr);
105}
106
107void InProcessMemoryMapper::deinitialize(
108 ArrayRef<ExecutorAddr> Bases,
109 MemoryMapper::OnDeinitializedFunction OnDeinitialized) {
110 Error AllErr = Error::success();
111
112 {
113 std::lock_guard<std::mutex> Lock(Mutex);
114
115 for (auto Base : llvm::reverse(C&: Bases)) {
116
117 if (Error Err = shared::runDeallocActions(
118 DAs: Allocations[Base].DeinitializationActions)) {
119 AllErr = joinErrors(E1: std::move(AllErr), E2: std::move(Err));
120 }
121
122 // Reset protections to read/write so the area can be reused
123 if (auto EC = sys::Memory::protectMappedMemory(
124 Block: {Base.toPtr<void *>(), Allocations[Base].Size},
125 Flags: sys::Memory::ProtectionFlags::MF_READ |
126 sys::Memory::ProtectionFlags::MF_WRITE)) {
127 AllErr = joinErrors(E1: std::move(AllErr), E2: errorCodeToError(EC));
128 }
129
130 Allocations.erase(Val: Base);
131 }
132 }
133
134 OnDeinitialized(std::move(AllErr));
135}
136
137void InProcessMemoryMapper::release(ArrayRef<ExecutorAddr> Bases,
138 OnReleasedFunction OnReleased) {
139 Error Err = Error::success();
140
141 for (auto Base : Bases) {
142 std::vector<ExecutorAddr> AllocAddrs;
143 size_t Size;
144 {
145 std::lock_guard<std::mutex> Lock(Mutex);
146 auto &R = Reservations[Base.toPtr<void *>()];
147 Size = R.Size;
148 AllocAddrs.swap(x&: R.Allocations);
149 }
150
151 // deinitialize sub allocations
152 std::promise<MSVCPError> P;
153 auto F = P.get_future();
154 deinitialize(Bases: AllocAddrs, OnDeinitialized: [&](Error Err) { P.set_value(std::move(Err)); });
155 if (Error E = F.get()) {
156 Err = joinErrors(E1: std::move(Err), E2: std::move(E));
157 }
158
159 // free the memory
160 auto MB = sys::MemoryBlock(Base.toPtr<void *>(), Size);
161
162 auto EC = sys::Memory::releaseMappedMemory(Block&: MB);
163 if (EC) {
164 Err = joinErrors(E1: std::move(Err), E2: errorCodeToError(EC));
165 }
166
167 std::lock_guard<std::mutex> Lock(Mutex);
168 Reservations.erase(Val: Base.toPtr<void *>());
169 }
170
171 OnReleased(std::move(Err));
172}
173
174InProcessMemoryMapper::~InProcessMemoryMapper() {
175 std::vector<ExecutorAddr> ReservationAddrs;
176 {
177 std::lock_guard<std::mutex> Lock(Mutex);
178
179 ReservationAddrs.reserve(n: Reservations.size());
180 for (const auto &R : Reservations) {
181 ReservationAddrs.push_back(x: ExecutorAddr::fromPtr(Ptr: R.getFirst()));
182 }
183 }
184
185 std::promise<MSVCPError> P;
186 auto F = P.get_future();
187 release(Bases: ReservationAddrs, OnReleased: [&](Error Err) { P.set_value(std::move(Err)); });
188 cantFail(Err: F.get());
189}
190
191// SharedMemoryMapper
192
// Constructs a mapper that forwards mapping operations to an executor-side
// service (addressed via SAs) over EPC; memory is shared between processes.
// Only supported on non-Android Unix and Windows — anywhere else this
// constructor is unreachable (Create() returns an error instead).
SharedMemoryMapper::SharedMemoryMapper(ExecutorProcessControl &EPC,
                                       SymbolAddrs SAs, size_t PageSize)
    : EPC(EPC), SAs(SAs), PageSize(PageSize) {
#if (!defined(LLVM_ON_UNIX) || defined(__ANDROID__)) && !defined(_WIN32)
  llvm_unreachable("SharedMemoryMapper is not supported on this platform yet");
#endif
}
200
201Expected<std::unique_ptr<SharedMemoryMapper>>
202SharedMemoryMapper::Create(ExecutorProcessControl &EPC, SymbolAddrs SAs) {
203#if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)
204 auto PageSize = sys::Process::getPageSize();
205 if (!PageSize)
206 return PageSize.takeError();
207
208 return std::make_unique<SharedMemoryMapper>(args&: EPC, args&: SAs, args&: *PageSize);
209#else
210 return make_error<StringError>(
211 "SharedMemoryMapper is not supported on this platform yet",
212 inconvertibleErrorCode());
213#endif
214}
215
216void SharedMemoryMapper::reserve(size_t NumBytes,
217 OnReservedFunction OnReserved) {
218#if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)
219
220 EPC.callSPSWrapperAsync<
221 rt::SPSExecutorSharedMemoryMapperServiceReserveSignature>(
222 WrapperFnAddr: SAs.Reserve,
223 SendResult: [this, NumBytes, OnReserved = std::move(OnReserved)](
224 Error SerializationErr,
225 Expected<std::pair<ExecutorAddr, std::string>> Result) mutable {
226 if (SerializationErr) {
227 cantFail(Err: Result.takeError());
228 return OnReserved(std::move(SerializationErr));
229 }
230
231 if (!Result)
232 return OnReserved(Result.takeError());
233
234 ExecutorAddr RemoteAddr;
235 std::string SharedMemoryName;
236 std::tie(args&: RemoteAddr, args&: SharedMemoryName) = std::move(*Result);
237
238 void *LocalAddr = nullptr;
239
240#if defined(LLVM_ON_UNIX)
241
242 int SharedMemoryFile = shm_open(name: SharedMemoryName.c_str(), O_RDWR, mode: 0700);
243 if (SharedMemoryFile < 0) {
244 return OnReserved(errorCodeToError(EC: errnoAsErrorCode()));
245 }
246
247 // this prevents other processes from accessing it by name
248 shm_unlink(name: SharedMemoryName.c_str());
249
250 LocalAddr = mmap(addr: nullptr, len: NumBytes, PROT_READ | PROT_WRITE, MAP_SHARED,
251 fd: SharedMemoryFile, offset: 0);
252 if (LocalAddr == MAP_FAILED) {
253 return OnReserved(errorCodeToError(EC: errnoAsErrorCode()));
254 }
255
256 close(fd: SharedMemoryFile);
257
258#elif defined(_WIN32)
259
260 std::wstring WideSharedMemoryName(SharedMemoryName.begin(),
261 SharedMemoryName.end());
262 HANDLE SharedMemoryFile = OpenFileMappingW(
263 FILE_MAP_ALL_ACCESS, FALSE, WideSharedMemoryName.c_str());
264 if (!SharedMemoryFile)
265 return OnReserved(errorCodeToError(mapWindowsError(GetLastError())));
266
267 LocalAddr =
268 MapViewOfFile(SharedMemoryFile, FILE_MAP_ALL_ACCESS, 0, 0, 0);
269 if (!LocalAddr) {
270 CloseHandle(SharedMemoryFile);
271 return OnReserved(errorCodeToError(mapWindowsError(GetLastError())));
272 }
273
274 CloseHandle(SharedMemoryFile);
275
276#endif
277 {
278 std::lock_guard<std::mutex> Lock(Mutex);
279 Reservations.insert(x: {RemoteAddr, {.LocalAddr: LocalAddr, .Size: NumBytes}});
280 }
281
282 OnReserved(ExecutorAddrRange(RemoteAddr, NumBytes));
283 },
284 Args: SAs.Instance, Args: static_cast<uint64_t>(NumBytes));
285
286#else
287 OnReserved(make_error<StringError>(
288 "SharedMemoryMapper is not supported on this platform yet",
289 inconvertibleErrorCode()));
290#endif
291}
292
293char *SharedMemoryMapper::prepare(ExecutorAddr Addr, size_t ContentSize) {
294 auto R = Reservations.upper_bound(x: Addr);
295 assert(R != Reservations.begin() && "Attempt to prepare unreserved range");
296 R--;
297
298 ExecutorAddrDiff Offset = Addr - R->first;
299
300 return static_cast<char *>(R->second.LocalAddr) + Offset;
301}
302
303void SharedMemoryMapper::initialize(MemoryMapper::AllocInfo &AI,
304 OnInitializedFunction OnInitialized) {
305 auto Reservation = Reservations.upper_bound(x: AI.MappingBase);
306 assert(Reservation != Reservations.begin() && "Attempt to initialize unreserved range");
307 Reservation--;
308
309 auto AllocationOffset = AI.MappingBase - Reservation->first;
310
311 tpctypes::SharedMemoryFinalizeRequest FR;
312
313 AI.Actions.swap(x&: FR.Actions);
314
315 FR.Segments.reserve(n: AI.Segments.size());
316
317 for (auto Segment : AI.Segments) {
318 char *Base = static_cast<char *>(Reservation->second.LocalAddr) +
319 AllocationOffset + Segment.Offset;
320 std::memset(s: Base + Segment.ContentSize, c: 0, n: Segment.ZeroFillSize);
321
322 tpctypes::SharedMemorySegFinalizeRequest SegReq;
323 SegReq.RAG = {Segment.AG.getMemProt(),
324 Segment.AG.getMemLifetime() == MemLifetime::Finalize};
325 SegReq.Addr = AI.MappingBase + Segment.Offset;
326 SegReq.Size = Segment.ContentSize + Segment.ZeroFillSize;
327
328 FR.Segments.push_back(x: SegReq);
329 }
330
331 EPC.callSPSWrapperAsync<
332 rt::SPSExecutorSharedMemoryMapperServiceInitializeSignature>(
333 WrapperFnAddr: SAs.Initialize,
334 SendResult: [OnInitialized = std::move(OnInitialized)](
335 Error SerializationErr, Expected<ExecutorAddr> Result) mutable {
336 if (SerializationErr) {
337 cantFail(Err: Result.takeError());
338 return OnInitialized(std::move(SerializationErr));
339 }
340
341 OnInitialized(std::move(Result));
342 },
343 Args: SAs.Instance, Args: Reservation->first, Args: std::move(FR));
344}
345
346void SharedMemoryMapper::deinitialize(
347 ArrayRef<ExecutorAddr> Allocations,
348 MemoryMapper::OnDeinitializedFunction OnDeinitialized) {
349 EPC.callSPSWrapperAsync<
350 rt::SPSExecutorSharedMemoryMapperServiceDeinitializeSignature>(
351 WrapperFnAddr: SAs.Deinitialize,
352 SendResult: [OnDeinitialized = std::move(OnDeinitialized)](Error SerializationErr,
353 Error Result) mutable {
354 if (SerializationErr) {
355 cantFail(Err: std::move(Result));
356 return OnDeinitialized(std::move(SerializationErr));
357 }
358
359 OnDeinitialized(std::move(Result));
360 },
361 Args: SAs.Instance, Args: Allocations);
362}
363
364void SharedMemoryMapper::release(ArrayRef<ExecutorAddr> Bases,
365 OnReleasedFunction OnReleased) {
366#if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)
367 Error Err = Error::success();
368
369 {
370 std::lock_guard<std::mutex> Lock(Mutex);
371
372 for (auto Base : Bases) {
373
374#if defined(LLVM_ON_UNIX)
375
376 if (munmap(addr: Reservations[Base].LocalAddr, len: Reservations[Base].Size) != 0)
377 Err = joinErrors(E1: std::move(Err), E2: errorCodeToError(EC: errnoAsErrorCode()));
378
379#elif defined(_WIN32)
380
381 if (!UnmapViewOfFile(Reservations[Base].LocalAddr))
382 Err = joinErrors(std::move(Err),
383 errorCodeToError(mapWindowsError(GetLastError())));
384
385#endif
386
387 Reservations.erase(x: Base);
388 }
389 }
390
391 EPC.callSPSWrapperAsync<
392 rt::SPSExecutorSharedMemoryMapperServiceReleaseSignature>(
393 WrapperFnAddr: SAs.Release,
394 SendResult: [OnReleased = std::move(OnReleased),
395 Err = std::move(Err)](Error SerializationErr, Error Result) mutable {
396 if (SerializationErr) {
397 cantFail(Err: std::move(Result));
398 return OnReleased(
399 joinErrors(E1: std::move(Err), E2: std::move(SerializationErr)));
400 }
401
402 return OnReleased(joinErrors(E1: std::move(Err), E2: std::move(Result)));
403 },
404 Args: SAs.Instance, Args: Bases);
405#else
406 OnReleased(make_error<StringError>(
407 "SharedMemoryMapper is not supported on this platform yet",
408 inconvertibleErrorCode()));
409#endif
410}
411
412SharedMemoryMapper::~SharedMemoryMapper() {
413 std::lock_guard<std::mutex> Lock(Mutex);
414 for (const auto &R : Reservations) {
415
416#if defined(LLVM_ON_UNIX) && !defined(__ANDROID__)
417
418 munmap(addr: R.second.LocalAddr, len: R.second.Size);
419
420#elif defined(_WIN32)
421
422 UnmapViewOfFile(R.second.LocalAddr);
423
424#else
425
426 (void)R;
427
428#endif
429 }
430}
431
432} // namespace orc
433
434} // namespace llvm
435

source code of llvm/lib/ExecutionEngine/Orc/MemoryMapper.cpp