//===- MappedBlockStream.cpp - Reads stream data from an MSF file ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/DebugInfo/MSF/MSFCommon.h"
#include "llvm/Support/BinaryStreamWriter.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::msf;

namespace {

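// This wrapper exists so the create* factory functions below can use
// std::make_unique without requiring the stream classes' constructors to be
// public; it forwards its arguments straight to the wrapped base class.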
template <typename Base> class MappedBlockStreamImpl : public Base {
public:
  template <typename... Args>
  MappedBlockStreamImpl(Args &&... Params)
      : Base(std::forward<Args>(Params)...) {}
};

} // end anonymous namespace

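// A byte extent [first, second) within the stream. intersect() assumes the
// caller has already established (or will separately check) that the two
// extents actually overlap.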
using Interval = std::pair<uint64_t, uint64_t>;

static Interval intersect(const Interval &I1, const Interval &I2) {
  return std::make_pair(std::max(I1.first, I2.first),
                        std::min(I1.second, I2.second));
}

MappedBlockStream::MappedBlockStream(uint32_t BlockSize,
                                     const MSFStreamLayout &Layout,
                                     BinaryStreamRef MsfData,
                                     BumpPtrAllocator &Allocator)
    : BlockSize(BlockSize), StreamLayout(Layout), MsfData(MsfData),
      Allocator(Allocator) {}

std::unique_ptr<MappedBlockStream> MappedBlockStream::createStream(
    uint32_t BlockSize, const MSFStreamLayout &Layout, BinaryStreamRef MsfData,
    BumpPtrAllocator &Allocator) {
  return std::make_unique<MappedBlockStreamImpl<MappedBlockStream>>(
      BlockSize, Layout, MsfData, Allocator);
}

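// Illustrative use (identifiers are hypothetical): given an MSFLayout parsed
// from an MSF/PDB container and a BinaryStreamRef over the raw file bytes, a
// reader for stream `Index` can be created and queried like so:
//
//   BumpPtrAllocator Alloc;
//   auto S = MappedBlockStream::createIndexedStream(Layout, MsfData, Index,
//                                                   Alloc);
//   ArrayRef<uint8_t> Bytes;
//   if (Error E = S->readBytes(/*Offset=*/0, /*Size=*/16, Bytes))
//     return E; // Propagate read failures to the caller.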
std::unique_ptr<MappedBlockStream> MappedBlockStream::createIndexedStream(
    const MSFLayout &Layout, BinaryStreamRef MsfData, uint32_t StreamIndex,
    BumpPtrAllocator &Allocator) {
  assert(StreamIndex < Layout.StreamMap.size() && "Invalid stream index");
  MSFStreamLayout SL;
  SL.Blocks = Layout.StreamMap[StreamIndex];
  SL.Length = Layout.StreamSizes[StreamIndex];
  return std::make_unique<MappedBlockStreamImpl<MappedBlockStream>>(
      Layout.SB->BlockSize, SL, MsfData, Allocator);
}

std::unique_ptr<MappedBlockStream>
MappedBlockStream::createDirectoryStream(const MSFLayout &Layout,
                                         BinaryStreamRef MsfData,
                                         BumpPtrAllocator &Allocator) {
  MSFStreamLayout SL;
  SL.Blocks = Layout.DirectoryBlocks;
  SL.Length = Layout.SB->NumDirectoryBytes;
  return createStream(Layout.SB->BlockSize, SL, MsfData, Allocator);
}

std::unique_ptr<MappedBlockStream>
MappedBlockStream::createFpmStream(const MSFLayout &Layout,
                                   BinaryStreamRef MsfData,
                                   BumpPtrAllocator &Allocator) {
  MSFStreamLayout SL(getFpmStreamLayout(Layout));
  return createStream(Layout.SB->BlockSize, SL, MsfData, Allocator);
}

Error MappedBlockStream::readBytes(uint64_t Offset, uint64_t Size,
                                   ArrayRef<uint8_t> &Buffer) {
  // Make sure we aren't trying to read beyond the end of the stream.
  if (auto EC = checkOffsetForRead(Offset, Size))
    return EC;

  if (tryReadContiguously(Offset, Size, Buffer))
    return Error::success();

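  // The request could not be satisfied with a direct reference into the MSF
  // data, so it will have to be copied. First see if a previous read already
  // cached a large enough copy starting at exactly this offset.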
  auto CacheIter = CacheMap.find(Offset);
  if (CacheIter != CacheMap.end()) {
    // Try to find an alloc that was large enough for this request.
    for (auto &Entry : CacheIter->second) {
      if (Entry.size() >= Size) {
        Buffer = Entry.slice(0, Size);
        return Error::success();
      }
    }
  }

  // We couldn't find a buffer that started at the correct offset (the most
  // common scenario). Try to see if there is a buffer that starts at some
  // other offset but overlaps the desired range.
  for (auto &CacheItem : CacheMap) {
    Interval RequestExtent = std::make_pair(Offset, Offset + Size);

    // We already checked this one on the fast path above.
    if (CacheItem.first == Offset)
      continue;
    // If the initial extent of the cached item is beyond the ending extent
    // of the request, there is no overlap.
    if (CacheItem.first >= Offset + Size)
      continue;

    // We really only have to check the last item in the list, since we append
    // in order of increasing length.
    if (CacheItem.second.empty())
      continue;

    auto CachedAlloc = CacheItem.second.back();
    // If the initial extent of the request is beyond the ending extent of
    // the cached item, there is no overlap.
    Interval CachedExtent =
        std::make_pair(CacheItem.first, CacheItem.first + CachedAlloc.size());
    if (RequestExtent.first >= CachedExtent.first + CachedExtent.second)
      continue;

    Interval Intersection = intersect(CachedExtent, RequestExtent);
    // Only use this if the entire request extent is contained in the cached
    // extent.
    if (Intersection != RequestExtent)
      continue;

    uint64_t CacheRangeOffset =
        AbsoluteDifference(CachedExtent.first, Intersection.first);
    Buffer = CachedAlloc.slice(CacheRangeOffset, Size);
    return Error::success();
  }

  // Otherwise allocate a large enough buffer in the pool, memcpy the data
  // into it, and return an ArrayRef to that. Do not touch existing pool
  // allocations, as existing clients may be holding a pointer which must
  // not be invalidated.
  uint8_t *WriteBuffer = static_cast<uint8_t *>(Allocator.Allocate(Size, 8));
  if (auto EC = readBytes(Offset, MutableArrayRef<uint8_t>(WriteBuffer, Size)))
    return EC;

  if (CacheIter != CacheMap.end()) {
    CacheIter->second.emplace_back(WriteBuffer, Size);
  } else {
    std::vector<CacheEntry> List;
    List.emplace_back(WriteBuffer, Size);
    CacheMap.insert(std::make_pair(Offset, List));
  }
  Buffer = ArrayRef<uint8_t>(WriteBuffer, Size);
  return Error::success();
}

Error MappedBlockStream::readLongestContiguousChunk(uint64_t Offset,
                                                    ArrayRef<uint8_t> &Buffer) {
  // Make sure we aren't trying to read beyond the end of the stream.
  if (auto EC = checkOffsetForRead(Offset, 1))
    return EC;

  uint64_t First = Offset / BlockSize;
  uint64_t Last = First;

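  // Walk forward from the block containing Offset for as long as successive
  // stream blocks map to physically consecutive blocks in the MSF file.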
  while (Last < getNumBlocks() - 1) {
    if (StreamLayout.Blocks[Last] != StreamLayout.Blocks[Last + 1] - 1)
      break;
    ++Last;
  }

  uint64_t OffsetInFirstBlock = Offset % BlockSize;
  uint64_t BytesFromFirstBlock = BlockSize - OffsetInFirstBlock;
  uint64_t BlockSpan = Last - First + 1;
  uint64_t ByteSpan = BytesFromFirstBlock + (BlockSpan - 1) * BlockSize;

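  // Reading a single block from the underlying data is enough: because the
  // blocks in [First, Last] are physically contiguous, the resulting pointer
  // can safely be extended to cover the whole span.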
  ArrayRef<uint8_t> BlockData;
  uint64_t MsfOffset = blockToOffset(StreamLayout.Blocks[First], BlockSize);
  if (auto EC = MsfData.readBytes(MsfOffset, BlockSize, BlockData))
    return EC;

  BlockData = BlockData.drop_front(OffsetInFirstBlock);
  Buffer = ArrayRef<uint8_t>(BlockData.data(), ByteSpan);
  return Error::success();
}

uint64_t MappedBlockStream::getLength() { return StreamLayout.Length; }

bool MappedBlockStream::tryReadContiguously(uint64_t Offset, uint64_t Size,
                                            ArrayRef<uint8_t> &Buffer) {
  if (Size == 0) {
    Buffer = ArrayRef<uint8_t>();
    return true;
  }
  // Attempt to fulfill the request with a reference directly into the stream.
  // This can work even if the request crosses a block boundary, provided that
  // all subsequent blocks are contiguous. For example, a 10k read with a 4k
  // block size can be filled with a reference if, from the starting offset,
  // 3 blocks in a row are contiguous.
  uint64_t BlockNum = Offset / BlockSize;
  uint64_t OffsetInBlock = Offset % BlockSize;
  uint64_t BytesFromFirstBlock = std::min(Size, BlockSize - OffsetInBlock);
  uint64_t NumAdditionalBlocks =
      alignTo(Size - BytesFromFirstBlock, BlockSize) / BlockSize;

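  // Verify that every block the request touches maps to the expected
  // consecutive physical block, starting from the block containing Offset.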
  uint64_t RequiredContiguousBlocks = NumAdditionalBlocks + 1;
  uint64_t E = StreamLayout.Blocks[BlockNum];
  for (uint64_t I = 0; I < RequiredContiguousBlocks; ++I, ++E) {
    if (StreamLayout.Blocks[I + BlockNum] != E)
      return false;
  }

  // Read out the entire block where the requested offset starts. Then drop
  // bytes from the beginning so that the actual starting byte lines up with
  // the requested starting byte. Then, since we know this is a contiguous
  // cross-block span, explicitly resize the ArrayRef to cover the entire
  // request length.
  ArrayRef<uint8_t> BlockData;
  uint64_t FirstBlockAddr = StreamLayout.Blocks[BlockNum];
  uint64_t MsfOffset = blockToOffset(FirstBlockAddr, BlockSize);
  if (auto EC = MsfData.readBytes(MsfOffset, BlockSize, BlockData)) {
    consumeError(std::move(EC));
    return false;
  }
  BlockData = BlockData.drop_front(OffsetInBlock);
  Buffer = ArrayRef<uint8_t>(BlockData.data(), Size);
  return true;
}

Error MappedBlockStream::readBytes(uint64_t Offset,
                                   MutableArrayRef<uint8_t> Buffer) {
  uint64_t BlockNum = Offset / BlockSize;
  uint64_t OffsetInBlock = Offset % BlockSize;

  // Make sure we aren't trying to read beyond the end of the stream.
  if (auto EC = checkOffsetForRead(Offset, Buffer.size()))
    return EC;

  uint64_t BytesLeft = Buffer.size();
  uint64_t BytesWritten = 0;
  uint8_t *WriteBuffer = Buffer.data();
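  // Copy the request out block by block: read each underlying MSF block and
  // memcpy the portion that falls inside the request into the caller's buffer.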
  while (BytesLeft > 0) {
    uint64_t StreamBlockAddr = StreamLayout.Blocks[BlockNum];

    ArrayRef<uint8_t> BlockData;
    uint64_t Offset = blockToOffset(StreamBlockAddr, BlockSize);
    if (auto EC = MsfData.readBytes(Offset, BlockSize, BlockData))
      return EC;

    const uint8_t *ChunkStart = BlockData.data() + OffsetInBlock;
    uint64_t BytesInChunk = std::min(BytesLeft, BlockSize - OffsetInBlock);
    ::memcpy(WriteBuffer + BytesWritten, ChunkStart, BytesInChunk);

    BytesWritten += BytesInChunk;
    BytesLeft -= BytesInChunk;
    ++BlockNum;
    OffsetInBlock = 0;
  }

  return Error::success();
}

void MappedBlockStream::invalidateCache() { CacheMap.shrink_and_clear(); }

void MappedBlockStream::fixCacheAfterWrite(uint64_t Offset,
                                           ArrayRef<uint8_t> Data) const {
  // If this write overlapped a read which previously came from the pool,
  // someone may still be holding a pointer to that alloc which is now invalid.
  // Compute the overlapping range and update the cache entry, so any
  // outstanding buffers are automatically updated.
  for (const auto &MapEntry : CacheMap) {
    // If the end of the written extent precedes the beginning of the cached
    // extent, ignore this map entry.
    if (Offset + Data.size() < MapEntry.first)
      continue;
    for (const auto &Alloc : MapEntry.second) {
      // If the end of the cached extent precedes the beginning of the written
      // extent, ignore this alloc.
      if (MapEntry.first + Alloc.size() < Offset)
        continue;

      // If we get here, they are guaranteed to overlap.
      Interval WriteInterval = std::make_pair(Offset, Offset + Data.size());
      Interval CachedInterval =
          std::make_pair(MapEntry.first, MapEntry.first + Alloc.size());
      // If they overlap, we need to write the new data into the overlapping
      // range.
      auto Intersection = intersect(WriteInterval, CachedInterval);
      assert(Intersection.first <= Intersection.second);

      uint64_t Length = Intersection.second - Intersection.first;
      uint64_t SrcOffset =
          AbsoluteDifference(WriteInterval.first, Intersection.first);
      uint64_t DestOffset =
          AbsoluteDifference(CachedInterval.first, Intersection.first);
      ::memcpy(Alloc.data() + DestOffset, Data.data() + SrcOffset, Length);
    }
  }
}

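// Illustrative use (identifiers are hypothetical): the writable variant pairs
// a read interface with a write interface over the same MSF data, so an
// existing stream can be patched in place and then flushed:
//
//   auto WS = WritableMappedBlockStream::createIndexedStream(Layout, MsfData,
//                                                            Index, Alloc);
//   uint8_t Patch[] = {0xDE, 0xAD};
//   if (Error E = WS->writeBytes(/*Offset=*/8, Patch))
//     return E;
//   if (Error E = WS->commit())
//     return E;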
WritableMappedBlockStream::WritableMappedBlockStream(
    uint32_t BlockSize, const MSFStreamLayout &Layout,
    WritableBinaryStreamRef MsfData, BumpPtrAllocator &Allocator)
    : ReadInterface(BlockSize, Layout, MsfData, Allocator),
      WriteInterface(MsfData) {}

std::unique_ptr<WritableMappedBlockStream>
WritableMappedBlockStream::createStream(uint32_t BlockSize,
                                        const MSFStreamLayout &Layout,
                                        WritableBinaryStreamRef MsfData,
                                        BumpPtrAllocator &Allocator) {
  return std::make_unique<MappedBlockStreamImpl<WritableMappedBlockStream>>(
      BlockSize, Layout, MsfData, Allocator);
}

std::unique_ptr<WritableMappedBlockStream>
WritableMappedBlockStream::createIndexedStream(const MSFLayout &Layout,
                                               WritableBinaryStreamRef MsfData,
                                               uint32_t StreamIndex,
                                               BumpPtrAllocator &Allocator) {
  assert(StreamIndex < Layout.StreamMap.size() && "Invalid stream index");
  MSFStreamLayout SL;
  SL.Blocks = Layout.StreamMap[StreamIndex];
  SL.Length = Layout.StreamSizes[StreamIndex];
  return createStream(Layout.SB->BlockSize, SL, MsfData, Allocator);
}

std::unique_ptr<WritableMappedBlockStream>
WritableMappedBlockStream::createDirectoryStream(
    const MSFLayout &Layout, WritableBinaryStreamRef MsfData,
    BumpPtrAllocator &Allocator) {
  MSFStreamLayout SL;
  SL.Blocks = Layout.DirectoryBlocks;
  SL.Length = Layout.SB->NumDirectoryBytes;
  return createStream(Layout.SB->BlockSize, SL, MsfData, Allocator);
}

std::unique_ptr<WritableMappedBlockStream>
WritableMappedBlockStream::createFpmStream(const MSFLayout &Layout,
                                           WritableBinaryStreamRef MsfData,
                                           BumpPtrAllocator &Allocator,
                                           bool AltFpm) {
  // We only want to give the user a stream containing the bytes of the FPM
  // that are actually valid, but we want to initialize all of the bytes, even
  // those that come from reserved FPM blocks where the entire block is unused.
  // To do this, we first create the full layout, which gives us a stream with
  // all bytes and all blocks, and initialize everything to 0xFF (all blocks in
  // the file are unused). Then we create the minimal layout (which contains
  // only a subset of the bytes previously initialized), and return that to the
  // user.
  MSFStreamLayout MinLayout(
      getFpmStreamLayout(Layout, /*IncludeUnusedFpmData=*/false, AltFpm));

  MSFStreamLayout FullLayout(
      getFpmStreamLayout(Layout, /*IncludeUnusedFpmData=*/true, AltFpm));
  auto Result =
      createStream(Layout.SB->BlockSize, FullLayout, MsfData, Allocator);
  if (!Result)
    return Result;
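  // Mark every block as free by writing 0xFF over the full FPM layout, one
  // block-sized chunk at a time.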
  std::vector<uint8_t> InitData(Layout.SB->BlockSize, 0xFF);
  BinaryStreamWriter Initializer(*Result);
  while (Initializer.bytesRemaining() > 0)
    cantFail(Initializer.writeBytes(InitData));
  return createStream(Layout.SB->BlockSize, MinLayout, MsfData, Allocator);
}

Error WritableMappedBlockStream::readBytes(uint64_t Offset, uint64_t Size,
                                           ArrayRef<uint8_t> &Buffer) {
  return ReadInterface.readBytes(Offset, Size, Buffer);
}

Error WritableMappedBlockStream::readLongestContiguousChunk(
    uint64_t Offset, ArrayRef<uint8_t> &Buffer) {
  return ReadInterface.readLongestContiguousChunk(Offset, Buffer);
}

uint64_t WritableMappedBlockStream::getLength() {
  return ReadInterface.getLength();
}

Error WritableMappedBlockStream::writeBytes(uint64_t Offset,
                                            ArrayRef<uint8_t> Buffer) {
  // Make sure we aren't trying to write beyond the end of the stream.
  if (auto EC = checkOffsetForWrite(Offset, Buffer.size()))
    return EC;

  uint64_t BlockNum = Offset / getBlockSize();
  uint64_t OffsetInBlock = Offset % getBlockSize();

  uint64_t BytesLeft = Buffer.size();
  uint64_t BytesWritten = 0;
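  // Scatter the buffer back out block by block, translating each stream
  // offset to its mapped location in the underlying MSF data.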
  while (BytesLeft > 0) {
    uint64_t StreamBlockAddr = getStreamLayout().Blocks[BlockNum];
    uint64_t BytesToWriteInChunk =
        std::min(BytesLeft, getBlockSize() - OffsetInBlock);

    const uint8_t *Chunk = Buffer.data() + BytesWritten;
    ArrayRef<uint8_t> ChunkData(Chunk, BytesToWriteInChunk);
    uint64_t MsfOffset = blockToOffset(StreamBlockAddr, getBlockSize());
    MsfOffset += OffsetInBlock;
    if (auto EC = WriteInterface.writeBytes(MsfOffset, ChunkData))
      return EC;

    BytesLeft -= BytesToWriteInChunk;
    BytesWritten += BytesToWriteInChunk;
    ++BlockNum;
    OffsetInBlock = 0;
  }

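  // Any cached read buffers that overlap the written range must be patched so
  // that previously handed-out ArrayRefs observe the new bytes.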
  ReadInterface.fixCacheAfterWrite(Offset, Buffer);

  return Error::success();
}

Error WritableMappedBlockStream::commit() { return WriteInterface.commit(); }