//===- Delta.cpp - Delta Debugging Algorithm Implementation ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the Delta Debugging Algorithm:
// it splits a given set of Targets (i.e. Functions, Instructions, BBs, etc.)
// into chunks and tries to reduce the number of chunks that are interesting.
//
//===----------------------------------------------------------------------===//

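// Overview of the reduction loop implemented below: the reduction pass
// numbers its Targets 0..N-1 in the order it queries Oracle::shouldKeep(),
// and a Chunk is an inclusive [Begin, End] range of those indices. Starting
// from the single chunk [0, N-1], each round tentatively rebuilds the module
// without the targets of one chunk; if the result still satisfies the
// interestingness test, that chunk is dropped. Once no chunk can be dropped
// at the current granularity, every remaining chunk is split in half and the
// process repeats until all chunks have size 1.
//
// For example, if no chunk can be removed, the granularity progression for
// N = 4 is
//   {[0,3]} -> {[0,1], [2,3]} -> {[0,0], [1,1], [2,2], [3,3]}
// with each chunk tested for removal at every granularity level.
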
#include "Delta.h"
#include "ReducerWorkItem.h"
#include "TestRunner.h"
#include "Utils.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/ModuleSummaryAnalysis.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/Bitcode/BitcodeWriter.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MemoryBufferRef.h"
#include "llvm/Support/ThreadPool.h"
#include <fstream>

using namespace llvm;

extern cl::OptionCategory LLVMReduceOptions;

static cl::opt<bool> AbortOnInvalidReduction(
    "abort-on-invalid-reduction",
    cl::desc("Abort if any reduction results in invalid IR"),
    cl::cat(LLVMReduceOptions));

static cl::opt<unsigned int> StartingGranularityLevel(
    "starting-granularity-level",
    cl::desc("Number of times to divide chunks prior to first test"),
    cl::cat(LLVMReduceOptions));

#ifdef LLVM_ENABLE_THREADS
static cl::opt<unsigned> NumJobs(
    "j",
    cl::desc("Maximum number of threads to use to process chunks. Set to 1 to "
             "disable parallelism."),
    cl::init(1), cl::cat(LLVMReduceOptions));
#else
unsigned NumJobs = 1;
#endif

/// Splits Chunks in half and prints them.
/// If unable to split (when chunk size is 1) returns false.
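///
/// For example, {[0,7]} becomes {[0,3], [4,7]}, while a single-element chunk
/// such as [5,5] is kept unchanged.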
static bool increaseGranularity(std::vector<Chunk> &Chunks) {
  if (Verbose)
    errs() << "Increasing granularity...";
  std::vector<Chunk> NewChunks;
  bool SplitAny = false;

  for (Chunk C : Chunks) {
    if (C.End - C.Begin == 0)
      NewChunks.push_back(C);
    else {
      int Half = (C.Begin + C.End) / 2;
      NewChunks.push_back({C.Begin, Half});
      NewChunks.push_back({Half + 1, C.End});
      SplitAny = true;
    }
  }
  if (SplitAny) {
    Chunks = NewChunks;
    if (Verbose) {
      errs() << "Success! " << NewChunks.size() << " New Chunks:\n";
      for (auto C : Chunks) {
        errs() << '\t';
        C.print();
        errs() << '\n';
      }
    }
  }
  return SplitAny;
}

// Check if \p ChunkToCheckForUninterestingness is interesting. Returns the
// modified module if the chunk resulted in a reduction.
static std::unique_ptr<ReducerWorkItem>
CheckChunk(const Chunk ChunkToCheckForUninterestingness,
           std::unique_ptr<ReducerWorkItem> Clone, const TestRunner &Test,
           ReductionFunc ExtractChunksFromModule,
           const DenseSet<Chunk> &UninterestingChunks,
           const std::vector<Chunk> &ChunksStillConsideredInteresting) {
  // Take all chunks in ChunksStillConsideredInteresting, except those we've
  // already deemed uninteresting (UninterestingChunks) but haven't removed
  // from ChunksStillConsideredInteresting yet, and additionally ignore the
  // ChunkToCheckForUninterestingness chunk.
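  // For example, if the chunks still considered interesting are
  // {[0,3], [4,7], [8,11]}, [4,7] is the chunk under test, and [0,3] was
  // already found uninteresting in this round, then CurrentChunks ends up as
  // {[8,11]} and only the targets it covers are kept in the cloned module.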
  std::vector<Chunk> CurrentChunks;
  CurrentChunks.reserve(ChunksStillConsideredInteresting.size() -
                        UninterestingChunks.size() - 1);
  copy_if(ChunksStillConsideredInteresting, std::back_inserter(CurrentChunks),
          [&](const Chunk &C) {
            return C != ChunkToCheckForUninterestingness &&
                   !UninterestingChunks.count(C);
          });

  // Generate Module with only Targets inside Current Chunks
  Oracle O(CurrentChunks);
  ExtractChunksFromModule(O, *Clone);

  // Some reductions may result in invalid IR. Skip such reductions.
  if (Clone->verify(&errs())) {
    if (AbortOnInvalidReduction) {
      errs() << "Invalid reduction, aborting.\n";
      Clone->print(errs());
      exit(1);
    }
    if (Verbose) {
      errs() << " **** WARNING | reduction resulted in invalid module, "
                "skipping\n";
    }
    return nullptr;
  }

  if (Verbose) {
    errs() << "Ignoring: ";
    ChunkToCheckForUninterestingness.print();
    for (const Chunk &C : UninterestingChunks)
      C.print();
    errs() << "\n";
  }

  if (!Clone->isReduced(Test)) {
    // Program became non-reduced, so this chunk appears to be interesting.
    if (Verbose)
      errs() << "\n";
    return nullptr;
  }
  return Clone;
}

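// Worker-thread helper: parses OriginalBC into a private ReducerWorkItem with
// its own LLVMContext, runs CheckChunk on it, and returns the reduced module
// serialized back to bitcode. An empty result means the chunk could not be
// removed.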
static SmallString<0> ProcessChunkFromSerializedBitcode(
    const Chunk ChunkToCheckForUninterestingness, const TestRunner &Test,
    ReductionFunc ExtractChunksFromModule,
    const DenseSet<Chunk> &UninterestingChunks,
    ArrayRef<Chunk> ChunksStillConsideredInteresting, StringRef OriginalBC,
    std::atomic<bool> &AnyReduced) {
  LLVMContext Ctx;
  auto CloneMMM = std::make_unique<ReducerWorkItem>();
  MemoryBufferRef Data(OriginalBC, "<bc file>");
  CloneMMM->readBitcode(Data, Ctx, Test.getToolName());

  SmallString<0> Result;
  if (std::unique_ptr<ReducerWorkItem> ChunkResult =
          CheckChunk(ChunkToCheckForUninterestingness, std::move(CloneMMM),
                     Test, ExtractChunksFromModule, UninterestingChunks,
                     ChunksStillConsideredInteresting)) {
    raw_svector_ostream BCOS(Result);
    ChunkResult->writeBitcode(BCOS);
    // Communicate that the task reduced a chunk.
    AnyReduced = true;
  }
  return Result;
}

using SharedTaskQueue = std::deque<std::shared_future<SmallString<0>>>;

/// Runs the Delta Debugging algorithm: splits the code into chunks and
/// reduces the number of chunks that are considered interesting by the
/// given test. The number of chunks is determined by a preliminary run of the
/// reduction pass where no change must be made to the module.
void llvm::runDeltaPass(TestRunner &Test, ReductionFunc ExtractChunksFromModule,
                        StringRef Message) {
  assert(!Test.getProgram().verify(&errs()) &&
         "input module is broken before making changes");
  errs() << "*** " << Message << "...\n";

  int Targets;
  {
    // Count the number of chunks by counting the number of calls to
    // Oracle::shouldKeep() but always returning true so no changes are
    // made.
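    // With the single chunk [0, INT_MAX] every target index falls inside a
    // chunk, so each shouldKeep() query returns true and simply bumps the
    // counter; the module itself is left untouched.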
    std::vector<Chunk> AllChunks = {{0, INT_MAX}};
    Oracle Counter(AllChunks);
    ExtractChunksFromModule(Counter, Test.getProgram());
    Targets = Counter.count();

    assert(!Test.getProgram().verify(&errs()) &&
           "input module is broken after counting chunks");
    assert(Test.getProgram().isReduced(Test) &&
           "input module no longer interesting after counting chunks");

#ifndef NDEBUG
    // Make sure that the number of chunks does not change as we reduce.
    std::vector<Chunk> NoChunks = {{0, INT_MAX}};
    Oracle NoChunksCounter(NoChunks);
    std::unique_ptr<ReducerWorkItem> Clone =
        Test.getProgram().clone(Test.getTargetMachine());
    ExtractChunksFromModule(NoChunksCounter, *Clone);
    assert(Targets == NoChunksCounter.count() &&
           "number of chunks changes when reducing");
#endif
  }
  if (!Targets) {
    if (Verbose)
      errs() << "\nNothing to reduce\n";
    errs() << "----------------------------\n";
    return;
  }

  std::vector<Chunk> ChunksStillConsideredInteresting = {{0, Targets - 1}};
  std::unique_ptr<ReducerWorkItem> ReducedProgram;

  for (unsigned int Level = 0; Level < StartingGranularityLevel; Level++) {
    increaseGranularity(ChunksStillConsideredInteresting);
  }

  std::atomic<bool> AnyReduced;
  std::unique_ptr<ThreadPoolInterface> ChunkThreadPoolPtr;
  if (NumJobs > 1)
    ChunkThreadPoolPtr =
        std::make_unique<DefaultThreadPool>(hardware_concurrency(NumJobs));

  bool FoundAtLeastOneNewUninterestingChunkWithCurrentGranularity;
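  // Each iteration of the loop below makes one pass over the chunks that are
  // still considered interesting, trying to prove individual chunks
  // uninteresting. The loop ends once either no chunks remain, or a full pass
  // removed nothing and increaseGranularity() can no longer split any chunk.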
  do {
    FoundAtLeastOneNewUninterestingChunkWithCurrentGranularity = false;

    DenseSet<Chunk> UninterestingChunks;

    // When running with more than one thread, serialize the original bitcode
    // to OriginalBC.
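    // Each worker task then re-parses OriginalBC into its own module, so the
    // tasks never share an LLVMContext.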
    SmallString<0> OriginalBC;
    if (NumJobs > 1) {
      raw_svector_ostream BCOS(OriginalBC);
      Test.getProgram().writeBitcode(BCOS);
    }

    SharedTaskQueue TaskQueue;
    for (auto I = ChunksStillConsideredInteresting.rbegin(),
              E = ChunksStillConsideredInteresting.rend();
         I != E; ++I) {
      std::unique_ptr<ReducerWorkItem> Result = nullptr;
      unsigned WorkLeft = std::distance(I, E);

      // Run in parallel mode, if the user requested more than one thread and
      // there are at least a few chunks to process.
      if (NumJobs > 1 && WorkLeft > 1) {
        unsigned NumInitialTasks = std::min(WorkLeft, unsigned(NumJobs));
        unsigned NumChunksProcessed = 0;

        ThreadPoolInterface &ChunkThreadPool = *ChunkThreadPoolPtr;
        assert(TaskQueue.empty());

        AnyReduced = false;
        // Queue jobs to process NumInitialTasks chunks in parallel using
        // ChunkThreadPool. When the tasks are added to the pool, parse the
        // original module from OriginalBC with a fresh LLVMContext object. This
        // ensures that the cloned module of each task uses an independent
        // LLVMContext object. If a task reduces the input, serialize the result
        // back in the corresponding Result element.
        for (unsigned J = 0; J < NumInitialTasks; ++J) {
          Chunk ChunkToCheck = *(I + J);
          TaskQueue.emplace_back(ChunkThreadPool.async(
              ProcessChunkFromSerializedBitcode, ChunkToCheck, std::ref(Test),
              ExtractChunksFromModule, UninterestingChunks,
              ChunksStillConsideredInteresting, OriginalBC,
              std::ref(AnyReduced)));
        }

        // Start processing results of the queued tasks. We wait for the first
        // task in the queue to finish. If it reduced a chunk, we parse the
        // result and exit the loop.
        // Otherwise we will try to schedule a new task, if
        //  * no other pending job reduced a chunk and
        //  * we have not reached the end of the chunk set.
        while (!TaskQueue.empty()) {
          auto &Future = TaskQueue.front();
          Future.wait();

          NumChunksProcessed++;
          SmallString<0> Res = Future.get();
          TaskQueue.pop_front();
          if (Res.empty()) {
            unsigned NumScheduledTasks = NumChunksProcessed + TaskQueue.size();
            if (!AnyReduced && I + NumScheduledTasks != E) {
              Chunk ChunkToCheck = *(I + NumScheduledTasks);
              TaskQueue.emplace_back(ChunkThreadPool.async(
                  ProcessChunkFromSerializedBitcode, ChunkToCheck,
                  std::ref(Test), ExtractChunksFromModule, UninterestingChunks,
                  ChunksStillConsideredInteresting, OriginalBC,
                  std::ref(AnyReduced)));
            }
            continue;
          }

          Result = std::make_unique<ReducerWorkItem>();
          MemoryBufferRef Data(StringRef(Res), "<bc file>");
          Result->readBitcode(Data, Test.getProgram().M->getContext(),
                              Test.getToolName());
          break;
        }

        // If we broke out of the loop, we still need to wait for everything to
        // avoid race access to the chunk set.
        //
        // TODO: Create a way to kill remaining items we're ignoring; they could
        // take a long time.
        ChunkThreadPoolPtr->wait();
        TaskQueue.clear();

        // Forward I to the last chunk processed in parallel.
        I += NumChunksProcessed - 1;
      } else {
        Result =
            CheckChunk(*I, Test.getProgram().clone(Test.getTargetMachine()),
                       Test, ExtractChunksFromModule, UninterestingChunks,
                       ChunksStillConsideredInteresting);
      }

      if (!Result)
        continue;

      const Chunk ChunkToCheckForUninterestingness = *I;
      FoundAtLeastOneNewUninterestingChunkWithCurrentGranularity = true;
      UninterestingChunks.insert(ChunkToCheckForUninterestingness);
      ReducedProgram = std::move(Result);
    }
    // Delete uninteresting chunks
    erase_if(ChunksStillConsideredInteresting,
             [&UninterestingChunks](const Chunk &C) {
               return UninterestingChunks.count(C);
             });
  } while (!ChunksStillConsideredInteresting.empty() &&
           (FoundAtLeastOneNewUninterestingChunkWithCurrentGranularity ||
            increaseGranularity(ChunksStillConsideredInteresting)));

  // If we reduced the testcase, replace it.
  if (ReducedProgram) {
    Test.setProgram(std::move(ReducedProgram));
    // FIXME: Report meaningful progress info
    Test.writeOutput(" **** SUCCESS | Saved new best reduction to ");
  }
  if (Verbose)
    errs() << "Couldn't increase anymore.\n";
  errs() << "----------------------------\n";
}
