//===- ReducerWorkItem.cpp - Wrapper for Module and MachineFunction -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ReducerWorkItem.h"
#include "TestRunner.h"
#include "llvm/Analysis/ModuleSummaryAnalysis.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/Bitcode/BitcodeWriter.h"
#include "llvm/CodeGen/CommandFlags.h"
#include "llvm/CodeGen/MIRParser/MIRParser.h"
#include "llvm/CodeGen/MIRPrinter.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValueManager.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Verifier.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Support/MemoryBufferRef.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Support/WithColor.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/TargetParser/Host.h"
#include "llvm/Transforms/IPO/ThinLTOBitcodeWriter.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include <optional>

using namespace llvm;

ReducerWorkItem::ReducerWorkItem() = default;
ReducerWorkItem::~ReducerWorkItem() = default;

extern cl::OptionCategory LLVMReduceOptions;
static cl::opt<std::string> TargetTriple("mtriple",
                                         cl::desc("Set the target triple"),
                                         cl::cat(LLVMReduceOptions));

static cl::opt<bool> TmpFilesAsBitcode(
    "write-tmp-files-as-bitcode",
    cl::desc("Always write temporary files as bitcode instead of textual IR"),
    cl::init(false), cl::cat(LLVMReduceOptions));

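// Copy the MachineFrameInfo state from SrcMFI into DstMFI, recreating the
// stack objects so that frame index numbering is preserved and remapping the
// save/restore points through Src2DstMBB.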
static void cloneFrameInfo(
    MachineFrameInfo &DstMFI, const MachineFrameInfo &SrcMFI,
    const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &Src2DstMBB) {
  DstMFI.setFrameAddressIsTaken(SrcMFI.isFrameAddressTaken());
  DstMFI.setReturnAddressIsTaken(SrcMFI.isReturnAddressTaken());
  DstMFI.setHasStackMap(SrcMFI.hasStackMap());
  DstMFI.setHasPatchPoint(SrcMFI.hasPatchPoint());
  DstMFI.setUseLocalStackAllocationBlock(
      SrcMFI.getUseLocalStackAllocationBlock());
  DstMFI.setOffsetAdjustment(SrcMFI.getOffsetAdjustment());

  DstMFI.ensureMaxAlignment(SrcMFI.getMaxAlign());
  assert(DstMFI.getMaxAlign() == SrcMFI.getMaxAlign() &&
         "we need to set exact alignment");

  DstMFI.setAdjustsStack(SrcMFI.adjustsStack());
  DstMFI.setHasCalls(SrcMFI.hasCalls());
  DstMFI.setHasOpaqueSPAdjustment(SrcMFI.hasOpaqueSPAdjustment());
  DstMFI.setHasCopyImplyingStackAdjustment(
      SrcMFI.hasCopyImplyingStackAdjustment());
  DstMFI.setHasVAStart(SrcMFI.hasVAStart());
  DstMFI.setHasMustTailInVarArgFunc(SrcMFI.hasMustTailInVarArgFunc());
  DstMFI.setHasTailCall(SrcMFI.hasTailCall());

  if (SrcMFI.isMaxCallFrameSizeComputed())
    DstMFI.setMaxCallFrameSize(SrcMFI.getMaxCallFrameSize());

  DstMFI.setCVBytesOfCalleeSavedRegisters(
      SrcMFI.getCVBytesOfCalleeSavedRegisters());

  if (MachineBasicBlock *SavePt = SrcMFI.getSavePoint())
    DstMFI.setSavePoint(Src2DstMBB.find(SavePt)->second);
  if (MachineBasicBlock *RestorePt = SrcMFI.getRestorePoint())
    DstMFI.setRestorePoint(Src2DstMBB.find(RestorePt)->second);

  auto CopyObjectProperties = [](MachineFrameInfo &DstMFI,
                                 const MachineFrameInfo &SrcMFI, int FI) {
    if (SrcMFI.isStatepointSpillSlotObjectIndex(FI))
      DstMFI.markAsStatepointSpillSlotObjectIndex(FI);
    DstMFI.setObjectSSPLayout(FI, SrcMFI.getObjectSSPLayout(FI));
    DstMFI.setObjectZExt(FI, SrcMFI.isObjectZExt(FI));
    DstMFI.setObjectSExt(FI, SrcMFI.isObjectSExt(FI));
  };

  for (int i = 0, e = SrcMFI.getNumObjects() - SrcMFI.getNumFixedObjects();
       i != e; ++i) {
    int NewFI;

    assert(!SrcMFI.isFixedObjectIndex(i));
    if (SrcMFI.isVariableSizedObjectIndex(i)) {
      NewFI = DstMFI.CreateVariableSizedObject(SrcMFI.getObjectAlign(i),
                                               SrcMFI.getObjectAllocation(i));
    } else {
      NewFI = DstMFI.CreateStackObject(
          SrcMFI.getObjectSize(i), SrcMFI.getObjectAlign(i),
          SrcMFI.isSpillSlotObjectIndex(i), SrcMFI.getObjectAllocation(i),
          SrcMFI.getStackID(i));
      DstMFI.setObjectOffset(NewFI, SrcMFI.getObjectOffset(i));
    }

    CopyObjectProperties(DstMFI, SrcMFI, i);

    (void)NewFI;
    assert(i == NewFI && "expected to keep stable frame index numbering");
  }

  // Copy the fixed frame objects backwards to preserve frame index numbers,
  // since CreateFixedObject uses front insertion.
  for (int i = -1; i >= (int)-SrcMFI.getNumFixedObjects(); --i) {
    assert(SrcMFI.isFixedObjectIndex(i));
    int NewFI = DstMFI.CreateFixedObject(
        SrcMFI.getObjectSize(i), SrcMFI.getObjectOffset(i),
        SrcMFI.isImmutableObjectIndex(i), SrcMFI.isAliasedObjectIndex(i));
    CopyObjectProperties(DstMFI, SrcMFI, i);

    (void)NewFI;
    assert(i == NewFI && "expected to keep stable frame index numbering");
  }

  for (unsigned I = 0, E = SrcMFI.getLocalFrameObjectCount(); I < E; ++I) {
    auto LocalObject = SrcMFI.getLocalFrameObjectMap(I);
    DstMFI.mapLocalFrameObject(LocalObject.first, LocalObject.second);
  }

  DstMFI.setCalleeSavedInfo(SrcMFI.getCalleeSavedInfo());

  if (SrcMFI.hasStackProtectorIndex()) {
    DstMFI.setStackProtectorIndex(SrcMFI.getStackProtectorIndex());
  }

  // FIXME: Needs test, missing MIR serialization.
  if (SrcMFI.hasFunctionContextIndex()) {
    DstMFI.setFunctionContextIndex(SrcMFI.getFunctionContextIndex());
  }
}

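// Recreate SrcJTI's jump tables in DstMF, remapping every destination block
// through Src2DstMBB.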
static void cloneJumpTableInfo(
    MachineFunction &DstMF, const MachineJumpTableInfo &SrcJTI,
    const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &Src2DstMBB) {

  auto *DstJTI = DstMF.getOrCreateJumpTableInfo(SrcJTI.getEntryKind());

  std::vector<MachineBasicBlock *> DstBBs;

  for (const MachineJumpTableEntry &Entry : SrcJTI.getJumpTables()) {
    for (MachineBasicBlock *X : Entry.MBBs)
      DstBBs.push_back(Src2DstMBB.find(X)->second);

    DstJTI->createJumpTableIndex(DstBBs);
    DstBBs.clear();
  }
}

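// Clone SrcMI's memory operands onto DstMI, reallocating them in DstMF and
// remapping any PseudoSourceValues to DstMF's PseudoSourceValueManager.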
static void cloneMemOperands(MachineInstr &DstMI, MachineInstr &SrcMI,
                             MachineFunction &SrcMF, MachineFunction &DstMF) {
  // The new MachineMemOperands should be owned by the new function's
  // Allocator.
  PseudoSourceValueManager &PSVMgr = DstMF.getPSVManager();

  // We also need to remap the PseudoSourceValues from the new function's
  // PseudoSourceValueManager.
  SmallVector<MachineMemOperand *, 2> NewMMOs;
  for (MachineMemOperand *OldMMO : SrcMI.memoperands()) {
    MachinePointerInfo NewPtrInfo(OldMMO->getPointerInfo());
    if (const PseudoSourceValue *PSV =
            dyn_cast_if_present<const PseudoSourceValue *>(NewPtrInfo.V)) {
      switch (PSV->kind()) {
      case PseudoSourceValue::Stack:
        NewPtrInfo.V = PSVMgr.getStack();
        break;
      case PseudoSourceValue::GOT:
        NewPtrInfo.V = PSVMgr.getGOT();
        break;
      case PseudoSourceValue::JumpTable:
        NewPtrInfo.V = PSVMgr.getJumpTable();
        break;
      case PseudoSourceValue::ConstantPool:
        NewPtrInfo.V = PSVMgr.getConstantPool();
        break;
      case PseudoSourceValue::FixedStack:
        NewPtrInfo.V = PSVMgr.getFixedStack(
            cast<FixedStackPseudoSourceValue>(PSV)->getFrameIndex());
        break;
      case PseudoSourceValue::GlobalValueCallEntry:
        NewPtrInfo.V = PSVMgr.getGlobalValueCallEntry(
            cast<GlobalValuePseudoSourceValue>(PSV)->getValue());
        break;
      case PseudoSourceValue::ExternalSymbolCallEntry:
        NewPtrInfo.V = PSVMgr.getExternalSymbolCallEntry(
            cast<ExternalSymbolPseudoSourceValue>(PSV)->getSymbol());
        break;
      case PseudoSourceValue::TargetCustom:
      default:
        // FIXME: We have no generic interface for allocating custom PSVs.
        report_fatal_error("Cloning TargetCustom PSV not handled");
      }
    }

    MachineMemOperand *NewMMO = DstMF.getMachineMemOperand(
        NewPtrInfo, OldMMO->getFlags(), OldMMO->getMemoryType(),
        OldMMO->getBaseAlign(), OldMMO->getAAInfo(), OldMMO->getRanges(),
        OldMMO->getSyncScopeID(), OldMMO->getSuccessOrdering(),
        OldMMO->getFailureOrdering());
    NewMMOs.push_back(NewMMO);
  }

  DstMI.setMemRefs(DstMF, NewMMOs);
}

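// Create a complete copy of SrcMF inside DestMMI: blocks, frame info, jump
// tables, virtual registers, instructions, and function-level properties.
// State that cannot be cloned yet triggers a fatal error rather than a
// silently incomplete copy.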
static std::unique_ptr<MachineFunction> cloneMF(MachineFunction *SrcMF,
                                                MachineModuleInfo &DestMMI) {
  auto DstMF = std::make_unique<MachineFunction>(
      SrcMF->getFunction(), SrcMF->getTarget(), SrcMF->getSubtarget(),
      SrcMF->getFunctionNumber(), DestMMI);
  DenseMap<MachineBasicBlock *, MachineBasicBlock *> Src2DstMBB;

  auto *SrcMRI = &SrcMF->getRegInfo();
  auto *DstMRI = &DstMF->getRegInfo();

  // Clone blocks.
  for (MachineBasicBlock &SrcMBB : *SrcMF) {
    MachineBasicBlock *DstMBB =
        DstMF->CreateMachineBasicBlock(SrcMBB.getBasicBlock());
    Src2DstMBB[&SrcMBB] = DstMBB;

    DstMBB->setCallFrameSize(SrcMBB.getCallFrameSize());

    if (SrcMBB.isIRBlockAddressTaken())
      DstMBB->setAddressTakenIRBlock(SrcMBB.getAddressTakenIRBlock());
    if (SrcMBB.isMachineBlockAddressTaken())
      DstMBB->setMachineBlockAddressTaken();

    // FIXME: This is not serialized
    if (SrcMBB.hasLabelMustBeEmitted())
      DstMBB->setLabelMustBeEmitted();

    DstMBB->setAlignment(SrcMBB.getAlignment());

    // FIXME: This is not serialized
    DstMBB->setMaxBytesForAlignment(SrcMBB.getMaxBytesForAlignment());

    DstMBB->setIsEHPad(SrcMBB.isEHPad());
    DstMBB->setIsEHScopeEntry(SrcMBB.isEHScopeEntry());
    DstMBB->setIsEHCatchretTarget(SrcMBB.isEHCatchretTarget());
    DstMBB->setIsEHFuncletEntry(SrcMBB.isEHFuncletEntry());

    // FIXME: These are not serialized
    DstMBB->setIsCleanupFuncletEntry(SrcMBB.isCleanupFuncletEntry());
    DstMBB->setIsBeginSection(SrcMBB.isBeginSection());
    DstMBB->setIsEndSection(SrcMBB.isEndSection());

    DstMBB->setSectionID(SrcMBB.getSectionID());
    DstMBB->setIsInlineAsmBrIndirectTarget(
        SrcMBB.isInlineAsmBrIndirectTarget());

    // FIXME: This is not serialized
    if (std::optional<uint64_t> Weight = SrcMBB.getIrrLoopHeaderWeight())
      DstMBB->setIrrLoopHeaderWeight(*Weight);
  }

  const MachineFrameInfo &SrcMFI = SrcMF->getFrameInfo();
  MachineFrameInfo &DstMFI = DstMF->getFrameInfo();

  // Copy stack objects and other info
  cloneFrameInfo(DstMFI, SrcMFI, Src2DstMBB);

  if (MachineJumpTableInfo *SrcJTI = SrcMF->getJumpTableInfo()) {
    cloneJumpTableInfo(*DstMF, *SrcJTI, Src2DstMBB);
  }

  // Remap the debug info frame index references.
  DstMF->VariableDbgInfos = SrcMF->VariableDbgInfos;

  // Clone virtual registers
  for (unsigned I = 0, E = SrcMRI->getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);
    Register NewReg =
        DstMRI->createIncompleteVirtualRegister(SrcMRI->getVRegName(Reg));
    assert(NewReg == Reg && "expected to preserve virtreg number");

    DstMRI->setRegClassOrRegBank(NewReg, SrcMRI->getRegClassOrRegBank(Reg));

    LLT RegTy = SrcMRI->getType(Reg);
    if (RegTy.isValid())
      DstMRI->setType(NewReg, RegTy);

    // Copy register allocation hints.
    const auto &Hints = SrcMRI->getRegAllocationHints(Reg);
    for (Register PrefReg : Hints.second)
      DstMRI->addRegAllocationHint(NewReg, PrefReg);
  }

  const TargetSubtargetInfo &STI = DstMF->getSubtarget();
  const TargetInstrInfo *TII = STI.getInstrInfo();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  // Link blocks.
  for (auto &SrcMBB : *SrcMF) {
    auto *DstMBB = Src2DstMBB[&SrcMBB];
    DstMF->push_back(DstMBB);

    for (auto It = SrcMBB.succ_begin(), IterEnd = SrcMBB.succ_end();
         It != IterEnd; ++It) {
      auto *SrcSuccMBB = *It;
      auto *DstSuccMBB = Src2DstMBB[SrcSuccMBB];
      DstMBB->addSuccessor(DstSuccMBB, SrcMBB.getSuccProbability(It));
    }

    for (auto &LI : SrcMBB.liveins_dbg())
      DstMBB->addLiveIn(LI);

    // Make sure MRI knows about registers clobbered by unwinder.
    if (DstMBB->isEHPad()) {
      if (auto *RegMask = TRI->getCustomEHPadPreservedMask(*DstMF))
        DstMRI->addPhysRegsUsedFromRegMask(RegMask);
    }
  }

  DenseSet<const uint32_t *> ConstRegisterMasks;

  // Track predefined/named regmasks which we ignore.
  for (const uint32_t *Mask : TRI->getRegMasks())
    ConstRegisterMasks.insert(Mask);

  // Clone instructions.
  for (auto &SrcMBB : *SrcMF) {
    auto *DstMBB = Src2DstMBB[&SrcMBB];
    for (auto &SrcMI : SrcMBB) {
      const auto &MCID = TII->get(SrcMI.getOpcode());
      auto *DstMI = DstMF->CreateMachineInstr(MCID, SrcMI.getDebugLoc(),
                                              /*NoImplicit=*/true);
      DstMI->setFlags(SrcMI.getFlags());
      DstMI->setAsmPrinterFlag(SrcMI.getAsmPrinterFlags());

      DstMBB->push_back(DstMI);
      for (auto &SrcMO : SrcMI.operands()) {
        MachineOperand DstMO(SrcMO);
        DstMO.clearParent();

        // Update MBB.
        if (DstMO.isMBB())
          DstMO.setMBB(Src2DstMBB[DstMO.getMBB()]);
        else if (DstMO.isRegMask()) {
          DstMRI->addPhysRegsUsedFromRegMask(DstMO.getRegMask());

          if (!ConstRegisterMasks.count(DstMO.getRegMask())) {
            uint32_t *DstMask = DstMF->allocateRegMask();
            std::memcpy(DstMask, SrcMO.getRegMask(),
                        sizeof(*DstMask) *
                            MachineOperand::getRegMaskSize(TRI->getNumRegs()));
            DstMO.setRegMask(DstMask);
          }
        }

        DstMI->addOperand(DstMO);
      }

      cloneMemOperands(*DstMI, SrcMI, *SrcMF, *DstMF);
    }
  }

  DstMF->setAlignment(SrcMF->getAlignment());
  DstMF->setExposesReturnsTwice(SrcMF->exposesReturnsTwice());
  DstMF->setHasInlineAsm(SrcMF->hasInlineAsm());
  DstMF->setHasWinCFI(SrcMF->hasWinCFI());

  DstMF->getProperties().reset().set(SrcMF->getProperties());

  if (!SrcMF->getFrameInstructions().empty() ||
      !SrcMF->getLongjmpTargets().empty() ||
      !SrcMF->getCatchretTargets().empty())
    report_fatal_error("cloning not implemented for machine function property");

  DstMF->setCallsEHReturn(SrcMF->callsEHReturn());
  DstMF->setCallsUnwindInit(SrcMF->callsUnwindInit());
  DstMF->setHasEHCatchret(SrcMF->hasEHCatchret());
  DstMF->setHasEHScopes(SrcMF->hasEHScopes());
  DstMF->setHasEHFunclets(SrcMF->hasEHFunclets());
  DstMF->setIsOutlined(SrcMF->isOutlined());

  if (!SrcMF->getLandingPads().empty() ||
      !SrcMF->getCodeViewAnnotations().empty() ||
      !SrcMF->getTypeInfos().empty() ||
      !SrcMF->getFilterIds().empty() ||
      SrcMF->hasAnyWasmLandingPadIndex() ||
      SrcMF->hasAnyCallSiteLandingPad() ||
      SrcMF->hasAnyCallSiteLabel() ||
      !SrcMF->getCallSitesInfo().empty())
    report_fatal_error("cloning not implemented for machine function property");

  DstMF->setDebugInstrNumberingCount(SrcMF->DebugInstrNumberingCount);

  if (!DstMF->cloneInfoFrom(*SrcMF, Src2DstMBB))
    report_fatal_error("target does not implement MachineFunctionInfo cloning");

  DstMRI->freezeReservedRegs();

  DstMF->verify(nullptr, "", /*AbortOnError=*/true);
  return DstMF;
}

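// Register all targets so a TargetMachine can be constructed for whatever
// triple the input uses.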
static void initializeTargetInfo() {
  InitializeAllTargets();
  InitializeAllTargetMCs();
  InitializeAllAsmPrinters();
  InitializeAllAsmParsers();
}

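// Print the work item: MIR (module followed by each machine function) when a
// MachineModuleInfo is present, textual LLVM IR otherwise.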
void ReducerWorkItem::print(raw_ostream &ROS, void *p) const {
  if (MMI) {
    printMIR(ROS, *M);
    for (Function &F : *M) {
      if (auto *MF = MMI->getMachineFunction(F))
        printMIR(ROS, *MF);
    }
  } else {
    M->print(ROS, /*AssemblyAnnotationWriter=*/nullptr,
             /*ShouldPreserveUseListOrder=*/true);
  }
}

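// Verify the module and, if present, every machine function. Returns true if
// something is broken.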
bool ReducerWorkItem::verify(raw_fd_ostream *OS) const {
  if (verifyModule(*M, OS))
    return true;

  if (!MMI)
    return false;

  for (const Function &F : getModule()) {
    if (const MachineFunction *MF = MMI->getMachineFunction(F)) {
      if (!MF->verify(nullptr, "", /*AbortOnError=*/false))
        return true;
    }
  }

  return false;
}

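// Write the current work item to a temporary file and run the interestingness
// test on it.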
bool ReducerWorkItem::isReduced(const TestRunner &Test) const {
  const bool UseBitcode = Test.inputIsBitcode() || TmpFilesAsBitcode;

  SmallString<128> CurrentFilepath;

  // Write ReducerWorkItem to tmp file
  int FD;
  std::error_code EC = sys::fs::createTemporaryFile(
      "llvm-reduce", isMIR() ? "mir" : (UseBitcode ? "bc" : "ll"), FD,
      CurrentFilepath,
      UseBitcode && !isMIR() ? sys::fs::OF_None : sys::fs::OF_Text);
  if (EC) {
    WithColor::error(errs(), Test.getToolName())
        << "error making unique filename: " << EC.message() << '\n';
    exit(1);
  }

  ToolOutputFile Out(CurrentFilepath, FD);

  writeOutput(Out.os(), UseBitcode);

  Out.os().close();
  if (Out.os().has_error()) {
    WithColor::error(errs(), Test.getToolName())
        << "error emitting bitcode to file '" << CurrentFilepath
        << "': " << Out.os().error().message() << '\n';
    exit(1);
  }

  // Current Chunks aren't interesting
  return Test.run(CurrentFilepath);
}

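// Duplicate the work item. For MIR (TM != nullptr) the IR module is shared and
// only the machine functions are cloned; for plain IR the whole module is
// cloned.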
std::unique_ptr<ReducerWorkItem>
ReducerWorkItem::clone(const TargetMachine *TM) const {
  auto CloneMMM = std::make_unique<ReducerWorkItem>();
  if (TM) {
    // We're assuming the Module IR contents are always unchanged by MIR
    // reductions, and can share it as a constant.
    CloneMMM->M = M;

    // MachineModuleInfo contains a lot of other state used during codegen
    // which we won't be using here, but we should be able to ignore it
    // (although this is pretty ugly).
    const LLVMTargetMachine *LLVMTM =
        static_cast<const LLVMTargetMachine *>(TM);
    CloneMMM->MMI = std::make_unique<MachineModuleInfo>(LLVMTM);

    for (const Function &F : getModule()) {
      if (auto *MF = MMI->getMachineFunction(F))
        CloneMMM->MMI->insertFunction(F, cloneMF(MF, *CloneMMM->MMI));
    }
  } else {
    CloneMMM->M = CloneModule(*M);
  }
  return CloneMMM;
}

/// Try to produce some number that indicates a function is getting smaller /
/// simpler.
static uint64_t computeMIRComplexityScoreImpl(const MachineFunction &MF) {
  uint64_t Score = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Add for stack objects
  Score += MFI.getNumObjects();

  // Add in the block count.
  Score += 2 * MF.size();

  const MachineRegisterInfo &MRI = MF.getRegInfo();
  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);
    Score += MRI.getRegAllocationHints(Reg).second.size();
  }

  for (const MachineBasicBlock &MBB : MF) {
    for (const MachineInstr &MI : MBB) {
      const unsigned Opc = MI.getOpcode();

      // Reductions may want or need to introduce implicit_defs, so don't count
      // them.
      // TODO: These probably should count in some way.
      if (Opc == TargetOpcode::IMPLICIT_DEF ||
          Opc == TargetOpcode::G_IMPLICIT_DEF)
        continue;

      // Each instruction adds to the score
      Score += 4;

      if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI ||
          Opc == TargetOpcode::INLINEASM || Opc == TargetOpcode::INLINEASM_BR)
        ++Score;

      if (MI.getFlags() != 0)
        ++Score;

      // Increase weight for more operands.
      for (const MachineOperand &MO : MI.operands()) {
        ++Score;

        // Treat registers as more complex.
        if (MO.isReg()) {
          ++Score;

          // And subregisters as even more complex.
          if (MO.getSubReg()) {
            ++Score;
            if (MO.isDef())
              ++Score;
          }
        } else if (MO.isRegMask())
          ++Score;
      }
    }
  }

  return Score;
}

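// Sum the per-machine-function complexity scores for the whole module.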
uint64_t ReducerWorkItem::computeMIRComplexityScore() const {
  uint64_t Score = 0;

  for (const Function &F : getModule()) {
    if (auto *MF = MMI->getMachineFunction(F))
      Score += computeMIRComplexityScoreImpl(*MF);
  }

  return Score;
}

// FIXME: ReduceOperandsSkip has a similar function, except it uses larger
// numbers to mean more reduced.
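// Rank how "reduced" a value is: lower numbers are simpler replacements (null,
// one, undef, ...), higher numbers are more complex ones (arguments,
// instructions).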
static unsigned classifyReductivePower(const Value *V) {
  if (auto *C = dyn_cast<ConstantData>(V)) {
    if (C->isNullValue())
      return 0;
    if (C->isOneValue())
      return 1;
    if (isa<UndefValue>(V))
      return 2;
    return 3;
  }

  if (isa<GlobalValue>(V))
    return 4;

  // TODO: Account for expression size
  if (isa<ConstantExpr>(V))
    return 5;

  if (isa<Constant>(V))
    return 1;

  if (isa<Argument>(V))
    return 6;

  if (isa<Instruction>(V))
    return 7;

  return 0;
}

// TODO: Additional flags and attributes may be complexity reducing. If we
// start adding flags and attributes, they could have negative cost.
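/// IR counterpart of computeMIRComplexityScoreImpl: score a function by its
/// blocks, instructions, operands, flags, attributes, and metadata.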
static uint64_t computeIRComplexityScoreImpl(const Function &F) {
  uint64_t Score = 1; // Count the function itself
  SmallVector<std::pair<unsigned, MDNode *>> MDs;

  AttributeList Attrs = F.getAttributes();
  for (AttributeSet AttrSet : Attrs)
    Score += AttrSet.getNumAttributes();

  for (const BasicBlock &BB : F) {
    ++Score;

    for (const Instruction &I : BB) {
      ++Score;

      if (const auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
        if (OverflowOp->hasNoUnsignedWrap())
          ++Score;
        if (OverflowOp->hasNoSignedWrap())
          ++Score;
      } else if (const auto *GEP = dyn_cast<GEPOperator>(&I)) {
        if (GEP->isInBounds())
          ++Score;
      } else if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I)) {
        if (ExactOp->isExact())
          ++Score;
      } else if (const auto *FPOp = dyn_cast<FPMathOperator>(&I)) {
        FastMathFlags FMF = FPOp->getFastMathFlags();
        if (FMF.allowReassoc())
          ++Score;
        if (FMF.noNaNs())
          ++Score;
        if (FMF.noInfs())
          ++Score;
        if (FMF.noSignedZeros())
          ++Score;
        if (FMF.allowReciprocal())
          ++Score;
        if (FMF.allowContract())
          ++Score;
        if (FMF.approxFunc())
          ++Score;
      }

      for (const Value *Operand : I.operands()) {
        ++Score;
        Score += classifyReductivePower(Operand);
      }

      I.getAllMetadata(MDs);
      Score += MDs.size();
      MDs.clear();
    }
  }

  return Score;
}

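// Score the whole module: named metadata, globals, aliases, ifuncs, and every
// function body.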
uint64_t ReducerWorkItem::computeIRComplexityScore() const {
  uint64_t Score = 0;

  const Module &M = getModule();
  Score += M.named_metadata_size();

  SmallVector<std::pair<unsigned, MDNode *>, 32> GlobalMetadata;
  for (const GlobalVariable &GV : M.globals()) {
    ++Score;

    if (GV.hasInitializer())
      Score += classifyReductivePower(GV.getInitializer());

    // TODO: Account for linkage?

    GV.getAllMetadata(GlobalMetadata);
    Score += GlobalMetadata.size();
    GlobalMetadata.clear();
  }

  for (const GlobalAlias &GA : M.aliases())
    Score += classifyReductivePower(GA.getAliasee());

  for (const GlobalIFunc &GI : M.ifuncs())
    Score += classifyReductivePower(GI.getResolver());

  for (const Function &F : M)
    Score += computeIRComplexityScoreImpl(F);

  return Score;
}

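// Emit the work item to OS, as bitcode when requested (and meaningful for this
// kind of input), otherwise in textual form.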
void ReducerWorkItem::writeOutput(raw_ostream &OS, bool EmitBitcode) const {
  // Requesting bitcode emission with mir is nonsense, so just ignore it.
  if (EmitBitcode && !isMIR())
    writeBitcode(OS);
  else
    print(OS, /*AnnotationWriter=*/nullptr);
}

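// Parse a bitcode buffer into the work item, recording its LTO configuration
// so that writeBitcode() can emit it in the same form.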
void ReducerWorkItem::readBitcode(MemoryBufferRef Data, LLVMContext &Ctx,
                                  StringRef ToolName) {
  Expected<BitcodeFileContents> IF = llvm::getBitcodeFileContents(Data);
  if (!IF) {
    WithColor::error(errs(), ToolName) << IF.takeError();
    exit(1);
  }
  BitcodeModule BM = IF->Mods[0];
  Expected<BitcodeLTOInfo> LI = BM.getLTOInfo();
  Expected<std::unique_ptr<Module>> MOrErr = BM.parseModule(Ctx);
  if (!LI || !MOrErr) {
    WithColor::error(errs(), ToolName) << IF.takeError();
    exit(1);
  }
  LTOInfo = std::make_unique<BitcodeLTOInfo>(*LI);
  M = std::move(MOrErr.get());
}

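// Write bitcode matching the input's LTO configuration: use the ThinLTO
// bitcode writer for split-LTO-unit inputs, and rebuild a module summary when
// the input had one.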
void ReducerWorkItem::writeBitcode(raw_ostream &OutStream) const {
  if (LTOInfo && LTOInfo->IsThinLTO && LTOInfo->EnableSplitLTOUnit) {
    PassBuilder PB;
    LoopAnalysisManager LAM;
    FunctionAnalysisManager FAM;
    CGSCCAnalysisManager CGAM;
    ModuleAnalysisManager MAM;
    PB.registerModuleAnalyses(MAM);
    PB.registerCGSCCAnalyses(CGAM);
    PB.registerFunctionAnalyses(FAM);
    PB.registerLoopAnalyses(LAM);
    PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
    ModulePassManager MPM;
    MPM.addPass(ThinLTOBitcodeWriterPass(OutStream, nullptr));
    MPM.run(*M, MAM);
  } else {
    std::unique_ptr<ModuleSummaryIndex> Index;
    if (LTOInfo && LTOInfo->HasSummary) {
      ProfileSummaryInfo PSI(*M);
      Index = std::make_unique<ModuleSummaryIndex>(
          buildModuleSummaryIndex(*M, nullptr, &PSI));
    }
    WriteBitcodeToFile(getModule(), OutStream,
                       /*ShouldPreserveUseListOrder=*/true, Index.get());
  }
}

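// Parse the input file as MIR, textual IR, or bitcode and build the initial
// work item. Returns the work item plus a flag saying whether the input was
// bitcode; returns {nullptr, false} on error.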
std::pair<std::unique_ptr<ReducerWorkItem>, bool>
llvm::parseReducerWorkItem(StringRef ToolName, StringRef Filename,
                           LLVMContext &Ctxt,
                           std::unique_ptr<TargetMachine> &TM, bool IsMIR) {
  bool IsBitcode = false;
  Triple TheTriple;

  auto MMM = std::make_unique<ReducerWorkItem>();

  if (IsMIR) {
    initializeTargetInfo();

    auto FileOrErr = MemoryBuffer::getFileOrSTDIN(Filename, /*IsText=*/true);
    if (std::error_code EC = FileOrErr.getError()) {
      WithColor::error(errs(), ToolName) << EC.message() << '\n';
      return {nullptr, false};
    }

    std::unique_ptr<MIRParser> MParser =
        createMIRParser(std::move(FileOrErr.get()), Ctxt);

    auto SetDataLayout = [&](StringRef DataLayoutTargetTriple,
                             StringRef OldDLStr) -> std::optional<std::string> {
      // NB: We always call createTargetMachineForTriple() even if an explicit
      // DataLayout is already set in the module since we want to use this
      // callback to setup the TargetMachine rather than doing it later.
      std::string IRTargetTriple = DataLayoutTargetTriple.str();
      if (!TargetTriple.empty())
        IRTargetTriple = Triple::normalize(TargetTriple);
      TheTriple = Triple(IRTargetTriple);
      if (TheTriple.getTriple().empty())
        TheTriple.setTriple(sys::getDefaultTargetTriple());
      ExitOnError ExitOnErr(std::string(ToolName) + ": error: ");
      TM = ExitOnErr(codegen::createTargetMachineForTriple(TheTriple.str()));

      return TM->createDataLayout().getStringRepresentation();
    };

    std::unique_ptr<Module> M = MParser->parseIRModule(SetDataLayout);
    LLVMTargetMachine *LLVMTM = static_cast<LLVMTargetMachine *>(TM.get());

    MMM->MMI = std::make_unique<MachineModuleInfo>(LLVMTM);
    MParser->parseMachineFunctions(*M, *MMM->MMI);
    MMM->M = std::move(M);
  } else {
    SMDiagnostic Err;
    ErrorOr<std::unique_ptr<MemoryBuffer>> MB =
        MemoryBuffer::getFileOrSTDIN(Filename);
    if (std::error_code EC = MB.getError()) {
      WithColor::error(errs(), ToolName)
          << Filename << ": " << EC.message() << "\n";
      return {nullptr, false};
    }

    if (!isBitcode((const unsigned char *)(*MB)->getBufferStart(),
                   (const unsigned char *)(*MB)->getBufferEnd())) {
      std::unique_ptr<Module> Result = parseIR(**MB, Err, Ctxt);
      if (!Result) {
        Err.print(ToolName.data(), errs());
        return {nullptr, false};
      }
      MMM->M = std::move(Result);
    } else {
      IsBitcode = true;
      MMM->readBitcode(MemoryBufferRef(**MB), Ctxt, ToolName);

      if (MMM->LTOInfo->IsThinLTO && MMM->LTOInfo->EnableSplitLTOUnit)
        initializeTargetInfo();
    }
  }
  if (MMM->verify(&errs())) {
    WithColor::error(errs(), ToolName)
        << Filename << " - input module is broken!\n";
    return {nullptr, false};
  }
  return {std::move(MMM), IsBitcode};
}
