1//===- InstrRefBasedImpl.cpp - Tracking Debug Value MIs -------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file InstrRefBasedImpl.cpp
9///
10/// This is a separate implementation of LiveDebugValues, see
11/// LiveDebugValues.cpp and VarLocBasedImpl.cpp for more information.
12///
13/// This pass propagates variable locations between basic blocks, resolving
14/// control flow conflicts between them. The problem is SSA construction, where
15/// each debug instruction assigns the *value* that a variable has, and every
16/// instruction where the variable is in scope uses that variable. The resulting
17/// map of instruction-to-value is then translated into a register (or spill)
18/// location for each variable over each instruction.
19///
20/// The primary difference from normal SSA construction is that we cannot
21/// _create_ PHI values that contain variable values. CodeGen has already
22/// completed, and we can't alter it just to make debug-info complete. Thus:
23/// we can identify function positions where we would like a PHI value for a
24/// variable, but must search the MachineFunction to see whether such a PHI is
25/// available. If no such PHI exists, the variable location must be dropped.
26///
27/// To achieve this, we perform two kinds of analysis. First, we identify
28/// every value defined by every instruction (ignoring those that only move
29/// another value), then re-compute an SSA-form representation of the
/// MachineFunction, using value propagation to eliminate any unnecessary
31/// PHI values. This gives us a map of every value computed in the function,
32/// and its location within the register file / stack.
33///
34/// Secondly, for each variable we perform the same analysis, where each debug
35/// instruction is considered a def, and every instruction where the variable
36/// is in lexical scope as a use. Value propagation is used again to eliminate
/// any unnecessary PHIs. This gives us a map of each variable to the value
38/// it should have in a block.
39///
40/// Once both are complete, we have two maps for each block:
41/// * Variables to the values they should have,
42/// * Values to the register / spill slot they are located in.
43/// After which we can marry-up variable values with a location, and emit
44/// DBG_VALUE instructions specifying those locations. Variable locations may
45/// be dropped in this process due to the desired variable value not being
46/// resident in any machine location, or because there is no PHI value in any
47/// location that accurately represents the desired value. The building of
48/// location lists for each block is left to DbgEntityHistoryCalculator.
49///
50/// This pass is kept efficient because the size of the first SSA problem
51/// is proportional to the working-set size of the function, which the compiler
52/// tries to keep small. (It's also proportional to the number of blocks).
53/// Additionally, we repeatedly perform the second SSA problem analysis with
54/// only the variables and blocks in a single lexical scope, exploiting their
55/// locality.
56///
57/// ### Terminology
58///
59/// A machine location is a register or spill slot, a value is something that's
60/// defined by an instruction or PHI node, while a variable value is the value
61/// assigned to a variable. A variable location is a machine location, that must
62/// contain the appropriate variable value. A value that is a PHI node is
63/// occasionally called an mphi.
64///
65/// The first SSA problem is the "machine value location" problem,
66/// because we're determining which machine locations contain which values.
67/// The "locations" are constant: what's unknown is what value they contain.
68///
69/// The second SSA problem (the one for variables) is the "variable value
70/// problem", because it's determining what values a variable has, rather than
71/// what location those values are placed in.
72///
73/// TODO:
74/// Overlapping fragments
75/// Entry values
76/// Add back DEBUG statements for debugging this
77/// Collect statistics
78///
79//===----------------------------------------------------------------------===//
80
81#include "llvm/ADT/DenseMap.h"
82#include "llvm/ADT/PostOrderIterator.h"
83#include "llvm/ADT/STLExtras.h"
84#include "llvm/ADT/SmallPtrSet.h"
85#include "llvm/ADT/SmallSet.h"
86#include "llvm/ADT/SmallVector.h"
87#include "llvm/BinaryFormat/Dwarf.h"
88#include "llvm/CodeGen/LexicalScopes.h"
89#include "llvm/CodeGen/MachineBasicBlock.h"
90#include "llvm/CodeGen/MachineDominators.h"
91#include "llvm/CodeGen/MachineFrameInfo.h"
92#include "llvm/CodeGen/MachineFunction.h"
93#include "llvm/CodeGen/MachineInstr.h"
94#include "llvm/CodeGen/MachineInstrBuilder.h"
95#include "llvm/CodeGen/MachineInstrBundle.h"
96#include "llvm/CodeGen/MachineMemOperand.h"
97#include "llvm/CodeGen/MachineOperand.h"
98#include "llvm/CodeGen/PseudoSourceValue.h"
99#include "llvm/CodeGen/TargetFrameLowering.h"
100#include "llvm/CodeGen/TargetInstrInfo.h"
101#include "llvm/CodeGen/TargetLowering.h"
102#include "llvm/CodeGen/TargetPassConfig.h"
103#include "llvm/CodeGen/TargetRegisterInfo.h"
104#include "llvm/CodeGen/TargetSubtargetInfo.h"
105#include "llvm/Config/llvm-config.h"
106#include "llvm/IR/DebugInfoMetadata.h"
107#include "llvm/IR/DebugLoc.h"
108#include "llvm/IR/Function.h"
109#include "llvm/MC/MCRegisterInfo.h"
110#include "llvm/Support/Casting.h"
111#include "llvm/Support/Compiler.h"
112#include "llvm/Support/Debug.h"
113#include "llvm/Support/GenericIteratedDominanceFrontier.h"
114#include "llvm/Support/TypeSize.h"
115#include "llvm/Support/raw_ostream.h"
116#include "llvm/Target/TargetMachine.h"
117#include "llvm/Transforms/Utils/SSAUpdaterImpl.h"
118#include <algorithm>
119#include <cassert>
120#include <climits>
121#include <cstdint>
122#include <functional>
123#include <queue>
124#include <tuple>
125#include <utility>
126#include <vector>
127
128#include "InstrRefBasedImpl.h"
129#include "LiveDebugValues.h"
130#include <optional>
131
132using namespace llvm;
133using namespace LiveDebugValues;
134
// SSAUpdaterImpl.h sets DEBUG_TYPE; redefine it for this pass.
136#undef DEBUG_TYPE
137#define DEBUG_TYPE "livedebugvalues"
138
// Act more like the VarLoc implementation, by propagating some locations too
// far and ignoring some transfers.
// NOTE: this flag exists for comparison/debugging against the old
// VarLoc-based implementation; it is hidden and defaults to off.
static cl::opt<bool> EmulateOldLDV("emulate-old-livedebugvalues", cl::Hidden,
                                   cl::desc("Act like old LiveDebugValues did"),
                                   cl::init(Val: false));
144
145// Limit for the maximum number of stack slots we should track, past which we
146// will ignore any spills. InstrRefBasedLDV gathers detailed information on all
147// stack slots which leads to high memory consumption, and in some scenarios
148// (such as asan with very many locals) the working set of the function can be
149// very large, causing many spills. In these scenarios, it is very unlikely that
150// the developer has hundreds of variables live at the same time that they're
151// carefully thinking about -- instead, they probably autogenerated the code.
152// When this happens, gracefully stop tracking excess spill slots, rather than
153// consuming all the developer's memory.
154static cl::opt<unsigned>
155 StackWorkingSetLimit("livedebugvalues-max-stack-slots", cl::Hidden,
156 cl::desc("livedebugvalues-stack-ws-limit"),
157 cl::init(Val: 250));
158
// Sentinel DbgOpID (all-bits-set index) representing an undef debug operand.
DbgOpID DbgOpID::UndefID = DbgOpID(0xffffffff);
160
/// Tracker for converting machine value locations and variable values into
/// variable locations (the output of LiveDebugValues), recorded as DBG_VALUEs
/// specifying block live-in locations and transfers within blocks.
///
/// Operating on a per-block basis, this class takes a (pre-loaded) MLocTracker
/// and must be initialized with the set of variable values that are live-in to
/// the block. The caller then repeatedly calls process(). TransferTracker picks
/// out variable locations for the live-in variable values (if there _is_ a
/// location) and creates the corresponding DBG_VALUEs. Then, as the block is
/// stepped through, transfers of values between machine locations are
/// identified and if profitable, a DBG_VALUE created.
///
/// This is where debug use-before-defs would be resolved: a variable with an
/// unavailable value could materialize in the middle of a block, when the
/// value becomes available. Or, we could detect clobbers and re-specify the
/// variable in a backup location. (XXX these are unimplemented).
class TransferTracker {
public:
  const TargetInstrInfo *TII;
  const TargetLowering *TLI;
  /// This machine location tracker is assumed to always contain the up-to-date
  /// value mapping for all machine locations. TransferTracker only reads
  /// information from it. (XXX make it const?)
  MLocTracker *MTracker;
  MachineFunction &MF;
  /// Whether entry-value DBG_VALUEs should be emitted for unrecoverable
  /// parameter locations; read from TargetOptions in the constructor.
  bool ShouldEmitDebugEntryValues;

  /// Record of all changes in variable locations at a block position. Awkwardly
  /// we allow inserting either before or after the point: MBB != nullptr
  /// indicates it's before, otherwise after.
  struct Transfer {
    MachineBasicBlock::instr_iterator Pos; /// Position to insert DBG_VALUEs
    /// NOTE(review): the comment below says non-null means "after", which
    /// contradicts the struct-level comment above -- confirm which is right.
    MachineBasicBlock *MBB; /// non-null if we should insert after.
    SmallVector<MachineInstr *, 4> Insts; /// Vector of DBG_VALUEs to insert.
  };

  /// Stores the resolved operands (machine locations and constants) and
  /// qualifying meta-information needed to construct a concrete DBG_VALUE-like
  /// instruction.
  struct ResolvedDbgValue {
    SmallVector<ResolvedDbgOp> Ops;
    DbgValueProperties Properties;

    ResolvedDbgValue(SmallVectorImpl<ResolvedDbgOp> &Ops,
                     DbgValueProperties Properties)
        : Ops(Ops.begin(), Ops.end()), Properties(Properties) {}

    /// Returns all the LocIdx values used in this struct, in the order in which
    /// they appear as operands in the debug value; may contain duplicates.
    auto loc_indices() const {
      return map_range(
          C: make_filter_range(
              Range: Ops, Pred: [](const ResolvedDbgOp &Op) { return !Op.IsConst; }),
          F: [](const ResolvedDbgOp &Op) { return Op.Loc; });
    }
  };
217
  /// Collection of transfers (DBG_VALUEs) to be inserted.
  SmallVector<Transfer, 32> Transfers;

  /// Local cache of what-value-is-in-what-LocIdx. Used to identify differences
  /// between TransferTrackers view of variable locations and MLocTrackers. For
  /// example, MLocTracker observes all clobbers, but TransferTracker lazily
  /// does not.
  SmallVector<ValueIDNum, 32> VarLocs;

  /// Map from LocIdxes to which DebugVariables are based on that location.
  /// Maintained while stepping through the block. Not accurate if
  /// VarLocs[Idx] != MTracker->LocIdxToIDNum[Idx].
  DenseMap<LocIdx, SmallSet<DebugVariable, 4>> ActiveMLocs;

  /// Map from DebugVariable to its current location and qualifying meta
  /// information. To be used in conjunction with ActiveMLocs to construct
  /// enough information for the DBG_VALUEs for a particular LocIdx.
  DenseMap<DebugVariable, ResolvedDbgValue> ActiveVLocs;

  /// Temporary cache of DBG_VALUEs to be entered into the Transfers collection.
  SmallVector<MachineInstr *, 4> PendingDbgValues;

  /// Record of a use-before-def: created when a value that's live-in to the
  /// current block isn't available in any machine location, but it will be
  /// defined in this block.
  struct UseBeforeDef {
    /// Value of this variable, def'd in block.
    SmallVector<DbgOp> Values;
    /// Identity of this variable.
    DebugVariable Var;
    /// Additional variable properties.
    DbgValueProperties Properties;
    UseBeforeDef(ArrayRef<DbgOp> Values, const DebugVariable &Var,
                 const DbgValueProperties &Properties)
        : Values(Values.begin(), Values.end()), Var(Var),
          Properties(Properties) {}
  };

  /// Map from instruction index (within the block) to the set of UseBeforeDefs
  /// that become defined at that instruction.
  DenseMap<unsigned, SmallVector<UseBeforeDef, 1>> UseBeforeDefs;

  /// The set of variables that are in UseBeforeDefs and can become a location
  /// once the relevant value is defined. An element being erased from this
  /// collection prevents the use-before-def materializing.
  DenseSet<DebugVariable> UseBeforeDefVariables;

  /// Target register info, used for callee-saved-register queries.
  const TargetRegisterInfo &TRI;
  /// Bit-vector of callee-saved registers, indexed by register unit/alias.
  const BitVector &CalleeSavedRegs;
267
  /// Construct a TransferTracker over \p MF. \p MTracker must be the machine
  /// location tracker for the function; the remaining target objects are used
  /// to derive TLI and the entry-value emission policy.
  TransferTracker(const TargetInstrInfo *TII, MLocTracker *MTracker,
                  MachineFunction &MF, const TargetRegisterInfo &TRI,
                  const BitVector &CalleeSavedRegs, const TargetPassConfig &TPC)
      : TII(TII), MTracker(MTracker), MF(MF), TRI(TRI),
        CalleeSavedRegs(CalleeSavedRegs) {
    TLI = MF.getSubtarget().getTargetLowering();
    // Entry-value emission is a per-target option; cache the decision.
    auto &TM = TPC.getTM<TargetMachine>();
    ShouldEmitDebugEntryValues = TM.Options.ShouldEmitDebugEntryValues();
  }
277
278 bool isCalleeSaved(LocIdx L) const {
279 unsigned Reg = MTracker->LocIdxToLocID[L];
280 if (Reg >= MTracker->NumRegs)
281 return false;
282 for (MCRegAliasIterator RAI(Reg, &TRI, true); RAI.isValid(); ++RAI)
283 if (CalleeSavedRegs.test(Idx: *RAI))
284 return true;
285 return false;
286 };
287
  // An estimate of the expected lifespan of values at a machine location, with
  // a greater value corresponding to a longer expected lifespan, i.e. spill
  // slots generally live longer than callee-saved registers which generally
  // live longer than non-callee-saved registers. The minimum value of 0
  // corresponds to an illegal location that cannot have a "lifespan" at all.
  enum class LocationQuality : unsigned char {
    Illegal = 0,
    Register,
    CalleeSavedRegister,
    SpillSlot,
    Best = SpillSlot
  };

  /// A (location, quality) pair packed into a single 32-bit word. A quality
  /// of zero (Illegal) marks the default-constructed, "no location" state.
  class LocationAndQuality {
    unsigned Location : 24;
    unsigned Quality : 8;

  public:
    LocationAndQuality() : Location(0), Quality(0) {}
    LocationAndQuality(LocIdx L, LocationQuality Q)
        : Location(L.asU64()), Quality(static_cast<unsigned>(Q)) {}
    // Returns the stored location, or the illegal location when no real
    // location has been recorded yet.
    LocIdx getLoc() const {
      if (!Quality)
        return LocIdx::MakeIllegalLoc();
      return LocIdx(Location);
    }
    LocationQuality getQuality() const { return LocationQuality(Quality); }
    bool isIllegal() const { return !Quality; }
    bool isBest() const { return getQuality() == LocationQuality::Best; }
  };
318
319 // Returns the LocationQuality for the location L iff the quality of L is
320 // is strictly greater than the provided minimum quality.
321 std::optional<LocationQuality>
322 getLocQualityIfBetter(LocIdx L, LocationQuality Min) const {
323 if (L.isIllegal())
324 return std::nullopt;
325 if (Min >= LocationQuality::SpillSlot)
326 return std::nullopt;
327 if (MTracker->isSpill(Idx: L))
328 return LocationQuality::SpillSlot;
329 if (Min >= LocationQuality::CalleeSavedRegister)
330 return std::nullopt;
331 if (isCalleeSaved(L))
332 return LocationQuality::CalleeSavedRegister;
333 if (Min >= LocationQuality::Register)
334 return std::nullopt;
335 return LocationQuality::Register;
336 }
337
  /// For a variable \p Var with the live-in value \p Value, attempts to resolve
  /// the DbgValue to a concrete DBG_VALUE, emitting that value and loading the
  /// tracking information to track Var throughout the block.
  /// \p ValueToLoc is a map containing the best known location for every
  /// ValueIDNum that Value may use.
  /// \p MBB is the basic block that we are loading the live-in value for.
  /// \p DbgOpStore is the map containing the DbgOpID->DbgOp mapping needed to
  /// determine the values used by Value.
  void loadVarInloc(MachineBasicBlock &MBB, DbgOpIDMap &DbgOpStore,
                    const DenseMap<ValueIDNum, LocationAndQuality> &ValueToLoc,
                    DebugVariable Var, DbgValue Value) {
    SmallVector<DbgOp> DbgOps;
    SmallVector<ResolvedDbgOp> ResolvedDbgOps;
    bool IsValueValid = true;
    unsigned LastUseBeforeDef = 0;

    // If every value used by the incoming DbgValue is available at block
    // entry, ResolvedDbgOps will contain the machine locations/constants for
    // those values and will be used to emit a debug location.
    // If one or more values are not yet available, but will all be defined in
    // this block, then LastUseBeforeDef will track the instruction index in
    // this BB at which the last of those values is defined, DbgOps will
    // contain the values that we will emit when we reach that instruction.
    // If one or more values are undef or not available throughout this block,
    // and we can't recover as an entry value, we set IsValueValid=false and
    // skip this variable.
    for (DbgOpID ID : Value.getDbgOpIDs()) {
      DbgOp Op = DbgOpStore.find(ID);
      DbgOps.push_back(Elt: Op);
      if (ID.isUndef()) {
        // An undef operand poisons the whole DbgValue.
        IsValueValid = false;
        break;
      }
      if (ID.isConst()) {
        // Constants resolve immediately; they need no machine location.
        ResolvedDbgOps.push_back(Elt: Op.MO);
        continue;
      }

      // If the value has no location, we can't make a variable location.
      // NOTE(review): assumes the caller (loadInlocs) pre-inserted every
      // non-const operand value into ValueToLoc; find() is not checked
      // against end() here -- confirm.
      const ValueIDNum &Num = Op.ID;
      auto ValuesPreferredLoc = ValueToLoc.find(Val: Num);
      if (ValuesPreferredLoc->second.isIllegal()) {
        // If it's a def that occurs in this block, register it as a
        // use-before-def to be resolved as we step through the block.
        // Continue processing values so that we add any other UseBeforeDef
        // entries needed for later.
        if (Num.getBlock() == (unsigned)MBB.getNumber() && !Num.isPHI()) {
          LastUseBeforeDef = std::max(a: LastUseBeforeDef,
                                      b: static_cast<unsigned>(Num.getInst()));
          continue;
        }
        // Value is unavailable in this block: try an entry-value DBG_VALUE
        // as a last resort, then drop the variable.
        recoverAsEntryValue(Var, Prop: Value.Properties, Num);
        IsValueValid = false;
        break;
      }

      // Defer modifying ActiveVLocs until after we've confirmed we have a
      // live range.
      LocIdx M = ValuesPreferredLoc->second.getLoc();
      ResolvedDbgOps.push_back(Elt: M);
    }

    // If we cannot produce a valid value for the LiveIn value within this
    // block, skip this variable.
    if (!IsValueValid)
      return;

    // Add UseBeforeDef entry for the last value to be defined in this block.
    if (LastUseBeforeDef) {
      addUseBeforeDef(Var, Properties: Value.Properties, DbgOps,
                      Inst: LastUseBeforeDef);
      return;
    }

    // The LiveIn value is available at block entry, begin tracking and record
    // the transfer.
    for (const ResolvedDbgOp &Op : ResolvedDbgOps)
      if (!Op.IsConst)
        ActiveMLocs[Op.Loc].insert(V: Var);
    auto NewValue = ResolvedDbgValue{ResolvedDbgOps, Value.Properties};
    auto Result = ActiveVLocs.insert(KV: std::make_pair(x&: Var, y&: NewValue));
    if (!Result.second)
      Result.first->second = NewValue;
    // Queue the DBG_VALUE; the caller flushes PendingDbgValues.
    PendingDbgValues.push_back(
        Elt: MTracker->emitLoc(DbgOps: ResolvedDbgOps, Var, Properties: Value.Properties));
  }
424
  /// Load object with live-in variable values. \p mlocs contains the live-in
  /// values in each machine location, while \p vlocs the live-in variable
  /// values. This method picks variable locations for the live-in variables,
  /// creates DBG_VALUEs and puts them in #Transfers, then prepares the other
  /// object fields to track variable locations as we step through the block.
  /// FIXME: could just examine mloctracker instead of passing in \p mlocs?
  void
  loadInlocs(MachineBasicBlock &MBB, ValueTable &MLocs, DbgOpIDMap &DbgOpStore,
             const SmallVectorImpl<std::pair<DebugVariable, DbgValue>> &VLocs,
             unsigned NumLocs) {
    // Reset all per-block tracking state.
    ActiveMLocs.clear();
    ActiveVLocs.clear();
    VarLocs.clear();
    VarLocs.reserve(N: NumLocs);
    UseBeforeDefs.clear();
    UseBeforeDefVariables.clear();

    // Map of the preferred location for each value.
    DenseMap<ValueIDNum, LocationAndQuality> ValueToLoc;

    // Initialize the preferred-location map with illegal locations, to be
    // filled in later.
    for (const auto &VLoc : VLocs)
      if (VLoc.second.Kind == DbgValue::Def)
        for (DbgOpID OpID : VLoc.second.getDbgOpIDs())
          if (!OpID.ID.IsConst)
            ValueToLoc.insert(KV: {DbgOpStore.find(ID: OpID).ID, LocationAndQuality()});

    ActiveMLocs.reserve(NumEntries: VLocs.size());
    ActiveVLocs.reserve(NumEntries: VLocs.size());

    // Produce a map of value numbers to the current machine locs they live
    // in. When emulating VarLocBasedImpl, there should only be one
    // location; when not, we get to pick.
    for (auto Location : MTracker->locations()) {
      LocIdx Idx = Location.Idx;
      ValueIDNum &VNum = MLocs[Idx.asU64()];
      if (VNum == ValueIDNum::EmptyValue)
        continue;
      VarLocs.push_back(Elt: VNum);

      // Is there a variable that wants a location for this value? If not, skip.
      auto VIt = ValueToLoc.find(Val: VNum);
      if (VIt == ValueToLoc.end())
        continue;

      auto &Previous = VIt->second;
      // If this is the first location with that value, pick it. Otherwise,
      // consider whether it's a "longer term" location.
      std::optional<LocationQuality> ReplacementQuality =
          getLocQualityIfBetter(L: Idx, Min: Previous.getQuality());
      if (ReplacementQuality)
        Previous = LocationAndQuality(Idx, *ReplacementQuality);
    }

    // Now map variables to their picked LocIdxes.
    for (const auto &Var : VLocs) {
      loadVarInloc(MBB, DbgOpStore, ValueToLoc, Var: Var.first, Value: Var.second);
    }
    // Insert all DBG_VALUEs queued by loadVarInloc at the top of the block.
    flushDbgValues(Pos: MBB.begin(), MBB: &MBB);
  }
486
487 /// Record that \p Var has value \p ID, a value that becomes available
488 /// later in the function.
489 void addUseBeforeDef(const DebugVariable &Var,
490 const DbgValueProperties &Properties,
491 const SmallVectorImpl<DbgOp> &DbgOps, unsigned Inst) {
492 UseBeforeDefs[Inst].emplace_back(Args: DbgOps, Args: Var, Args: Properties);
493 UseBeforeDefVariables.insert(V: Var);
494 }
495
  /// After the instruction at index \p Inst and position \p pos has been
  /// processed, check whether it defines a variable value in a use-before-def.
  /// If so, and the variable value hasn't changed since the start of the
  /// block, create a DBG_VALUE.
  void checkInstForNewValues(unsigned Inst, MachineBasicBlock::iterator pos) {
    auto MIt = UseBeforeDefs.find(Val: Inst);
    if (MIt == UseBeforeDefs.end())
      return;

    // Map of values to the locations that store them for every value used by
    // the variables that may have become available.
    SmallDenseMap<ValueIDNum, LocationAndQuality> ValueToLoc;

    // Populate ValueToLoc with illegal default mappings for every value used by
    // any UseBeforeDef variables for this instruction.
    for (auto &Use : MIt->second) {
      // Skip variables that were redefined since the record was created
      // (redefVar removes them from UseBeforeDefVariables).
      if (!UseBeforeDefVariables.count(V: Use.Var))
        continue;

      for (DbgOp &Op : Use.Values) {
        assert(!Op.isUndef() && "UseBeforeDef erroneously created for a "
                                "DbgValue with undef values.");
        if (Op.IsConst)
          continue;

        ValueToLoc.insert(KV: {Op.ID, LocationAndQuality()});
      }
    }

    // Exit early if we have no DbgValues to produce.
    if (ValueToLoc.empty())
      return;

    // Determine the best location for each desired value.
    for (auto Location : MTracker->locations()) {
      LocIdx Idx = Location.Idx;
      ValueIDNum &LocValueID = Location.Value;

      // Is there a variable that wants a location for this value? If not, skip.
      auto VIt = ValueToLoc.find(Val: LocValueID);
      if (VIt == ValueToLoc.end())
        continue;

      auto &Previous = VIt->second;
      // If this is the first location with that value, pick it. Otherwise,
      // consider whether it's a "longer term" location.
      std::optional<LocationQuality> ReplacementQuality =
          getLocQualityIfBetter(L: Idx, Min: Previous.getQuality());
      if (ReplacementQuality)
        Previous = LocationAndQuality(Idx, *ReplacementQuality);
    }

    // Using the map of values to locations, produce a final set of values for
    // this variable.
    for (auto &Use : MIt->second) {
      if (!UseBeforeDefVariables.count(V: Use.Var))
        continue;

      SmallVector<ResolvedDbgOp> DbgOps;

      for (DbgOp &Op : Use.Values) {
        if (Op.IsConst) {
          DbgOps.push_back(Elt: Op.MO);
          continue;
        }
        LocIdx NewLoc = ValueToLoc.find(Val: Op.ID)->second.getLoc();
        // An illegal location means the value isn't resident anywhere: stop
        // resolving, leaving DbgOps short (detected by the size check below).
        if (NewLoc.isIllegal())
          break;
        DbgOps.push_back(Elt: NewLoc);
      }

      // If at least one value used by this debug value is no longer available,
      // i.e. one of the values was killed before we finished defining all of
      // the values used by this variable, discard.
      if (DbgOps.size() != Use.Values.size())
        continue;

      // Otherwise, we're good to go.
      PendingDbgValues.push_back(
          Elt: MTracker->emitLoc(DbgOps, Var: Use.Var, Properties: Use.Properties));
    }
    // Insert any produced DBG_VALUEs after the defining instruction.
    flushDbgValues(Pos: pos, MBB: nullptr);
  }
579
580 /// Helper to move created DBG_VALUEs into Transfers collection.
581 void flushDbgValues(MachineBasicBlock::iterator Pos, MachineBasicBlock *MBB) {
582 if (PendingDbgValues.size() == 0)
583 return;
584
585 // Pick out the instruction start position.
586 MachineBasicBlock::instr_iterator BundleStart;
587 if (MBB && Pos == MBB->begin())
588 BundleStart = MBB->instr_begin();
589 else
590 BundleStart = getBundleStart(I: Pos->getIterator());
591
592 Transfers.push_back(Elt: {.Pos: BundleStart, .MBB: MBB, .Insts: PendingDbgValues});
593 PendingDbgValues.clear();
594 }
595
596 bool isEntryValueVariable(const DebugVariable &Var,
597 const DIExpression *Expr) const {
598 if (!Var.getVariable()->isParameter())
599 return false;
600
601 if (Var.getInlinedAt())
602 return false;
603
604 if (Expr->getNumElements() > 0 && !Expr->isDeref())
605 return false;
606
607 return true;
608 }
609
610 bool isEntryValueValue(const ValueIDNum &Val) const {
611 // Must be in entry block (block number zero), and be a PHI / live-in value.
612 if (Val.getBlock() || !Val.isPHI())
613 return false;
614
615 // Entry values must enter in a register.
616 if (MTracker->isSpill(Idx: Val.getLoc()))
617 return false;
618
619 Register SP = TLI->getStackPointerRegisterToSaveRestore();
620 Register FP = TRI.getFrameRegister(MF);
621 Register Reg = MTracker->LocIdxToLocID[Val.getLoc()];
622 return Reg != SP && Reg != FP;
623 }
624
625 bool recoverAsEntryValue(const DebugVariable &Var,
626 const DbgValueProperties &Prop,
627 const ValueIDNum &Num) {
628 // Is this variable location a candidate to be an entry value. First,
629 // should we be trying this at all?
630 if (!ShouldEmitDebugEntryValues)
631 return false;
632
633 const DIExpression *DIExpr = Prop.DIExpr;
634
635 // We don't currently emit entry values for DBG_VALUE_LISTs.
636 if (Prop.IsVariadic) {
637 // If this debug value can be converted to be non-variadic, then do so;
638 // otherwise give up.
639 auto NonVariadicExpression =
640 DIExpression::convertToNonVariadicExpression(Expr: DIExpr);
641 if (!NonVariadicExpression)
642 return false;
643 DIExpr = *NonVariadicExpression;
644 }
645
646 // Is the variable appropriate for entry values (i.e., is a parameter).
647 if (!isEntryValueVariable(Var, Expr: DIExpr))
648 return false;
649
650 // Is the value assigned to this variable still the entry value?
651 if (!isEntryValueValue(Val: Num))
652 return false;
653
654 // Emit a variable location using an entry value expression.
655 DIExpression *NewExpr =
656 DIExpression::prepend(Expr: DIExpr, Flags: DIExpression::EntryValue);
657 Register Reg = MTracker->LocIdxToLocID[Num.getLoc()];
658 MachineOperand MO = MachineOperand::CreateReg(Reg, isDef: false);
659
660 PendingDbgValues.push_back(
661 Elt: emitMOLoc(MO, Var, Properties: {NewExpr, Prop.Indirect, false}));
662 return true;
663 }
664
  /// Change a variable value after encountering a DBG_VALUE inside a block.
  void redefVar(const MachineInstr &MI) {
    DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(),
                      MI.getDebugLoc()->getInlinedAt());
    DbgValueProperties Properties(MI);

    // Ignore non-register locations, we don't transfer those.
    if (MI.isUndefDebugValue() ||
        all_of(Range: MI.debug_operands(),
               P: [](const MachineOperand &MO) { return !MO.isReg(); })) {
      // The variable is no longer tracked: drop its existing
      // machine-location mappings and stop tracking it.
      auto It = ActiveVLocs.find(Val: Var);
      if (It != ActiveVLocs.end()) {
        for (LocIdx Loc : It->second.loc_indices())
          ActiveMLocs[Loc].erase(V: Var);
        ActiveVLocs.erase(I: It);
      }
      // Any use-before-defs no longer apply.
      UseBeforeDefVariables.erase(V: Var);
      return;
    }

    // Resolve each debug operand: registers become machine locations,
    // everything else (constants) is passed through unchanged.
    SmallVector<ResolvedDbgOp> NewLocs;
    for (const MachineOperand &MO : MI.debug_operands()) {
      if (MO.isReg()) {
        // Any undef regs have already been filtered out above.
        Register Reg = MO.getReg();
        LocIdx NewLoc = MTracker->getRegMLoc(R: Reg);
        NewLocs.push_back(Elt: NewLoc);
      } else {
        NewLocs.push_back(Elt: MO);
      }
    }

    // Hand off to the overload that updates the tracking maps.
    redefVar(MI, Properties, NewLocs);
  }
700
  /// Handle a change in variable location within a block. Terminate the
  /// variables current location, and record the value it now refers to, so
  /// that we can detect location transfers later on.
  void redefVar(const MachineInstr &MI, const DbgValueProperties &Properties,
                SmallVectorImpl<ResolvedDbgOp> &NewLocs) {
    DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(),
                      MI.getDebugLoc()->getInlinedAt());
    // Any use-before-defs no longer apply.
    UseBeforeDefVariables.erase(V: Var);

    // Erase any previous location.
    auto It = ActiveVLocs.find(Val: Var);
    if (It != ActiveVLocs.end()) {
      for (LocIdx Loc : It->second.loc_indices())
        ActiveMLocs[Loc].erase(V: Var);
    }

    // If there _is_ no new location, all we had to do was erase.
    if (NewLocs.empty()) {
      if (It != ActiveVLocs.end())
        ActiveVLocs.erase(I: It);
      return;
    }

    SmallVector<std::pair<LocIdx, DebugVariable>> LostMLocs;
    for (ResolvedDbgOp &Op : NewLocs) {
      if (Op.IsConst)
        continue;

      LocIdx NewLoc = Op.Loc;

      // Check whether our local copy of values-by-location in #VarLocs is out
      // of date. Wipe old tracking data for the location if it's been clobbered
      // in the meantime.
      if (MTracker->readMLoc(L: NewLoc) != VarLocs[NewLoc.asU64()]) {
        // Every variable based on NewLoc is stale: collect their other
        // machine-location mappings for removal, deferred so we don't mutate
        // ActiveMLocs while iterating one of its values.
        for (const auto &P : ActiveMLocs[NewLoc]) {
          auto LostVLocIt = ActiveVLocs.find(Val: P);
          if (LostVLocIt != ActiveVLocs.end()) {
            for (LocIdx Loc : LostVLocIt->second.loc_indices()) {
              // Every active variable mapping for NewLoc will be cleared, no
              // need to track individual variables.
              if (Loc == NewLoc)
                continue;
              LostMLocs.emplace_back(Args&: Loc, Args: P);
            }
          }
          ActiveVLocs.erase(Val: P);
        }
        for (const auto &LostMLoc : LostMLocs)
          ActiveMLocs[LostMLoc.first].erase(V: LostMLoc.second);
        LostMLocs.clear();
        // The erasures above may have invalidated It; look Var up again.
        It = ActiveVLocs.find(Val: Var);
        // NOTE(review): indexed via asU64() here but with NewLoc directly just
        // below -- presumably both resolve to the same LocIdx key; confirm.
        ActiveMLocs[NewLoc.asU64()].clear();
        VarLocs[NewLoc.asU64()] = MTracker->readMLoc(L: NewLoc);
      }

      ActiveMLocs[NewLoc].insert(V: Var);
    }

    // Record the variable's new resolved operands and properties.
    if (It == ActiveVLocs.end()) {
      ActiveVLocs.insert(
          KV: std::make_pair(x&: Var, y: ResolvedDbgValue(NewLocs, Properties)));
    } else {
      It->second.Ops.assign(RHS: NewLocs);
      It->second.Properties = Properties;
    }
  }
768
769 /// Account for a location \p mloc being clobbered. Examine the variable
770 /// locations that will be terminated: and try to recover them by using
771 /// another location. Optionally, given \p MakeUndef, emit a DBG_VALUE to
772 /// explicitly terminate a location if it can't be recovered.
773 void clobberMloc(LocIdx MLoc, MachineBasicBlock::iterator Pos,
774 bool MakeUndef = true) {
775 auto ActiveMLocIt = ActiveMLocs.find(Val: MLoc);
776 if (ActiveMLocIt == ActiveMLocs.end())
777 return;
778
779 // What was the old variable value?
780 ValueIDNum OldValue = VarLocs[MLoc.asU64()];
781 clobberMloc(MLoc, OldValue, Pos, MakeUndef);
782 }
  /// Overload that takes an explicit value \p OldValue for when the value in
  /// \p MLoc has changed and the TransferTracker's locations have not been
  /// updated yet.
  void clobberMloc(LocIdx MLoc, ValueIDNum OldValue,
                   MachineBasicBlock::iterator Pos, bool MakeUndef = true) {
    auto ActiveMLocIt = ActiveMLocs.find(Val: MLoc);
    if (ActiveMLocIt == ActiveMLocs.end())
      return;

    // The clobbered location no longer holds a known value.
    VarLocs[MLoc.asU64()] = ValueIDNum::EmptyValue;

    // Examine the remaining variable locations: if we can find the same value
    // again, we can recover the location. Note: the scan keeps the *last*
    // matching location found.
    std::optional<LocIdx> NewLoc;
    for (auto Loc : MTracker->locations())
      if (Loc.Value == OldValue)
        NewLoc = Loc.Idx;

    // If there is no location, and we weren't asked to make the variable
    // explicitly undef, then stop here.
    if (!NewLoc && !MakeUndef) {
      // Try and recover a few more locations with entry values.
      for (const auto &Var : ActiveMLocIt->second) {
        auto &Prop = ActiveVLocs.find(Val: Var)->second.Properties;
        recoverAsEntryValue(Var, Prop, Num: OldValue);
      }
      flushDbgValues(Pos, MBB: nullptr);
      return;
    }

    // Examine all the variables based on this location.
    DenseSet<DebugVariable> NewMLocs;
    // If no new location has been found, every variable that depends on this
    // MLoc is dead, so end their existing MLoc->Var mappings as well.
    SmallVector<std::pair<LocIdx, DebugVariable>> LostMLocs;
    for (const auto &Var : ActiveMLocIt->second) {
      auto ActiveVLocIt = ActiveVLocs.find(Val: Var);
      // Re-state the variable location: if there's no replacement then NewLoc
      // is std::nullopt and a $noreg DBG_VALUE will be created. Otherwise, a
      // DBG_VALUE identifying the alternative location will be emitted.
      const DbgValueProperties &Properties = ActiveVLocIt->second.Properties;

      // Produce the new list of debug ops - an empty list if no new location
      // was found, or the existing list with the substitution MLoc -> NewLoc
      // otherwise.
      SmallVector<ResolvedDbgOp> DbgOps;
      if (NewLoc) {
        ResolvedDbgOp OldOp(MLoc);
        ResolvedDbgOp NewOp(*NewLoc);
        // Insert illegal ops to overwrite afterwards.
        DbgOps.insert(I: DbgOps.begin(), NumToInsert: ActiveVLocIt->second.Ops.size(),
                      Elt: ResolvedDbgOp(LocIdx::MakeIllegalLoc()));
        replace_copy(Range&: ActiveVLocIt->second.Ops, Out: DbgOps.begin(), OldValue: OldOp, NewValue: NewOp);
      }

      PendingDbgValues.push_back(Elt: MTracker->emitLoc(DbgOps, Var, Properties));

      // Update machine locations <=> variable locations maps. Defer updating
      // ActiveMLocs to avoid invalidating the ActiveMLocIt iterator.
      if (!NewLoc) {
        for (LocIdx Loc : ActiveVLocIt->second.loc_indices()) {
          if (Loc != MLoc)
            LostMLocs.emplace_back(Args&: Loc, Args: Var);
        }
        ActiveVLocs.erase(I: ActiveVLocIt);
      } else {
        ActiveVLocIt->second.Ops = DbgOps;
        NewMLocs.insert(V: Var);
      }
    }

    // Remove variables from ActiveMLocs if they no longer use any other MLocs
    // due to being killed by this clobber.
    for (auto &LocVarIt : LostMLocs) {
      auto LostMLocIt = ActiveMLocs.find(Val: LocVarIt.first);
      assert(LostMLocIt != ActiveMLocs.end() &&
             "Variable was using this MLoc, but ActiveMLocs[MLoc] has no "
             "entries?");
      LostMLocIt->second.erase(V: LocVarIt.second);
    }

    // We lazily track what locations have which values; if we've found a new
    // location for the clobbered value, remember it.
    if (NewLoc)
      VarLocs[NewLoc->asU64()] = OldValue;

    flushDbgValues(Pos, MBB: nullptr);

    // Commit ActiveMLoc changes: only now is it safe to touch the map entry
    // for MLoc and (possibly) insert under NewLoc.
    ActiveMLocIt->second.clear();
    if (!NewMLocs.empty())
      for (auto &Var : NewMLocs)
        ActiveMLocs[*NewLoc].insert(V: Var);
  }
877
  /// Transfer variables based on \p Src to be based on \p Dst. This handles
  /// both register copies as well as spills and restores. Creates DBG_VALUEs
  /// describing the movement.
  void transferMlocs(LocIdx Src, LocIdx Dst, MachineBasicBlock::iterator Pos) {
    // Does Src still contain the value num we expect? If not, it's been
    // clobbered in the meantime, and our variable locations are stale.
    if (VarLocs[Src.asU64()] != MTracker->readMLoc(L: Src))
      return;

    // assert(ActiveMLocs[Dst].size() == 0);
    //^^^ Legitimate scenario on account of un-clobbered slot being assigned to?

    // Move set of active variables from one location to another. MovingVars
    // is deliberately a copy: the insertion into ActiveMLocs[Dst] below may
    // touch the map while we still need a stable set to iterate.
    auto MovingVars = ActiveMLocs[Src];
    ActiveMLocs[Dst].insert(I: MovingVars.begin(), E: MovingVars.end());
    VarLocs[Dst.asU64()] = VarLocs[Src.asU64()];

    // For each variable based on Src; create a location at Dst.
    ResolvedDbgOp SrcOp(Src);
    ResolvedDbgOp DstOp(Dst);
    for (const auto &Var : MovingVars) {
      auto ActiveVLocIt = ActiveVLocs.find(Val: Var);
      assert(ActiveVLocIt != ActiveVLocs.end());

      // Update all instances of Src in the variable's tracked values to Dst.
      std::replace(first: ActiveVLocIt->second.Ops.begin(),
                   last: ActiveVLocIt->second.Ops.end(), old_value: SrcOp, new_value: DstOp);

      MachineInstr *MI = MTracker->emitLoc(DbgOps: ActiveVLocIt->second.Ops, Var,
                                           Properties: ActiveVLocIt->second.Properties);
      PendingDbgValues.push_back(Elt: MI);
    }
    ActiveMLocs[Src].clear();
    flushDbgValues(Pos, MBB: nullptr);

    // XXX XXX XXX "pretend to be old LDV" means dropping all tracking data
    // about the old location.
    if (EmulateOldLDV)
      VarLocs[Src.asU64()] = ValueIDNum::EmptyValue;
  }
918
919 MachineInstrBuilder emitMOLoc(const MachineOperand &MO,
920 const DebugVariable &Var,
921 const DbgValueProperties &Properties) {
922 DebugLoc DL = DILocation::get(Context&: Var.getVariable()->getContext(), Line: 0, Column: 0,
923 Scope: Var.getVariable()->getScope(),
924 InlinedAt: const_cast<DILocation *>(Var.getInlinedAt()));
925 auto MIB = BuildMI(MF, MIMD: DL, MCID: TII->get(Opcode: TargetOpcode::DBG_VALUE));
926 MIB.add(MO);
927 if (Properties.Indirect)
928 MIB.addImm(Val: 0);
929 else
930 MIB.addReg(RegNo: 0);
931 MIB.addMetadata(MD: Var.getVariable());
932 MIB.addMetadata(MD: Properties.DIExpr);
933 return MIB;
934 }
935};
936
937//===----------------------------------------------------------------------===//
938// Implementation
939//===----------------------------------------------------------------------===//
940
// Sentinel ValueIDNums: EmptyValue marks an untracked / wiped location,
// TombstoneValue is reserved for DenseMap bookkeeping. Neither can collide
// with a genuine (block, inst, loc) triple as real indexes never reach
// UINT_MAX.
ValueIDNum ValueIDNum::EmptyValue = {UINT_MAX, UINT_MAX, UINT_MAX};
ValueIDNum ValueIDNum::TombstoneValue = {UINT_MAX, UINT_MAX, UINT_MAX - 1};
943
944#ifndef NDEBUG
945void ResolvedDbgOp::dump(const MLocTracker *MTrack) const {
946 if (IsConst) {
947 dbgs() << MO;
948 } else {
949 dbgs() << MTrack->LocIdxToName(Idx: Loc);
950 }
951}
952void DbgOp::dump(const MLocTracker *MTrack) const {
953 if (IsConst) {
954 dbgs() << MO;
955 } else if (!isUndef()) {
956 dbgs() << MTrack->IDAsString(Num: ID);
957 }
958}
959void DbgOpID::dump(const MLocTracker *MTrack, const DbgOpIDMap *OpStore) const {
960 if (!OpStore) {
961 dbgs() << "ID(" << asU32() << ")";
962 } else {
963 OpStore->find(ID: *this).dump(MTrack);
964 }
965}
966void DbgValue::dump(const MLocTracker *MTrack,
967 const DbgOpIDMap *OpStore) const {
968 if (Kind == NoVal) {
969 dbgs() << "NoVal(" << BlockNo << ")";
970 } else if (Kind == VPHI || Kind == Def) {
971 if (Kind == VPHI)
972 dbgs() << "VPHI(" << BlockNo << ",";
973 else
974 dbgs() << "Def(";
975 for (unsigned Idx = 0; Idx < getDbgOpIDs().size(); ++Idx) {
976 getDbgOpID(Index: Idx).dump(MTrack, OpStore);
977 if (Idx != 0)
978 dbgs() << ",";
979 }
980 dbgs() << ")";
981 }
982 if (Properties.Indirect)
983 dbgs() << " indir";
984 if (Properties.DIExpr)
985 dbgs() << " " << *Properties.DIExpr;
986}
987#endif
988
/// Set up machine-location tracking for \p MF: reserve per-register location
/// indexes, pin the stack pointer as always-tracked, and precompute the set
/// of (size, offset) positions a stack slot may be accessed at.
MLocTracker::MLocTracker(MachineFunction &MF, const TargetInstrInfo &TII,
                         const TargetRegisterInfo &TRI,
                         const TargetLowering &TLI)
    : MF(MF), TII(TII), TRI(TRI), TLI(TLI),
      LocIdxToIDNum(ValueIDNum::EmptyValue), LocIdxToLocID(0) {
  NumRegs = TRI.getNumRegs();
  reset();
  LocIDToLocIdx.resize(new_size: NumRegs, x: LocIdx::MakeIllegalLoc());
  assert(NumRegs < (1u << NUM_LOC_BITS)); // Detect bit packing failure

  // Always track SP. This avoids the implicit clobbering caused by regmasks
  // from affecting its values. (LiveDebugValues disbelieves calls and
  // regmasks that claim to clobber SP).
  Register SP = TLI.getStackPointerRegisterToSaveRestore();
  if (SP) {
    unsigned ID = getLocID(Reg: SP);
    (void)lookupOrTrackRegister(ID);

    // Record every alias of SP so regmask handling can skip them too.
    for (MCRegAliasIterator RAI(SP, &TRI, true); RAI.isValid(); ++RAI)
      SPAliases.insert(V: *RAI);
  }

  // Build some common stack positions -- full registers being spilt to the
  // stack. Powers-of-two sizes from 8 to 512 bits get the first indexes.
  StackSlotIdxes.insert(KV: {{8, 0}, 0});
  StackSlotIdxes.insert(KV: {{16, 0}, 1});
  StackSlotIdxes.insert(KV: {{32, 0}, 2});
  StackSlotIdxes.insert(KV: {{64, 0}, 3});
  StackSlotIdxes.insert(KV: {{128, 0}, 4});
  StackSlotIdxes.insert(KV: {{256, 0}, 5});
  StackSlotIdxes.insert(KV: {{512, 0}, 6});

  // Traverse all the subregister idxes, and ensure there's an index for them.
  // Duplicates are no problem: we're interested in their position in the
  // stack slot, we don't want to type the slot.
  for (unsigned int I = 1; I < TRI.getNumSubRegIndices(); ++I) {
    unsigned Size = TRI.getSubRegIdxSize(Idx: I);
    unsigned Offs = TRI.getSubRegIdxOffset(Idx: I);
    unsigned Idx = StackSlotIdxes.size();

    // Some subregs have -1, -2 and so forth fed into their fields, to mean
    // special backend things. Ignore those.
    if (Size > 60000 || Offs > 60000)
      continue;

    StackSlotIdxes.insert(KV: {{Size, Offs}, Idx});
  }

  // There may also be strange register class sizes (think x86 fp80s).
  for (const TargetRegisterClass *RC : TRI.regclasses()) {
    unsigned Size = TRI.getRegSizeInBits(RC: *RC);

    // We might see special reserved values as sizes, and classes for other
    // stuff the machine tries to model. If it's more than 512 bits, then it
    // is very unlikely to be a register than can be spilt.
    if (Size > 512)
      continue;

    unsigned Idx = StackSlotIdxes.size();
    StackSlotIdxes.insert(KV: {{Size, 0}, Idx});
  }

  // Build the reverse mapping, index -> (size, offset).
  for (auto &Idx : StackSlotIdxes)
    StackIdxesToPos[Idx.second] = Idx.first;

  NumSlotIdxes = StackSlotIdxes.size();
}
1056
/// Begin tracking register \p ID: allocate a fresh location index for it and
/// seed it with an initial value. Returns the new index.
LocIdx MLocTracker::trackRegister(unsigned ID) {
  assert(ID != 0);
  LocIdx NewIdx = LocIdx(LocIdxToIDNum.size());
  LocIdxToIDNum.grow(n: NewIdx);
  LocIdxToLocID.grow(n: NewIdx);

  // Default: it's an mphi.
  ValueIDNum ValNum = {CurBB, 0, NewIdx};
  // Was this reg ever touched by a regmask? Walk most-recent-first so the
  // latest clobbering mask wins.
  for (const auto &MaskPair : reverse(C&: Masks)) {
    if (MaskPair.first->clobbersPhysReg(PhysReg: ID)) {
      // There was an earlier def we skipped.
      ValNum = {CurBB, MaskPair.second, NewIdx};
      break;
    }
  }

  LocIdxToIDNum[NewIdx] = ValNum;
  LocIdxToLocID[NewIdx] = ID;
  return NewIdx;
}
1078
1079void MLocTracker::writeRegMask(const MachineOperand *MO, unsigned CurBB,
1080 unsigned InstID) {
1081 // Def any register we track have that isn't preserved. The regmask
1082 // terminates the liveness of a register, meaning its value can't be
1083 // relied upon -- we represent this by giving it a new value.
1084 for (auto Location : locations()) {
1085 unsigned ID = LocIdxToLocID[Location.Idx];
1086 // Don't clobber SP, even if the mask says it's clobbered.
1087 if (ID < NumRegs && !SPAliases.count(V: ID) && MO->clobbersPhysReg(PhysReg: ID))
1088 defReg(R: ID, BB: CurBB, Inst: InstID);
1089 }
1090 Masks.push_back(Elt: std::make_pair(x&: MO, y&: InstID));
1091}
1092
1093std::optional<SpillLocationNo> MLocTracker::getOrTrackSpillLoc(SpillLoc L) {
1094 SpillLocationNo SpillID(SpillLocs.idFor(Entry: L));
1095
1096 if (SpillID.id() == 0) {
1097 // If there is no location, and we have reached the limit of how many stack
1098 // slots to track, then don't track this one.
1099 if (SpillLocs.size() >= StackWorkingSetLimit)
1100 return std::nullopt;
1101
1102 // Spill location is untracked: create record for this one, and all
1103 // subregister slots too.
1104 SpillID = SpillLocationNo(SpillLocs.insert(Entry: L));
1105 for (unsigned StackIdx = 0; StackIdx < NumSlotIdxes; ++StackIdx) {
1106 unsigned L = getSpillIDWithIdx(Spill: SpillID, Idx: StackIdx);
1107 LocIdx Idx = LocIdx(LocIdxToIDNum.size()); // New idx
1108 LocIdxToIDNum.grow(n: Idx);
1109 LocIdxToLocID.grow(n: Idx);
1110 LocIDToLocIdx.push_back(x: Idx);
1111 LocIdxToLocID[Idx] = L;
1112 // Initialize to PHI value; corresponds to the location's live-in value
1113 // during transfer function construction.
1114 LocIdxToIDNum[Idx] = ValueIDNum(CurBB, 0, Idx);
1115 }
1116 }
1117 return SpillID;
1118}
1119
1120std::string MLocTracker::LocIdxToName(LocIdx Idx) const {
1121 unsigned ID = LocIdxToLocID[Idx];
1122 if (ID >= NumRegs) {
1123 StackSlotPos Pos = locIDToSpillIdx(ID);
1124 ID -= NumRegs;
1125 unsigned Slot = ID / NumSlotIdxes;
1126 return Twine("slot ")
1127 .concat(Suffix: Twine(Slot).concat(Suffix: Twine(" sz ").concat(Suffix: Twine(Pos.first)
1128 .concat(Suffix: Twine(" offs ").concat(Suffix: Twine(Pos.second))))))
1129 .str();
1130 } else {
1131 return TRI.getRegAsmName(Reg: ID).str();
1132 }
1133}
1134
1135std::string MLocTracker::IDAsString(const ValueIDNum &Num) const {
1136 std::string DefName = LocIdxToName(Idx: Num.getLoc());
1137 return Num.asString(mlocname: DefName);
1138}
1139
1140#ifndef NDEBUG
1141LLVM_DUMP_METHOD void MLocTracker::dump() {
1142 for (auto Location : locations()) {
1143 std::string MLocName = LocIdxToName(Idx: Location.Value.getLoc());
1144 std::string DefName = Location.Value.asString(mlocname: MLocName);
1145 dbgs() << LocIdxToName(Idx: Location.Idx) << " --> " << DefName << "\n";
1146 }
1147}
1148
1149LLVM_DUMP_METHOD void MLocTracker::dump_mloc_map() {
1150 for (auto Location : locations()) {
1151 std::string foo = LocIdxToName(Idx: Location.Idx);
1152 dbgs() << "Idx " << Location.Idx.asU64() << " " << foo << "\n";
1153 }
1154}
1155#endif
1156
/// Build a DBG_VALUE or DBG_VALUE_LIST describing \p Var as located at
/// \p DbgOps, with \p Properties. Spill locations are translated into a base
/// register plus DIExpression dereference ops; any unrepresentable location
/// yields an undef ($noreg) debug instruction instead.
MachineInstrBuilder
MLocTracker::emitLoc(const SmallVectorImpl<ResolvedDbgOp> &DbgOps,
                     const DebugVariable &Var,
                     const DbgValueProperties &Properties) {
  // Line-zero location within the variable's scope.
  DebugLoc DL = DILocation::get(Context&: Var.getVariable()->getContext(), Line: 0, Column: 0,
                                Scope: Var.getVariable()->getScope(),
                                InlinedAt: const_cast<DILocation *>(Var.getInlinedAt()));

  const MCInstrDesc &Desc = Properties.IsVariadic
                                ? TII.get(Opcode: TargetOpcode::DBG_VALUE_LIST)
                                : TII.get(Opcode: TargetOpcode::DBG_VALUE);

#ifdef EXPENSIVE_CHECKS
  assert(all_of(DbgOps,
                [](const ResolvedDbgOp &Op) {
                  return Op.IsConst || !Op.Loc.isIllegal();
                }) &&
         "Did not expect illegal ops in DbgOps.");
  assert((DbgOps.size() == 0 ||
          DbgOps.size() == Properties.getLocationOpCount()) &&
         "Expected to have either one DbgOp per MI LocationOp, or none.");
#endif

  // Helper: a debug-use register operand for \p Reg (0 == $noreg).
  auto GetRegOp = [](unsigned Reg) -> MachineOperand {
    return MachineOperand::CreateReg(
        /* Reg */ Reg, /* isDef */ false, /* isImp */ false,
        /* isKill */ false, /* isDead */ false,
        /* isUndef */ false, /* isEarlyClobber */ false,
        /* SubReg */ 0, /* isDebug */ true);
  };

  SmallVector<MachineOperand> MOs;

  // Helper: emit the instruction with every location operand set to $noreg.
  auto EmitUndef = [&]() {
    MOs.clear();
    MOs.assign(NumElts: Properties.getLocationOpCount(), Elt: GetRegOp(0));
    return BuildMI(MF, DL, MCID: Desc, IsIndirect: false, MOs, Variable: Var.getVariable(),
                   Expr: Properties.DIExpr);
  };

  // Don't bother passing any real operands to BuildMI if any of them would be
  // $noreg.
  if (DbgOps.empty())
    return EmitUndef();

  bool Indirect = Properties.Indirect;

  const DIExpression *Expr = Properties.DIExpr;

  assert(DbgOps.size() == Properties.getLocationOpCount());

  // If all locations are valid, accumulate them into our list of
  // MachineOperands. For any spilled locations, either update the indirectness
  // register or apply the appropriate transformations in the DIExpression.
  for (size_t Idx = 0; Idx < Properties.getLocationOpCount(); ++Idx) {
    const ResolvedDbgOp &Op = DbgOps[Idx];

    if (Op.IsConst) {
      MOs.push_back(Elt: Op.MO);
      continue;
    }

    LocIdx MLoc = Op.Loc;
    unsigned LocID = LocIdxToLocID[MLoc];
    if (LocID >= NumRegs) {
      // Stack location: decode which slot and which position within it.
      SpillLocationNo SpillID = locIDToSpill(ID: LocID);
      StackSlotPos StackIdx = locIDToSpillIdx(ID: LocID);
      unsigned short Offset = StackIdx.second;

      // TODO: support variables that are located in spill slots, with non-zero
      // offsets from the start of the spill slot. It would require some more
      // complex DIExpression calculations. This doesn't seem to be produced by
      // LLVM right now, so don't try and support it.
      // Accept no-subregister slots and subregisters where the offset is zero.
      // The consumer should already have type information to work out how large
      // the variable is.
      if (Offset == 0) {
        const SpillLoc &Spill = SpillLocs[SpillID.id()];
        unsigned Base = Spill.SpillBase;

        // There are several ways we can dereference things, and several inputs
        // to consider:
        // * NRVO variables will appear with IsIndirect set, but should have
        //   nothing else in their DIExpressions,
        // * Variables with DW_OP_stack_value in their expr already need an
        //   explicit dereference of the stack location,
        // * Values that don't match the variable size need DW_OP_deref_size,
        // * Everything else can just become a simple location expression.

        // We need to use deref_size whenever there's a mismatch between the
        // size of value and the size of variable portion being read.
        // Additionally, we should use it whenever dealing with stack_value
        // fragments, to avoid the consumer having to determine the deref size
        // from DW_OP_piece.
        bool UseDerefSize = false;
        unsigned ValueSizeInBits = getLocSizeInBits(L: MLoc);
        unsigned DerefSizeInBytes = ValueSizeInBits / 8;
        if (auto Fragment = Var.getFragment()) {
          unsigned VariableSizeInBits = Fragment->SizeInBits;
          if (VariableSizeInBits != ValueSizeInBits || Expr->isComplex())
            UseDerefSize = true;
        } else if (auto Size = Var.getVariable()->getSizeInBits()) {
          if (*Size != ValueSizeInBits) {
            UseDerefSize = true;
          }
        }

        SmallVector<uint64_t, 5> OffsetOps;
        TRI.getOffsetOpcodes(Offset: Spill.SpillOffset, Ops&: OffsetOps);
        bool StackValue = false;

        if (Properties.Indirect) {
          // This is something like an NRVO variable, where the pointer has been
          // spilt to the stack. It should end up being a memory location, with
          // the pointer to the variable loaded off the stack with a deref:
          assert(!Expr->isImplicit());
          OffsetOps.push_back(Elt: dwarf::DW_OP_deref);
        } else if (UseDerefSize && Expr->isSingleLocationExpression()) {
          // TODO: Figure out how to handle deref size issues for variadic
          // values.
          // We're loading a value off the stack that's not the same size as the
          // variable. Add / subtract stack offset, explicitly deref with a
          // size, and add DW_OP_stack_value if not already present.
          OffsetOps.push_back(Elt: dwarf::DW_OP_deref_size);
          OffsetOps.push_back(Elt: DerefSizeInBytes);
          StackValue = true;
        } else if (Expr->isComplex() || Properties.IsVariadic) {
          // A variable with no size ambiguity, but with extra elements in its
          // expression. Manually dereference the stack location.
          OffsetOps.push_back(Elt: dwarf::DW_OP_deref);
        } else {
          // A plain value that has been spilt to the stack, with no further
          // context. Request a location expression, marking the DBG_VALUE as
          // IsIndirect.
          Indirect = true;
        }

        Expr = DIExpression::appendOpsToArg(Expr, Ops: OffsetOps, ArgNo: Idx, StackValue);
        MOs.push_back(Elt: GetRegOp(Base));
      } else {
        // This is a stack location with a weird subregister offset: emit an
        // undef DBG_VALUE instead.
        return EmitUndef();
      }
    } else {
      // Non-empty, non-stack slot, must be a plain register.
      MOs.push_back(Elt: GetRegOp(LocID));
    }
  }

  return BuildMI(MF, DL, MCID: Desc, IsIndirect: Indirect, MOs, Variable: Var.getVariable(), Expr);
}
1309
/// Default construct and initialize the pass.
// NOTE(review): defaulted out-of-line; presumably so member types need not be
// complete at every use of the header — confirm before changing.
InstrRefBasedLDV::InstrRefBasedLDV() = default;
1312
1313bool InstrRefBasedLDV::isCalleeSaved(LocIdx L) const {
1314 unsigned Reg = MTracker->LocIdxToLocID[L];
1315 return isCalleeSavedReg(R: Reg);
1316}
1317bool InstrRefBasedLDV::isCalleeSavedReg(Register R) const {
1318 for (MCRegAliasIterator RAI(R, TRI, true); RAI.isValid(); ++RAI)
1319 if (CalleeSavedRegs.test(Idx: *RAI))
1320 return true;
1321 return false;
1322}
1323
1324//===----------------------------------------------------------------------===//
1325// Debug Range Extension Implementation
1326//===----------------------------------------------------------------------===//
1327
1328#ifndef NDEBUG
1329// Something to restore in the future.
1330// void InstrRefBasedLDV::printVarLocInMBB(..)
1331#endif
1332
/// Decode the fixed-stack location accessed by spill instruction \p MI's
/// single memory operand into a (base register, offset) pair, and fetch or
/// create the tracker's record for that stack location.
std::optional<SpillLocationNo>
InstrRefBasedLDV::extractSpillBaseRegAndOffset(const MachineInstr &MI) {
  assert(MI.hasOneMemOperand() &&
         "Spill instruction does not have exactly one memory operand?");
  auto MMOI = MI.memoperands_begin();
  const PseudoSourceValue *PVal = (*MMOI)->getPseudoValue();
  assert(PVal->kind() == PseudoSourceValue::FixedStack &&
         "Inconsistent memory operand in spill instruction");
  int FI = cast<FixedStackPseudoSourceValue>(Val: PVal)->getFrameIndex();
  const MachineBasicBlock *MBB = MI.getParent();
  Register Reg;
  // Ask the target how this frame index is addressed: a base register plus
  // an offset from it.
  StackOffset Offset = TFI->getFrameIndexReference(MF: *MBB->getParent(), FI, FrameReg&: Reg);
  return MTracker->getOrTrackSpillLoc(L: {.SpillBase: Reg, .SpillOffset: Offset});
}
1347
/// Find the tracked machine location written by the store in \p MI, using
/// the memory operand's size to pick the right position within the stack
/// slot. Returns std::nullopt if the slot or size index is untracked.
std::optional<LocIdx>
InstrRefBasedLDV::findLocationForMemOperand(const MachineInstr &MI) {
  std::optional<SpillLocationNo> SpillLoc = extractSpillBaseRegAndOffset(MI);
  if (!SpillLoc)
    return std::nullopt;

  // Where in the stack slot is this value defined -- i.e., what size of value
  // is this? An important question, because it could be loaded into a register
  // from the stack at some point. Happily the memory operand will tell us
  // the size written to the stack.
  auto *MemOperand = *MI.memoperands_begin();
  LocationSize SizeInBits = MemOperand->getSizeInBits();
  assert(SizeInBits.hasValue() && "Expected to find a valid size!");

  // Find that position in the stack indexes we're tracking.
  auto IdxIt = MTracker->StackSlotIdxes.find(Val: {SizeInBits.getValue(), 0});
  if (IdxIt == MTracker->StackSlotIdxes.end())
    // That index is not tracked. This is surprising, and unlikely to ever
    // occur, but the safe action is to indicate the variable is optimised out.
    return std::nullopt;

  unsigned SpillID = MTracker->getSpillIDWithIdx(Spill: *SpillLoc, Idx: IdxIt->second);
  return MTracker->getSpillMLoc(SpillID);
}
1372
/// End all previous ranges related to @MI and start a new range from @MI
/// if it is a DBG_VALUE instr. Returns true if \p MI was handled (even by
/// deliberately ignoring it), false if it is not a debug value at all.
bool InstrRefBasedLDV::transferDebugValue(const MachineInstr &MI) {
  if (!MI.isDebugValue())
    return false;

  assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) &&
         "Expected inlined-at fields to agree");

  // If there are no instructions in this lexical scope, do no location tracking
  // at all, this variable shouldn't get a legitimate location range.
  auto *Scope = LS.findLexicalScope(DL: MI.getDebugLoc().get());
  if (Scope == nullptr)
    return true; // handled it; by doing nothing

  // MLocTracker needs to know that this register is read, even if it's only
  // read by a debug inst.
  for (const MachineOperand &MO : MI.debug_operands())
    if (MO.isReg() && MO.getReg() != 0)
      (void)MTracker->readReg(R: MO.getReg());

  // If we're preparing for the second analysis (variables), the machine value
  // locations are already solved, and we report this DBG_VALUE and the value
  // it refers to to VLocTracker.
  if (VTracker) {
    SmallVector<DbgOpID> DebugOps;
    // Feed defVar the new variable location, or if this is a DBG_VALUE $noreg,
    // feed defVar None.
    if (!MI.isUndefDebugValue()) {
      for (const MachineOperand &MO : MI.debug_operands()) {
        // There should be no undef registers here, as we've screened for undef
        // debug values.
        if (MO.isReg()) {
          DebugOps.push_back(Elt: DbgOpStore.insert(Op: MTracker->readReg(R: MO.getReg())));
        } else if (MO.isImm() || MO.isFPImm() || MO.isCImm()) {
          DebugOps.push_back(Elt: DbgOpStore.insert(Op: MO));
        } else {
          llvm_unreachable("Unexpected debug operand type.");
        }
      }
    }
    VTracker->defVar(MI, Properties: DbgValueProperties(MI), DebugOps);
  }

  // If performing final tracking of transfers, report this variable definition
  // to the TransferTracker too.
  if (TTracker)
    TTracker->redefVar(MI);
  return true;
}
1423
/// Resolve the (instruction, operand) pair referenced by a DBG_INSTR_REF to
/// the machine value number it identifies: follow any debug-value
/// substitutions recorded during optimisation, look up the defining
/// instruction or debug PHI, then narrow through any subregister extractions
/// seen along the substitution chain. Returns std::nullopt if the value was
/// optimised out or cannot be expressed.
std::optional<ValueIDNum> InstrRefBasedLDV::getValueForInstrRef(
    unsigned InstNo, unsigned OpNo, MachineInstr &MI,
    const FuncValueTable *MLiveOuts, const FuncValueTable *MLiveIns) {
  // Various optimizations may have happened to the value during codegen,
  // recorded in the value substitution table. Apply any substitutions to
  // the instruction / operand number in this DBG_INSTR_REF, and collect
  // any subregister extractions performed during optimization.
  const MachineFunction &MF = *MI.getParent()->getParent();

  // Create dummy substitution with Src set, for lookup.
  auto SoughtSub =
      MachineFunction::DebugSubstitution({InstNo, OpNo}, {0, 0}, 0);

  SmallVector<unsigned, 4> SeenSubregs;
  auto LowerBoundIt = llvm::lower_bound(Range: MF.DebugValueSubstitutions, Value&: SoughtSub);
  while (LowerBoundIt != MF.DebugValueSubstitutions.end() &&
         LowerBoundIt->Src == SoughtSub.Src) {
    // Follow the chain: the destination of this substitution becomes the
    // next source to look up.
    std::tie(args&: InstNo, args&: OpNo) = LowerBoundIt->Dest;
    SoughtSub.Src = LowerBoundIt->Dest;
    if (unsigned Subreg = LowerBoundIt->Subreg)
      SeenSubregs.push_back(Elt: Subreg);
    LowerBoundIt = llvm::lower_bound(Range: MF.DebugValueSubstitutions, Value&: SoughtSub);
  }

  // Default machine value number is <None> -- if no instruction defines
  // the corresponding value, it must have been optimized out.
  std::optional<ValueIDNum> NewID;

  // Try to lookup the instruction number, and find the machine value number
  // that it defines. It could be an instruction, or a PHI.
  auto InstrIt = DebugInstrNumToInstr.find(x: InstNo);
  auto PHIIt = llvm::lower_bound(Range&: DebugPHINumToValue, Value&: InstNo);
  if (InstrIt != DebugInstrNumToInstr.end()) {
    const MachineInstr &TargetInstr = *InstrIt->second.first;
    uint64_t BlockNo = TargetInstr.getParent()->getNumber();

    // Pick out the designated operand. It might be a memory reference, if
    // a register def was folded into a stack store.
    if (OpNo == MachineFunction::DebugOperandMemNumber &&
        TargetInstr.hasOneMemOperand()) {
      std::optional<LocIdx> L = findLocationForMemOperand(MI: TargetInstr);
      if (L)
        NewID = ValueIDNum(BlockNo, InstrIt->second.second, *L);
    } else if (OpNo != MachineFunction::DebugOperandMemNumber) {
      // Permit the debug-info to be completely wrong: identifying a nonexistent
      // operand, or one that is not a register definition, means something
      // unexpected happened during optimisation. Broken debug-info, however,
      // shouldn't crash the compiler -- instead leave the variable value as
      // None, which will make it appear "optimised out".
      if (OpNo < TargetInstr.getNumOperands()) {
        const MachineOperand &MO = TargetInstr.getOperand(i: OpNo);

        if (MO.isReg() && MO.isDef() && MO.getReg()) {
          unsigned LocID = MTracker->getLocID(Reg: MO.getReg());
          LocIdx L = MTracker->LocIDToLocIdx[LocID];
          NewID = ValueIDNum(BlockNo, InstrIt->second.second, L);
        }
      }

      if (!NewID) {
        LLVM_DEBUG(
            { dbgs() << "Seen instruction reference to illegal operand\n"; });
      }
    }
    // else: NewID is left as None.
  } else if (PHIIt != DebugPHINumToValue.end() && PHIIt->InstrNum == InstNo) {
    // It's actually a PHI value. Which value it is might not be obvious, use
    // the resolver helper to find out.
    assert(MLiveOuts && MLiveIns);
    NewID = resolveDbgPHIs(MF&: *MI.getParent()->getParent(), MLiveOuts: *MLiveOuts, MLiveIns: *MLiveIns,
                           Here&: MI, InstrNum: InstNo);
  }

  // Apply any subregister extractions, in reverse. We might have seen code
  // like this:
  //    CALL64 @foo, implicit-def $rax
  //    %0:gr64 = COPY $rax
  //    %1:gr32 = COPY %0.sub_32bit
  //    %2:gr16 = COPY %1.sub_16bit
  //    %3:gr8  = COPY %2.sub_8bit
  // In which case each copy would have been recorded as a substitution with
  // a subregister qualifier. Apply those qualifiers now.
  if (NewID && !SeenSubregs.empty()) {
    unsigned Offset = 0;
    unsigned Size = 0;

    // Look at each subregister that we passed through, and progressively
    // narrow in, accumulating any offsets that occur. Substitutions should
    // only ever be the same or narrower width than what they read from;
    // iterate in reverse order so that we go from wide to small.
    for (unsigned Subreg : reverse(C&: SeenSubregs)) {
      unsigned ThisSize = TRI->getSubRegIdxSize(Idx: Subreg);
      unsigned ThisOffset = TRI->getSubRegIdxOffset(Idx: Subreg);
      Offset += ThisOffset;
      Size = (Size == 0) ? ThisSize : std::min(a: Size, b: ThisSize);
    }

    // If that worked, look for an appropriate subregister with the register
    // where the define happens. Don't look at values that were defined during
    // a stack write: we can't currently express register locations within
    // spills.
    LocIdx L = NewID->getLoc();
    if (NewID && !MTracker->isSpill(Idx: L)) {
      // Find the register class for the register where this def happened.
      // FIXME: no index for this?
      Register Reg = MTracker->LocIdxToLocID[L];
      const TargetRegisterClass *TRC = nullptr;
      for (const auto *TRCI : TRI->regclasses())
        if (TRCI->contains(Reg))
          TRC = TRCI;
      assert(TRC && "Couldn't find target register class?");

      // If the register we have isn't the right size or in the right place,
      // Try to find a subregister inside it.
      unsigned MainRegSize = TRI->getRegSizeInBits(RC: *TRC);
      if (Size != MainRegSize || Offset) {
        // Enumerate all subregisters, searching.
        Register NewReg = 0;
        for (MCPhysReg SR : TRI->subregs(Reg)) {
          unsigned Subreg = TRI->getSubRegIndex(RegNo: Reg, SubRegNo: SR);
          unsigned SubregSize = TRI->getSubRegIdxSize(Idx: Subreg);
          unsigned SubregOffset = TRI->getSubRegIdxOffset(Idx: Subreg);
          if (SubregSize == Size && SubregOffset == Offset) {
            NewReg = SR;
            break;
          }
        }

        // If we didn't find anything: there's no way to express our value.
        if (!NewReg) {
          NewID = std::nullopt;
        } else {
          // Re-state the value as being defined within the subregister
          // that we found.
          LocIdx NewLoc = MTracker->lookupOrTrackRegister(ID: NewReg);
          NewID = ValueIDNum(NewID->getBlock(), NewID->getInst(), NewLoc);
        }
      }
    } else {
      // If we can't handle subregisters, unset the new value.
      NewID = std::nullopt;
    }
  }

  return NewID;
}
1570
/// Interpret a DBG_INSTR_REF instruction: resolve each instruction-number /
/// operand-number reference (or constant operand) to a machine value number,
/// record the resulting variable value in VTracker while building the
/// transfer function, and on the final pass (TTracker set) emit an
/// equivalent DBG_VALUE at a concrete machine location.
/// \returns true if MI was a DBG_INSTR_REF and is now fully handled.
bool InstrRefBasedLDV::transferDebugInstrRef(MachineInstr &MI,
                                             const FuncValueTable *MLiveOuts,
                                             const FuncValueTable *MLiveIns) {
  if (!MI.isDebugRef())
    return false;

  // Only handle this instruction when we are building the variable value
  // transfer function.
  if (!VTracker && !TTracker)
    return false;

  const DILocalVariable *Var = MI.getDebugVariable();
  const DIExpression *Expr = MI.getDebugExpression();
  const DILocation *DebugLoc = MI.getDebugLoc();
  const DILocation *InlinedAt = DebugLoc->getInlinedAt();
  assert(Var->isValidLocationForIntrinsic(DebugLoc) &&
         "Expected inlined-at fields to agree");

  DebugVariable V(Var, Expr, InlinedAt);

  auto *Scope = LS.findLexicalScope(DL: MI.getDebugLoc().get());
  if (Scope == nullptr)
    return true; // Handled by doing nothing. This variable is never in scope.

  // Resolve every debug operand to a DbgOpID: constants go straight into the
  // DbgOpStore; instruction references are looked up to find the machine
  // value number they name.
  SmallVector<DbgOpID> DbgOpIDs;
  for (const MachineOperand &MO : MI.debug_operands()) {
    if (!MO.isDbgInstrRef()) {
      assert(!MO.isReg() && "DBG_INSTR_REF should not contain registers");
      DbgOpID ConstOpID = DbgOpStore.insert(Op: DbgOp(MO));
      DbgOpIDs.push_back(Elt: ConstOpID);
      continue;
    }

    unsigned InstNo = MO.getInstrRefInstrIndex();
    unsigned OpNo = MO.getInstrRefOpIndex();

    // Default machine value number is <None> -- if no instruction defines
    // the corresponding value, it must have been optimized out.
    std::optional<ValueIDNum> NewID =
        getValueForInstrRef(InstNo, OpNo, MI, MLiveOuts, MLiveIns);
    // We have a value number or std::nullopt. If the latter, then kill the
    // entire debug value.
    if (NewID) {
      DbgOpIDs.push_back(Elt: DbgOpStore.insert(Op: *NewID));
    } else {
      DbgOpIDs.clear();
      break;
    }
  }

  // We have a DbgOpID for every value or for none. Tell the variable value
  // tracker about it. The rest of this LiveDebugValues implementation acts
  // exactly the same for DBG_INSTR_REFs as DBG_VALUEs (just, the former can
  // refer to values that aren't immediately available).
  DbgValueProperties Properties(Expr, false, true);
  if (VTracker)
    VTracker->defVar(MI, Properties, DebugOps: DbgOpIDs);

  // If we're on the final pass through the function, decompose this INSTR_REF
  // into a plain DBG_VALUE.
  if (!TTracker)
    return true;

  // Fetch the concrete DbgOps now, as we will need them later.
  SmallVector<DbgOp> DbgOps;
  for (DbgOpID OpID : DbgOpIDs) {
    DbgOps.push_back(Elt: DbgOpStore.find(ID: OpID));
  }

  // Pick a location for the machine value number, if such a location exists.
  // (This information could be stored in TransferTracker to make it faster).
  SmallDenseMap<ValueIDNum, TransferTracker::LocationAndQuality> FoundLocs;
  SmallVector<ValueIDNum> ValuesToFind;
  // Initialize the preferred-location map with illegal locations, to be
  // filled in later.
  for (const DbgOp &Op : DbgOps) {
    if (!Op.IsConst)
      if (FoundLocs.insert(KV: {Op.ID, TransferTracker::LocationAndQuality()})
              .second)
        ValuesToFind.push_back(Elt: Op.ID);
  }

  // Scan all tracked machine locations for the values we need, preferring
  // "longer term" (higher quality) locations when several hold the same
  // value.
  for (auto Location : MTracker->locations()) {
    LocIdx CurL = Location.Idx;
    ValueIDNum ID = MTracker->readMLoc(L: CurL);
    auto ValueToFindIt = find(Range&: ValuesToFind, Val: ID);
    if (ValueToFindIt == ValuesToFind.end())
      continue;
    auto &Previous = FoundLocs.find(Val: ID)->second;
    // If this is the first location with that value, pick it. Otherwise,
    // consider whether it's a "longer term" location.
    std::optional<TransferTracker::LocationQuality> ReplacementQuality =
        TTracker->getLocQualityIfBetter(L: CurL, Min: Previous.getQuality());
    if (ReplacementQuality) {
      Previous = TransferTracker::LocationAndQuality(CurL, *ReplacementQuality);
      if (Previous.isBest()) {
        // This location can't be improved upon; stop searching for the value.
        ValuesToFind.erase(CI: ValueToFindIt);
        if (ValuesToFind.empty())
          break;
      }
    }
  }

  // Map each DbgOp onto its chosen location. If any non-constant operand has
  // no location at all, the whole debug value becomes empty (undef).
  SmallVector<ResolvedDbgOp> NewLocs;
  for (const DbgOp &DbgOp : DbgOps) {
    if (DbgOp.IsConst) {
      NewLocs.push_back(Elt: DbgOp.MO);
      continue;
    }
    LocIdx FoundLoc = FoundLocs.find(Val: DbgOp.ID)->second.getLoc();
    if (FoundLoc.isIllegal()) {
      NewLocs.clear();
      break;
    }
    NewLocs.push_back(Elt: FoundLoc);
  }
  // Tell transfer tracker that the variable value has changed.
  TTracker->redefVar(MI, Properties, NewLocs);

  // If there were values with no location, but all such values are defined in
  // later instructions in this block, this is a block-local use-before-def.
  if (!DbgOps.empty() && NewLocs.empty()) {
    bool IsValidUseBeforeDef = true;
    uint64_t LastUseBeforeDef = 0;
    for (auto ValueLoc : FoundLocs) {
      ValueIDNum NewID = ValueLoc.first;
      LocIdx FoundLoc = ValueLoc.second.getLoc();
      if (!FoundLoc.isIllegal())
        continue;
      // If we have a value with no location that is not defined in this block,
      // then it has no location in this block, leaving this value undefined.
      if (NewID.getBlock() != CurBB || NewID.getInst() <= CurInst) {
        IsValidUseBeforeDef = false;
        break;
      }
      // Remember the latest defining instruction: the variable location can
      // only be installed once all of its values are defined.
      LastUseBeforeDef = std::max(a: LastUseBeforeDef, b: NewID.getInst());
    }
    if (IsValidUseBeforeDef) {
      TTracker->addUseBeforeDef(Var: V, Properties: {MI.getDebugExpression(), false, true},
                                DbgOps, Inst: LastUseBeforeDef);
    }
  }

  // Produce a DBG_VALUE representing what this DBG_INSTR_REF meant.
  // This DBG_VALUE is potentially a $noreg / undefined location, if
  // FoundLoc is illegal.
  // (XXX -- could morph the DBG_INSTR_REF in the future).
  MachineInstr *DbgMI = MTracker->emitLoc(DbgOps: NewLocs, Var: V, Properties);

  TTracker->PendingDbgValues.push_back(Elt: DbgMI);
  TTracker->flushDbgValues(Pos: MI.getIterator(), MBB: nullptr);
  return true;
}
1724
/// Interpret a DBG_PHI instruction: record which machine value number is
/// currently in the named register or stack slot, so that DBG_INSTR_REFs
/// naming this PHI can be resolved later. Only analysed while solving the
/// machine value-location problem (i.e. when neither tracker is active).
/// \returns true if MI was a DBG_PHI and has been handled.
bool InstrRefBasedLDV::transferDebugPHI(MachineInstr &MI) {
  if (!MI.isDebugPHI())
    return false;

  // Analyse these only when solving the machine value location problem.
  if (VTracker || TTracker)
    return true;

  // First operand is the value location, either a stack slot or register.
  // Second is the debug instruction number of the original PHI.
  const MachineOperand &MO = MI.getOperand(i: 0);
  unsigned InstrNum = MI.getOperand(i: 1).getImm();

  auto EmitBadPHI = [this, &MI, InstrNum]() -> bool {
    // Helper lambda to do any accounting when we fail to find a location for
    // a DBG_PHI. This can happen if DBG_PHIs are malformed, or refer to a
    // dead stack slot, for example.
    // Record a DebugPHIRecord with an empty value + location.
    DebugPHINumToValue.push_back(
        Elt: {.InstrNum: InstrNum, .MBB: MI.getParent(), .ValueRead: std::nullopt, .ReadLoc: std::nullopt});
    return true;
  };

  if (MO.isReg() && MO.getReg()) {
    // The value is whatever's currently in the register. Read and record it,
    // to be analysed later.
    Register Reg = MO.getReg();
    ValueIDNum Num = MTracker->readReg(R: Reg);
    auto PHIRec = DebugPHIRecord(
        {.InstrNum: InstrNum, .MBB: MI.getParent(), .ValueRead: Num, .ReadLoc: MTracker->lookupOrTrackRegister(ID: Reg)});
    DebugPHINumToValue.push_back(Elt: PHIRec);

    // Ensure this register (and everything aliasing it) is tracked.
    for (MCRegAliasIterator RAI(MO.getReg(), TRI, true); RAI.isValid(); ++RAI)
      MTracker->lookupOrTrackRegister(ID: *RAI);
  } else if (MO.isFI()) {
    // The value is whatever's in this stack slot.
    unsigned FI = MO.getIndex();

    // If the stack slot is dead, then this was optimized away.
    // FIXME: stack slot colouring should account for slots that get merged.
    if (MFI->isDeadObjectIndex(ObjectIdx: FI))
      return EmitBadPHI();

    // Identify this spill slot, ensure it's tracked.
    Register Base;
    StackOffset Offs = TFI->getFrameIndexReference(MF: *MI.getMF(), FI, FrameReg&: Base);
    SpillLoc SL = {.SpillBase: Base, .SpillOffset: Offs};
    std::optional<SpillLocationNo> SpillNo = MTracker->getOrTrackSpillLoc(L: SL);

    // We might be able to find a value, but have chosen not to, to avoid
    // tracking too much stack information.
    if (!SpillNo)
      return EmitBadPHI();

    // Any stack location DBG_PHI should have an associated bit-size.
    assert(MI.getNumOperands() == 3 && "Stack DBG_PHI with no size?");
    unsigned slotBitSize = MI.getOperand(i: 2).getImm();

    // Read the value in the sized sub-slot at offset zero of the spill.
    unsigned SpillID = MTracker->getLocID(Spill: *SpillNo, Idx: {slotBitSize, 0});
    LocIdx SpillLoc = MTracker->getSpillMLoc(SpillID);
    ValueIDNum Result = MTracker->readMLoc(L: SpillLoc);

    // Record this DBG_PHI for later analysis.
    auto DbgPHI = DebugPHIRecord({.InstrNum: InstrNum, .MBB: MI.getParent(), .ValueRead: Result, .ReadLoc: SpillLoc});
    DebugPHINumToValue.push_back(Elt: DbgPHI);
  } else {
    // Else: if the operand is neither a legal register or a stack slot, then
    // we're being fed illegal debug-info. Record an empty PHI, so that any
    // debug users trying to read this number will be put off trying to
    // interpret the value.
    LLVM_DEBUG(
        { dbgs() << "Seen DBG_PHI with unrecognised operand format\n"; });
    return EmitBadPHI();
  }

  return true;
}
1803
/// Interpret the register and spill-slot definitions performed by MI:
/// def every clobbered machine location in MTracker, and (on the final pass)
/// tell TTracker about the clobbers so variable locations can be recovered
/// from alternative locations or terminated.
void InstrRefBasedLDV::transferRegisterDef(MachineInstr &MI) {
  // Meta Instructions do not affect the debug liveness of any register they
  // define.
  if (MI.isImplicitDef()) {
    // Except when there's an implicit def, and the location it's defining has
    // no value number. The whole point of an implicit def is to announce that
    // the register is live, without being specific about its value. So define
    // a value if there isn't one already.
    ValueIDNum Num = MTracker->readReg(R: MI.getOperand(i: 0).getReg());
    // Has a legitimate value -> ignore the implicit def.
    if (Num.getLoc() != 0)
      return;
    // Otherwise, def it here.
  } else if (MI.isMetaInstruction())
    return;

  // We always ignore SP defines on call instructions, they don't actually
  // change the value of the stack pointer... except for win32's _chkstk. This
  // is rare: filter quickly for the common case (no stack adjustments, not a
  // call, etc). If it is a call that modifies SP, recognise the SP register
  // defs.
  bool CallChangesSP = false;
  if (AdjustsStackInCalls && MI.isCall() && MI.getOperand(i: 0).isSymbol() &&
      !strcmp(s1: MI.getOperand(i: 0).getSymbolName(), s2: StackProbeSymbolName.data()))
    CallChangesSP = true;

  // Test whether we should ignore a def of this register due to it being part
  // of the stack pointer.
  auto IgnoreSPAlias = [this, &MI, CallChangesSP](Register R) -> bool {
    if (CallChangesSP)
      return false;
    return MI.isCall() && MTracker->SPAliases.count(V: R);
  };

  // Find the regs killed by MI, and find regmasks of preserved regs.
  // Max out the number of statically allocated elements in `DeadRegs`, as this
  // prevents fallback to std::set::count() operations.
  SmallSet<uint32_t, 32> DeadRegs;
  SmallVector<const uint32_t *, 4> RegMasks;
  SmallVector<const MachineOperand *, 4> RegMaskPtrs;
  for (const MachineOperand &MO : MI.operands()) {
    // Determine whether the operand is a register def.
    if (MO.isReg() && MO.isDef() && MO.getReg() && MO.getReg().isPhysical() &&
        !IgnoreSPAlias(MO.getReg())) {
      // Remove ranges of all aliased registers.
      for (MCRegAliasIterator RAI(MO.getReg(), TRI, true); RAI.isValid(); ++RAI)
        // FIXME: Can we break out of this loop early if no insertion occurs?
        DeadRegs.insert(V: *RAI);
    } else if (MO.isRegMask()) {
      RegMasks.push_back(Elt: MO.getRegMask());
      RegMaskPtrs.push_back(Elt: &MO);
    }
  }

  // Tell MLocTracker about all definitions, of regmasks and otherwise.
  for (uint32_t DeadReg : DeadRegs)
    MTracker->defReg(R: DeadReg, BB: CurBB, Inst: CurInst);

  for (const auto *MO : RegMaskPtrs)
    MTracker->writeRegMask(MO, CurBB, InstID: CurInst);

  // If this instruction writes to a spill slot, def that slot.
  if (hasFoldedStackStore(MI)) {
    if (std::optional<SpillLocationNo> SpillNo =
            extractSpillBaseRegAndOffset(MI)) {
      // Def every sized/offset sub-slot of the spill location.
      for (unsigned int I = 0; I < MTracker->NumSlotIdxes; ++I) {
        unsigned SpillID = MTracker->getSpillIDWithIdx(Spill: *SpillNo, Idx: I);
        LocIdx L = MTracker->getSpillMLoc(SpillID);
        MTracker->setMLoc(L, Num: ValueIDNum(CurBB, CurInst, L));
      }
    }
  }

  // Nothing more to do unless we're emitting final variable locations.
  if (!TTracker)
    return;

  // When committing variable values to locations: tell transfer tracker that
  // we've clobbered things. It may be able to recover the variable from a
  // different location.

  // Inform TTracker about any direct clobbers.
  for (uint32_t DeadReg : DeadRegs) {
    LocIdx Loc = MTracker->lookupOrTrackRegister(ID: DeadReg);
    TTracker->clobberMloc(MLoc: Loc, Pos: MI.getIterator(), MakeUndef: false);
  }

  // Look for any clobbers performed by a register mask. Only test locations
  // that are actually being tracked.
  if (!RegMaskPtrs.empty()) {
    for (auto L : MTracker->locations()) {
      // Stack locations can't be clobbered by regmasks.
      if (MTracker->isSpill(Idx: L.Idx))
        continue;

      Register Reg = MTracker->LocIdxToLocID[L.Idx];
      if (IgnoreSPAlias(Reg))
        continue;

      for (const auto *MO : RegMaskPtrs)
        if (MO->clobbersPhysReg(PhysReg: Reg))
          TTracker->clobberMloc(MLoc: L.Idx, Pos: MI.getIterator(), MakeUndef: false);
    }
  }

  // Tell TTracker about any folded stack store.
  if (hasFoldedStackStore(MI)) {
    if (std::optional<SpillLocationNo> SpillNo =
            extractSpillBaseRegAndOffset(MI)) {
      for (unsigned int I = 0; I < MTracker->NumSlotIdxes; ++I) {
        unsigned SpillID = MTracker->getSpillIDWithIdx(Spill: *SpillNo, Idx: I);
        LocIdx L = MTracker->getSpillMLoc(SpillID);
        // The slot contents are overwritten: mark undef rather than
        // recoverable.
        TTracker->clobberMloc(MLoc: L, Pos: MI.getIterator(), MakeUndef: true);
      }
    }
  }
}
1920
1921void InstrRefBasedLDV::performCopy(Register SrcRegNum, Register DstRegNum) {
1922 // In all circumstances, re-def all aliases. It's definitely a new value now.
1923 for (MCRegAliasIterator RAI(DstRegNum, TRI, true); RAI.isValid(); ++RAI)
1924 MTracker->defReg(R: *RAI, BB: CurBB, Inst: CurInst);
1925
1926 ValueIDNum SrcValue = MTracker->readReg(R: SrcRegNum);
1927 MTracker->setReg(R: DstRegNum, ValueID: SrcValue);
1928
1929 // Copy subregisters from one location to another.
1930 for (MCSubRegIndexIterator SRI(SrcRegNum, TRI); SRI.isValid(); ++SRI) {
1931 unsigned SrcSubReg = SRI.getSubReg();
1932 unsigned SubRegIdx = SRI.getSubRegIndex();
1933 unsigned DstSubReg = TRI->getSubReg(Reg: DstRegNum, Idx: SubRegIdx);
1934 if (!DstSubReg)
1935 continue;
1936
1937 // Do copy. There are two matching subregisters, the source value should
1938 // have been def'd when the super-reg was, the latter might not be tracked
1939 // yet.
1940 // This will force SrcSubReg to be tracked, if it isn't yet. Will read
1941 // mphi values if it wasn't tracked.
1942 LocIdx SrcL = MTracker->lookupOrTrackRegister(ID: SrcSubReg);
1943 LocIdx DstL = MTracker->lookupOrTrackRegister(ID: DstSubReg);
1944 (void)SrcL;
1945 (void)DstL;
1946 ValueIDNum CpyValue = MTracker->readReg(R: SrcSubReg);
1947
1948 MTracker->setReg(R: DstSubReg, ValueID: CpyValue);
1949 }
1950}
1951
1952std::optional<SpillLocationNo>
1953InstrRefBasedLDV::isSpillInstruction(const MachineInstr &MI,
1954 MachineFunction *MF) {
1955 // TODO: Handle multiple stores folded into one.
1956 if (!MI.hasOneMemOperand())
1957 return std::nullopt;
1958
1959 // Reject any memory operand that's aliased -- we can't guarantee its value.
1960 auto MMOI = MI.memoperands_begin();
1961 const PseudoSourceValue *PVal = (*MMOI)->getPseudoValue();
1962 if (PVal->isAliased(MFI))
1963 return std::nullopt;
1964
1965 if (!MI.getSpillSize(TII) && !MI.getFoldedSpillSize(TII))
1966 return std::nullopt; // This is not a spill instruction, since no valid size
1967 // was returned from either function.
1968
1969 return extractSpillBaseRegAndOffset(MI);
1970}
1971
1972bool InstrRefBasedLDV::isLocationSpill(const MachineInstr &MI,
1973 MachineFunction *MF, unsigned &Reg) {
1974 if (!isSpillInstruction(MI, MF))
1975 return false;
1976
1977 int FI;
1978 Reg = TII->isStoreToStackSlotPostFE(MI, FrameIndex&: FI);
1979 return Reg != 0;
1980}
1981
1982std::optional<SpillLocationNo>
1983InstrRefBasedLDV::isRestoreInstruction(const MachineInstr &MI,
1984 MachineFunction *MF, unsigned &Reg) {
1985 if (!MI.hasOneMemOperand())
1986 return std::nullopt;
1987
1988 // FIXME: Handle folded restore instructions with more than one memory
1989 // operand.
1990 if (MI.getRestoreSize(TII)) {
1991 Reg = MI.getOperand(i: 0).getReg();
1992 return extractSpillBaseRegAndOffset(MI);
1993 }
1994 return std::nullopt;
1995}
1996
1997bool InstrRefBasedLDV::transferSpillOrRestoreInst(MachineInstr &MI) {
1998 // XXX -- it's too difficult to implement VarLocBasedImpl's stack location
1999 // limitations under the new model. Therefore, when comparing them, compare
2000 // versions that don't attempt spills or restores at all.
2001 if (EmulateOldLDV)
2002 return false;
2003
2004 // Strictly limit ourselves to plain loads and stores, not all instructions
2005 // that can access the stack.
2006 int DummyFI = -1;
2007 if (!TII->isStoreToStackSlotPostFE(MI, FrameIndex&: DummyFI) &&
2008 !TII->isLoadFromStackSlotPostFE(MI, FrameIndex&: DummyFI))
2009 return false;
2010
2011 MachineFunction *MF = MI.getMF();
2012 unsigned Reg;
2013
2014 LLVM_DEBUG(dbgs() << "Examining instruction: "; MI.dump(););
2015
2016 // Strictly limit ourselves to plain loads and stores, not all instructions
2017 // that can access the stack.
2018 int FIDummy;
2019 if (!TII->isStoreToStackSlotPostFE(MI, FrameIndex&: FIDummy) &&
2020 !TII->isLoadFromStackSlotPostFE(MI, FrameIndex&: FIDummy))
2021 return false;
2022
2023 // First, if there are any DBG_VALUEs pointing at a spill slot that is
2024 // written to, terminate that variable location. The value in memory
2025 // will have changed. DbgEntityHistoryCalculator doesn't try to detect this.
2026 if (std::optional<SpillLocationNo> Loc = isSpillInstruction(MI, MF)) {
2027 // Un-set this location and clobber, so that earlier locations don't
2028 // continue past this store.
2029 for (unsigned SlotIdx = 0; SlotIdx < MTracker->NumSlotIdxes; ++SlotIdx) {
2030 unsigned SpillID = MTracker->getSpillIDWithIdx(Spill: *Loc, Idx: SlotIdx);
2031 std::optional<LocIdx> MLoc = MTracker->getSpillMLoc(SpillID);
2032 if (!MLoc)
2033 continue;
2034
2035 // We need to over-write the stack slot with something (here, a def at
2036 // this instruction) to ensure no values are preserved in this stack slot
2037 // after the spill. It also prevents TTracker from trying to recover the
2038 // location and re-installing it in the same place.
2039 ValueIDNum Def(CurBB, CurInst, *MLoc);
2040 MTracker->setMLoc(L: *MLoc, Num: Def);
2041 if (TTracker)
2042 TTracker->clobberMloc(MLoc: *MLoc, Pos: MI.getIterator());
2043 }
2044 }
2045
2046 // Try to recognise spill and restore instructions that may transfer a value.
2047 if (isLocationSpill(MI, MF, Reg)) {
2048 // isLocationSpill returning true should guarantee we can extract a
2049 // location.
2050 SpillLocationNo Loc = *extractSpillBaseRegAndOffset(MI);
2051
2052 auto DoTransfer = [&](Register SrcReg, unsigned SpillID) {
2053 auto ReadValue = MTracker->readReg(R: SrcReg);
2054 LocIdx DstLoc = MTracker->getSpillMLoc(SpillID);
2055 MTracker->setMLoc(L: DstLoc, Num: ReadValue);
2056
2057 if (TTracker) {
2058 LocIdx SrcLoc = MTracker->getRegMLoc(R: SrcReg);
2059 TTracker->transferMlocs(Src: SrcLoc, Dst: DstLoc, Pos: MI.getIterator());
2060 }
2061 };
2062
2063 // Then, transfer subreg bits.
2064 for (MCPhysReg SR : TRI->subregs(Reg)) {
2065 // Ensure this reg is tracked,
2066 (void)MTracker->lookupOrTrackRegister(ID: SR);
2067 unsigned SubregIdx = TRI->getSubRegIndex(RegNo: Reg, SubRegNo: SR);
2068 unsigned SpillID = MTracker->getLocID(Spill: Loc, SpillSubReg: SubregIdx);
2069 DoTransfer(SR, SpillID);
2070 }
2071
2072 // Directly lookup size of main source reg, and transfer.
2073 unsigned Size = TRI->getRegSizeInBits(Reg, MRI: *MRI);
2074 unsigned SpillID = MTracker->getLocID(Spill: Loc, Idx: {Size, 0});
2075 DoTransfer(Reg, SpillID);
2076 } else {
2077 std::optional<SpillLocationNo> Loc = isRestoreInstruction(MI, MF, Reg);
2078 if (!Loc)
2079 return false;
2080
2081 // Assumption: we're reading from the base of the stack slot, not some
2082 // offset into it. It seems very unlikely LLVM would ever generate
2083 // restores where this wasn't true. This then becomes a question of what
2084 // subregisters in the destination register line up with positions in the
2085 // stack slot.
2086
2087 // Def all registers that alias the destination.
2088 for (MCRegAliasIterator RAI(Reg, TRI, true); RAI.isValid(); ++RAI)
2089 MTracker->defReg(R: *RAI, BB: CurBB, Inst: CurInst);
2090
2091 // Now find subregisters within the destination register, and load values
2092 // from stack slot positions.
2093 auto DoTransfer = [&](Register DestReg, unsigned SpillID) {
2094 LocIdx SrcIdx = MTracker->getSpillMLoc(SpillID);
2095 auto ReadValue = MTracker->readMLoc(L: SrcIdx);
2096 MTracker->setReg(R: DestReg, ValueID: ReadValue);
2097 };
2098
2099 for (MCPhysReg SR : TRI->subregs(Reg)) {
2100 unsigned Subreg = TRI->getSubRegIndex(RegNo: Reg, SubRegNo: SR);
2101 unsigned SpillID = MTracker->getLocID(Spill: *Loc, SpillSubReg: Subreg);
2102 DoTransfer(SR, SpillID);
2103 }
2104
2105 // Directly look up this registers slot idx by size, and transfer.
2106 unsigned Size = TRI->getRegSizeInBits(Reg, MRI: *MRI);
2107 unsigned SpillID = MTracker->getLocID(Spill: *Loc, Idx: {Size, 0});
2108 DoTransfer(Reg, SpillID);
2109 }
2110 return true;
2111}
2112
/// Interpret a copy-like instruction: propagate the source register's machine
/// value into the destination (including matching subregisters), and tell
/// TTracker which locations were clobbered so variable locations can be
/// recovered or moved along with the copy.
/// \returns true if MI was copy-like and has been handled.
bool InstrRefBasedLDV::transferRegisterCopy(MachineInstr &MI) {
  auto DestSrc = TII->isCopyLikeInstr(MI);
  if (!DestSrc)
    return false;

  const MachineOperand *DestRegOp = DestSrc->Destination;
  const MachineOperand *SrcRegOp = DestSrc->Source;

  Register SrcReg = SrcRegOp->getReg();
  Register DestReg = DestRegOp->getReg();

  // Ignore identity copies. Yep, these make it as far as LiveDebugValues.
  if (SrcReg == DestReg)
    return true;

  // For emulating VarLocBasedImpl:
  // We want to recognize instructions where destination register is callee
  // saved register. If register that could be clobbered by the call is
  // included, there would be a great chance that it is going to be clobbered
  // soon. It is more likely that previous register, which is callee saved, is
  // going to stay unclobbered longer, even if it is killed.
  //
  // For InstrRefBasedImpl, we can track multiple locations per value, so
  // ignore this condition.
  if (EmulateOldLDV && !isCalleeSavedReg(R: DestReg))
    return false;

  // The old LiveDebugValues implementation only followed killing copies;
  // match that behaviour when emulating it.
  if (EmulateOldLDV && !SrcRegOp->isKill())
    return false;

  // Before we update MTracker, remember which values were present in each of
  // the locations about to be overwritten, so that we can recover any
  // potentially clobbered variables.
  DenseMap<LocIdx, ValueIDNum> ClobberedLocs;
  if (TTracker) {
    for (MCRegAliasIterator RAI(DestReg, TRI, true); RAI.isValid(); ++RAI) {
      LocIdx ClobberedLoc = MTracker->getRegMLoc(R: *RAI);
      auto MLocIt = TTracker->ActiveMLocs.find(Val: ClobberedLoc);
      // If ActiveMLocs isn't tracking this location or there are no variables
      // using it, don't bother remembering.
      if (MLocIt == TTracker->ActiveMLocs.end() || MLocIt->second.empty())
        continue;
      ValueIDNum Value = MTracker->readReg(R: *RAI);
      ClobberedLocs[ClobberedLoc] = Value;
    }
  }

  // Copy MTracker info, including subregs if available.
  InstrRefBasedLDV::performCopy(SrcRegNum: SrcReg, DstRegNum: DestReg);

  // The copy might have clobbered variables based on the destination register.
  // Tell TTracker about it, passing the old ValueIDNum to search for
  // alternative locations (or else terminating those variables).
  if (TTracker) {
    for (auto LocVal : ClobberedLocs) {
      TTracker->clobberMloc(MLoc: LocVal.first, OldValue: LocVal.second, Pos: MI.getIterator(), MakeUndef: false);
    }
  }

  // Only produce a transfer of DBG_VALUE within a block where old LDV
  // would have. We might make use of the additional value tracking in some
  // other way, later.
  if (TTracker && isCalleeSavedReg(R: DestReg) && SrcRegOp->isKill())
    TTracker->transferMlocs(Src: MTracker->getRegMLoc(R: SrcReg),
                            Dst: MTracker->getRegMLoc(R: DestReg), Pos: MI.getIterator());

  // VarLocBasedImpl would quit tracking the old location after copying.
  if (EmulateOldLDV && SrcReg != DestReg)
    MTracker->defReg(R: SrcReg, BB: CurBB, Inst: CurInst);

  return true;
}
2186
/// Accumulate a mapping between each DILocalVariable fragment and other
/// fragments of that DILocalVariable which overlap. This reduces work during
/// the data-flow stage from "Find any overlapping fragments" to "Check if the
/// known-to-overlap fragments are present".
/// \param MI A previously unprocessed debug instruction to analyze for
/// fragment usage.
void InstrRefBasedLDV::accumulateFragmentMap(MachineInstr &MI) {
  assert(MI.isDebugValueLike());
  DebugVariable MIVar(MI.getDebugVariable(), MI.getDebugExpression(),
                      MI.getDebugLoc()->getInlinedAt());
  FragmentInfo ThisFragment = MIVar.getFragmentOrDefault();

  // If this is the first sighting of this variable, then we are guaranteed
  // there are currently no overlapping fragments either. Initialize the set
  // of seen fragments, record no overlaps for the current one, and return.
  auto SeenIt = SeenFragments.find(Val: MIVar.getVariable());
  if (SeenIt == SeenFragments.end()) {
    SmallSet<FragmentInfo, 4> OneFragment;
    OneFragment.insert(V: ThisFragment);
    SeenFragments.insert(KV: {MIVar.getVariable(), OneFragment});

    // An empty overlap vector: nothing is known to overlap yet.
    OverlapFragments.insert(KV: {{MIVar.getVariable(), ThisFragment}, {}});
    return;
  }

  // If this particular Variable/Fragment pair already exists in the overlap
  // map, it has already been accounted for.
  auto IsInOLapMap =
      OverlapFragments.insert(KV: {{MIVar.getVariable(), ThisFragment}, {}});
  if (!IsInOLapMap.second)
    return;

  auto &ThisFragmentsOverlaps = IsInOLapMap.first->second;
  auto &AllSeenFragments = SeenIt->second;

  // Otherwise, examine all other seen fragments for this variable, with "this"
  // fragment being a previously unseen fragment. Record any pair of
  // overlapping fragments.
  for (const auto &ASeenFragment : AllSeenFragments) {
    // Does this previously seen fragment overlap?
    if (DIExpression::fragmentsOverlap(A: ThisFragment, B: ASeenFragment)) {
      // Yes: Mark the current fragment as being overlapped.
      ThisFragmentsOverlaps.push_back(Elt: ASeenFragment);
      // Mark the previously seen fragment as being overlapped by the current
      // one.
      auto ASeenFragmentsOverlaps =
          OverlapFragments.find(Val: {MIVar.getVariable(), ASeenFragment});
      assert(ASeenFragmentsOverlaps != OverlapFragments.end() &&
             "Previously seen var fragment has no vector of overlaps");
      ASeenFragmentsOverlaps->second.push_back(Elt: ThisFragment);
    }
  }

  // Finally, this fragment is now "seen" for future instructions.
  AllSeenFragments.insert(V: ThisFragment);
}
2242
2243void InstrRefBasedLDV::process(MachineInstr &MI,
2244 const FuncValueTable *MLiveOuts,
2245 const FuncValueTable *MLiveIns) {
2246 // Try to interpret an MI as a debug or transfer instruction. Only if it's
2247 // none of these should we interpret it's register defs as new value
2248 // definitions.
2249 if (transferDebugValue(MI))
2250 return;
2251 if (transferDebugInstrRef(MI, MLiveOuts, MLiveIns))
2252 return;
2253 if (transferDebugPHI(MI))
2254 return;
2255 if (transferRegisterCopy(MI))
2256 return;
2257 if (transferSpillOrRestoreInst(MI))
2258 return;
2259 transferRegisterDef(MI);
2260}
2261
/// Walk every block of \p MF and build its machine-location transfer
/// function: a map from each machine location to the value it contains at the
/// end of the block, omitting locations that are live-through (still hold the
/// block's live-in PHI value). Also ensures that registers first tracked
/// after a regmask was seen still get clobbered by that regmask.
void InstrRefBasedLDV::produceMLocTransferFunction(
    MachineFunction &MF, SmallVectorImpl<MLocTransferMap> &MLocTransfer,
    unsigned MaxNumBlocks) {
  // Because we try to optimize around register mask operands by ignoring regs
  // that aren't currently tracked, we set up something ugly for later: RegMask
  // operands that are seen earlier than the first use of a register, still need
  // to clobber that register in the transfer function. But this information
  // isn't actively recorded. Instead, we track each RegMask used in each block,
  // and accumulated the clobbered but untracked registers in each block into
  // the following bitvector. Later, if new values are tracked, we can add
  // appropriate clobbers.
  SmallVector<BitVector, 32> BlockMasks;
  BlockMasks.resize(N: MaxNumBlocks);

  // Reserve one bit per register for the masks described above.
  unsigned BVWords = MachineOperand::getRegMaskSize(NumRegs: TRI->getNumRegs());
  for (auto &BV : BlockMasks)
    BV.resize(N: TRI->getNumRegs(), t: true);

  // Step through all instructions and inhale the transfer function.
  for (auto &MBB : MF) {
    // Object fields that are read by trackers to know where we are in the
    // function.
    CurBB = MBB.getNumber();
    CurInst = 1;

    // Set all machine locations to a PHI value. For transfer function
    // production only, this signifies the live-in value to the block.
    MTracker->reset();
    MTracker->setMPhis(CurBB);

    // Step through each instruction in this block.
    for (auto &MI : MBB) {
      // Pass in an empty unique_ptr for the value tables when accumulating the
      // machine transfer function.
      process(MI, MLiveOuts: nullptr, MLiveIns: nullptr);

      // Also accumulate fragment map.
      if (MI.isDebugValueLike())
        accumulateFragmentMap(MI);

      // Create a map from the instruction number (if present) to the
      // MachineInstr and its position.
      if (uint64_t InstrNo = MI.peekDebugInstrNum()) {
        auto InstrAndPos = std::make_pair(x: &MI, y&: CurInst);
        auto InsertResult =
            DebugInstrNumToInstr.insert(x: std::make_pair(x&: InstrNo, y&: InstrAndPos));

        // There should never be duplicate instruction numbers.
        assert(InsertResult.second);
        (void)InsertResult;
      }

      ++CurInst;
    }

    // Produce the transfer function, a map of machine location to new value. If
    // any machine location has the live-in phi value from the start of the
    // block, it's live-through and doesn't need recording in the transfer
    // function.
    for (auto Location : MTracker->locations()) {
      LocIdx Idx = Location.Idx;
      ValueIDNum &P = Location.Value;
      if (P.isPHI() && P.getLoc() == Idx.asU64())
        continue;

      // Insert-or-update.
      auto &TransferMap = MLocTransfer[CurBB];
      auto Result = TransferMap.insert(KV: std::make_pair(x: Idx.asU64(), y&: P));
      if (!Result.second)
        Result.first->second = P;
    }

    // Accumulate any bitmask operands into the clobbered reg mask for this
    // block.
    for (auto &P : MTracker->Masks) {
      BlockMasks[CurBB].clearBitsNotInMask(Mask: P.first->getRegMask(), MaskWords: BVWords);
    }
  }

  // Compute a bitvector of all the registers that are tracked in this block.
  BitVector UsedRegs(TRI->getNumRegs());
  for (auto Location : MTracker->locations()) {
    unsigned ID = MTracker->LocIdxToLocID[Location.Idx];
    // Ignore stack slots, and aliases of the stack pointer.
    if (ID >= TRI->getNumRegs() || MTracker->SPAliases.count(V: ID))
      continue;
    UsedRegs.set(ID);
  }

  // Check that any regmask-clobber of a register that gets tracked, is not
  // live-through in the transfer function. It needs to be clobbered at the
  // very least.
  for (unsigned int I = 0; I < MaxNumBlocks; ++I) {
    BitVector &BV = BlockMasks[I];
    BV.flip();
    BV &= UsedRegs;
    // This produces all the bits that we clobber, but also use. Check that
    // they're all clobbered or at least set in the designated transfer
    // elem.
    for (unsigned Bit : BV.set_bits()) {
      unsigned ID = MTracker->getLocID(Reg: Bit);
      LocIdx Idx = MTracker->LocIDToLocIdx[ID];
      auto &TransferMap = MLocTransfer[I];

      // Install a value representing the fact that this location is effectively
      // written to in this block. As there's no reserved value, instead use
      // a value number that is never generated. Pick the value number for the
      // first instruction in the block, def'ing this location, which we know
      // this block never used anyway.
      ValueIDNum NotGeneratedNum = ValueIDNum(I, 1, Idx);
      auto Result =
          TransferMap.insert(KV: std::make_pair(x: Idx.asU64(), y&: NotGeneratedNum));
      if (!Result.second) {
        ValueIDNum &ValueID = Result.first->second;
        if (ValueID.getBlock() == I && ValueID.isPHI())
          // It was left as live-through. Set it to clobbered.
          ValueID = NotGeneratedNum;
      }
    }
  }
}
2384
2385bool InstrRefBasedLDV::mlocJoin(
2386 MachineBasicBlock &MBB, SmallPtrSet<const MachineBasicBlock *, 16> &Visited,
2387 FuncValueTable &OutLocs, ValueTable &InLocs) {
2388 LLVM_DEBUG(dbgs() << "join MBB: " << MBB.getNumber() << "\n");
2389 bool Changed = false;
2390
2391 // Handle value-propagation when control flow merges on entry to a block. For
2392 // any location without a PHI already placed, the location has the same value
2393 // as its predecessors. If a PHI is placed, test to see whether it's now a
2394 // redundant PHI that we can eliminate.
2395
2396 SmallVector<const MachineBasicBlock *, 8> BlockOrders;
2397 for (auto *Pred : MBB.predecessors())
2398 BlockOrders.push_back(Elt: Pred);
2399
2400 // Visit predecessors in RPOT order.
2401 auto Cmp = [&](const MachineBasicBlock *A, const MachineBasicBlock *B) {
2402 return BBToOrder.find(Val: A)->second < BBToOrder.find(Val: B)->second;
2403 };
2404 llvm::sort(C&: BlockOrders, Comp: Cmp);
2405
2406 // Skip entry block.
2407 if (BlockOrders.size() == 0) {
2408 // FIXME: We don't use assert here to prevent instr-ref-unreachable.mir
2409 // failing.
2410 LLVM_DEBUG(if (!MBB.isEntryBlock()) dbgs()
2411 << "Found not reachable block " << MBB.getFullName()
2412 << " from entry which may lead out of "
2413 "bound access to VarLocs\n");
2414 return false;
2415 }
2416
2417 // Step through all machine locations, look at each predecessor and test
2418 // whether we can eliminate redundant PHIs.
2419 for (auto Location : MTracker->locations()) {
2420 LocIdx Idx = Location.Idx;
2421
2422 // Pick out the first predecessors live-out value for this location. It's
2423 // guaranteed to not be a backedge, as we order by RPO.
2424 ValueIDNum FirstVal = OutLocs[*BlockOrders[0]][Idx.asU64()];
2425
2426 // If we've already eliminated a PHI here, do no further checking, just
2427 // propagate the first live-in value into this block.
2428 if (InLocs[Idx.asU64()] != ValueIDNum(MBB.getNumber(), 0, Idx)) {
2429 if (InLocs[Idx.asU64()] != FirstVal) {
2430 InLocs[Idx.asU64()] = FirstVal;
2431 Changed |= true;
2432 }
2433 continue;
2434 }
2435
2436 // We're now examining a PHI to see whether it's un-necessary. Loop around
2437 // the other live-in values and test whether they're all the same.
2438 bool Disagree = false;
2439 for (unsigned int I = 1; I < BlockOrders.size(); ++I) {
2440 const MachineBasicBlock *PredMBB = BlockOrders[I];
2441 const ValueIDNum &PredLiveOut = OutLocs[*PredMBB][Idx.asU64()];
2442
2443 // Incoming values agree, continue trying to eliminate this PHI.
2444 if (FirstVal == PredLiveOut)
2445 continue;
2446
2447 // We can also accept a PHI value that feeds back into itself.
2448 if (PredLiveOut == ValueIDNum(MBB.getNumber(), 0, Idx))
2449 continue;
2450
2451 // Live-out of a predecessor disagrees with the first predecessor.
2452 Disagree = true;
2453 }
2454
2455 // No disagreement? No PHI. Otherwise, leave the PHI in live-ins.
2456 if (!Disagree) {
2457 InLocs[Idx.asU64()] = FirstVal;
2458 Changed |= true;
2459 }
2460 }
2461
2462 // TODO: Reimplement NumInserted and NumRemoved.
2463 return Changed;
2464}
2465
2466void InstrRefBasedLDV::findStackIndexInterference(
2467 SmallVectorImpl<unsigned> &Slots) {
2468 // We could spend a bit of time finding the exact, minimal, set of stack
2469 // indexes that interfere with each other, much like reg units. Or, we can
2470 // rely on the fact that:
2471 // * The smallest / lowest index will interfere with everything at zero
2472 // offset, which will be the largest set of registers,
2473 // * Most indexes with non-zero offset will end up being interference units
2474 // anyway.
2475 // So just pick those out and return them.
2476
2477 // We can rely on a single-byte stack index existing already, because we
2478 // initialize them in MLocTracker.
2479 auto It = MTracker->StackSlotIdxes.find(Val: {8, 0});
2480 assert(It != MTracker->StackSlotIdxes.end());
2481 Slots.push_back(Elt: It->second);
2482
2483 // Find anything that has a non-zero offset and add that too.
2484 for (auto &Pair : MTracker->StackSlotIdxes) {
2485 // Is offset zero? If so, ignore.
2486 if (!Pair.first.second)
2487 continue;
2488 Slots.push_back(Elt: Pair.second);
2489 }
2490}
2491
// Compute, via the IDF algorithm, the blocks where each machine location
// needs a PHI live-in value, and install those PHI value numbers into the
// live-in table MInLocs. To limit how many times PHI placement runs,
// locations are grouped: ordinary registers by their register units, stack
// slots by representative stack "units", and never-def'd registers
// individually.
void InstrRefBasedLDV::placeMLocPHIs(
    MachineFunction &MF, SmallPtrSetImpl<MachineBasicBlock *> &AllBlocks,
    FuncValueTable &MInLocs, SmallVectorImpl<MLocTransferMap> &MLocTransfer) {
  SmallVector<unsigned, 4> StackUnits;
  findStackIndexInterference(Slots&: StackUnits);

  // To avoid repeatedly running the PHI placement algorithm, leverage the
  // fact that a def of register MUST also def its register units. Find the
  // units for registers, place PHIs for them, and then replicate them for
  // aliasing registers. Some inputs that are never def'd (DBG_PHIs of
  // arguments) don't lead to register units being tracked, just place PHIs for
  // those registers directly. Stack slots have their own form of "unit",
  // store them to one side.
  SmallSet<Register, 32> RegUnitsToPHIUp;
  SmallSet<LocIdx, 32> NormalLocsToPHI;
  SmallSet<SpillLocationNo, 32> StackSlots;
  // Classify each tracked location into one of the three groups above.
  for (auto Location : MTracker->locations()) {
    LocIdx L = Location.Idx;
    if (MTracker->isSpill(Idx: L)) {
      StackSlots.insert(V: MTracker->locIDToSpill(ID: MTracker->LocIdxToLocID[L]));
      continue;
    }

    Register R = MTracker->LocIdxToLocID[L];
    SmallSet<Register, 8> FoundRegUnits;
    bool AnyIllegal = false;
    for (MCRegUnit Unit : TRI->regunits(Reg: R.asMCReg())) {
      for (MCRegUnitRootIterator URoot(Unit, TRI); URoot.isValid(); ++URoot) {
        if (!MTracker->isRegisterTracked(R: *URoot)) {
          // Not all roots were loaded into the tracking map: this register
          // isn't actually def'd anywhere, we only read from it. Generate PHIs
          // for this reg, but don't iterate units.
          AnyIllegal = true;
        } else {
          FoundRegUnits.insert(V: *URoot);
        }
      }
    }

    if (AnyIllegal) {
      NormalLocsToPHI.insert(V: L);
      continue;
    }

    RegUnitsToPHIUp.insert(I: FoundRegUnits.begin(), E: FoundRegUnits.end());
  }

  // Lambda to fetch PHIs for a given location, and write into the PHIBlocks
  // collection.
  SmallVector<MachineBasicBlock *, 32> PHIBlocks;
  auto CollectPHIsForLoc = [&](LocIdx L) {
    // Collect the set of defs: blocks whose transfer function mentions L.
    SmallPtrSet<MachineBasicBlock *, 32> DefBlocks;
    for (unsigned int I = 0; I < OrderToBB.size(); ++I) {
      MachineBasicBlock *MBB = OrderToBB[I];
      const auto &TransferFunc = MLocTransfer[MBB->getNumber()];
      if (TransferFunc.contains(Val: L))
        DefBlocks.insert(Ptr: MBB);
    }

    // The entry block defs the location too: it's the live-in / argument value.
    // Only insert if there are other defs though; everything is trivially live
    // through otherwise.
    if (!DefBlocks.empty())
      DefBlocks.insert(Ptr: &*MF.begin());

    // Ask the SSA construction algorithm where we should put PHIs. Clear
    // anything that might have been hanging around from earlier.
    PHIBlocks.clear();
    BlockPHIPlacement(AllBlocks, DefBlocks, PHIBlocks);
  };

  // Record a PHI value number for location L as the live-in of every block
  // the placement algorithm selected.
  auto InstallPHIsAtLoc = [&PHIBlocks, &MInLocs](LocIdx L) {
    for (const MachineBasicBlock *MBB : PHIBlocks)
      MInLocs[*MBB][L.asU64()] = ValueIDNum(MBB->getNumber(), 0, L);
  };

  // For locations with no reg units, just place PHIs.
  for (LocIdx L : NormalLocsToPHI) {
    CollectPHIsForLoc(L);
    // Install those PHI values into the live-in value array.
    InstallPHIsAtLoc(L);
  }

  // For stack slots, calculate PHIs for the equivalent of the units, then
  // install for each index.
  for (SpillLocationNo Slot : StackSlots) {
    for (unsigned Idx : StackUnits) {
      unsigned SpillID = MTracker->getSpillIDWithIdx(Spill: Slot, Idx);
      LocIdx L = MTracker->getSpillMLoc(SpillID);
      CollectPHIsForLoc(L);
      InstallPHIsAtLoc(L);

      // Find anything that aliases this stack index, install PHIs for it too.
      unsigned Size, Offset;
      std::tie(args&: Size, args&: Offset) = MTracker->StackIdxesToPos[Idx];
      for (auto &Pair : MTracker->StackSlotIdxes) {
        unsigned ThisSize, ThisOffset;
        std::tie(args&: ThisSize, args&: ThisOffset) = Pair.first;
        // Skip if [ThisOffset, ThisOffset+ThisSize) and [Offset, Offset+Size)
        // are disjoint byte ranges, i.e. the indexes don't alias.
        if (ThisSize + ThisOffset <= Offset || Size + Offset <= ThisOffset)
          continue;

        unsigned ThisID = MTracker->getSpillIDWithIdx(Spill: Slot, Idx: Pair.second);
        LocIdx ThisL = MTracker->getSpillMLoc(SpillID: ThisID);
        InstallPHIsAtLoc(ThisL);
      }
    }
  }

  // For reg units, place PHIs, and then place them for any aliasing registers.
  for (Register R : RegUnitsToPHIUp) {
    LocIdx L = MTracker->lookupOrTrackRegister(ID: R);
    CollectPHIsForLoc(L);

    // Install those PHI values into the live-in value array.
    InstallPHIsAtLoc(L);

    // Now find aliases and install PHIs for those.
    for (MCRegAliasIterator RAI(R, TRI, true); RAI.isValid(); ++RAI) {
      // Super-registers that are "above" the largest register read/written by
      // the function will alias, but will not be tracked.
      if (!MTracker->isRegisterTracked(R: *RAI))
        continue;

      LocIdx AliasLoc = MTracker->lookupOrTrackRegister(ID: *RAI);
      InstallPHIsAtLoc(AliasLoc);
    }
  }
}
2621
// Solve the machine-location dataflow problem: given per-block transfer
// functions in MLocTransfer, compute the value live into each block
// (MInLocs) and out of each block (MOutLocs) for every machine location.
// PHIs are first placed with the SSA construction algorithm, then values
// are propagated over a worklist until a fixedpoint, which eliminates any
// redundant PHIs.
void InstrRefBasedLDV::buildMLocValueMap(
    MachineFunction &MF, FuncValueTable &MInLocs, FuncValueTable &MOutLocs,
    SmallVectorImpl<MLocTransferMap> &MLocTransfer) {
  // Worklists of block RPO numbers: "Worklist" for the current dataflow
  // iteration, "Pending" for blocks reached over backedges (next iteration).
  std::priority_queue<unsigned int, std::vector<unsigned int>,
                      std::greater<unsigned int>>
      Worklist, Pending;

  // We track what is on the current and pending worklist to avoid inserting
  // the same thing twice. We could avoid this with a custom priority queue,
  // but this is probably not worth it.
  SmallPtrSet<MachineBasicBlock *, 16> OnPending, OnWorklist;

  // Initialize worklist with every block to be visited. Also produce list of
  // all blocks.
  SmallPtrSet<MachineBasicBlock *, 32> AllBlocks;
  for (unsigned int I = 0; I < BBToOrder.size(); ++I) {
    Worklist.push(x: I);
    OnWorklist.insert(Ptr: OrderToBB[I]);
    AllBlocks.insert(Ptr: OrderToBB[I]);
  }

  // Initialize entry block to PHIs. These represent arguments.
  for (auto Location : MTracker->locations())
    MInLocs.tableForEntryMBB()[Location.Idx.asU64()] =
        ValueIDNum(0, 0, Location.Idx);

  MTracker->reset();

  // Start by placing PHIs, using the usual SSA constructor algorithm. Consider
  // any machine-location that isn't live-through a block to be def'd in that
  // block.
  placeMLocPHIs(MF, AllBlocks, MInLocs, MLocTransfer);

  // Propagate values to eliminate redundant PHIs. At the same time, this
  // produces the table of Block x Location => Value for the entry to each
  // block.
  // The kind of PHIs we can eliminate are, for example, where one path in a
  // conditional spills and restores a register, and the register still has
  // the same value once control flow joins, unbeknowns to the PHI placement
  // code. Propagating values allows us to identify such un-necessary PHIs and
  // remove them.
  SmallPtrSet<const MachineBasicBlock *, 16> Visited;
  while (!Worklist.empty() || !Pending.empty()) {
    // Vector for storing the evaluated block transfer function.
    SmallVector<std::pair<LocIdx, ValueIDNum>, 32> ToRemap;

    while (!Worklist.empty()) {
      MachineBasicBlock *MBB = OrderToBB[Worklist.top()];
      CurBB = MBB->getNumber();
      Worklist.pop();

      // Join the values in all predecessor blocks. A first visit always
      // counts as a change, so every block's transfer function is evaluated
      // at least once.
      bool InLocsChanged;
      InLocsChanged = mlocJoin(MBB&: *MBB, Visited, OutLocs&: MOutLocs, InLocs&: MInLocs[*MBB]);
      InLocsChanged |= Visited.insert(Ptr: MBB).second;

      // Don't examine transfer function if we've visited this loc at least
      // once, and inlocs haven't changed.
      if (!InLocsChanged)
        continue;

      // Load the current set of live-ins into MLocTracker.
      MTracker->loadFromArray(Locs&: MInLocs[*MBB], NewCurBB: CurBB);

      // Each element of the transfer function can be a new def, or a read of
      // a live-in value. Evaluate each element, and store to "ToRemap".
      ToRemap.clear();
      for (auto &P : MLocTransfer[CurBB]) {
        if (P.second.getBlock() == CurBB && P.second.isPHI()) {
          // This is a movement of whatever was live in. Read it.
          ValueIDNum NewID = MTracker->readMLoc(L: P.second.getLoc());
          ToRemap.push_back(Elt: std::make_pair(x&: P.first, y&: NewID));
        } else {
          // It's a def. Just set it.
          assert(P.second.getBlock() == CurBB);
          ToRemap.push_back(Elt: std::make_pair(x&: P.first, y&: P.second));
        }
      }

      // Commit the transfer function changes into mloc tracker, which
      // transforms the contents of the MLocTracker into the live-outs.
      for (auto &P : ToRemap)
        MTracker->setMLoc(L: P.first, Num: P.second);

      // Now copy out-locs from mloc tracker into out-loc vector, checking
      // whether changes have occurred. These changes can have come from both
      // the transfer function, and mlocJoin.
      bool OLChanged = false;
      for (auto Location : MTracker->locations()) {
        OLChanged |= MOutLocs[*MBB][Location.Idx.asU64()] != Location.Value;
        MOutLocs[*MBB][Location.Idx.asU64()] = Location.Value;
      }

      MTracker->reset();

      // No need to examine successors again if out-locs didn't change.
      if (!OLChanged)
        continue;

      // All successors should be visited: put any back-edges on the pending
      // list for the next pass-through, and any other successors to be
      // visited this pass, if they're not going to be already.
      for (auto *s : MBB->successors()) {
        // Does branching to this successor represent a back-edge?
        if (BBToOrder[s] > BBToOrder[MBB]) {
          // No: visit it during this dataflow iteration.
          if (OnWorklist.insert(Ptr: s).second)
            Worklist.push(x: BBToOrder[s]);
        } else {
          // Yes: visit it on the next iteration.
          if (OnPending.insert(Ptr: s).second)
            Pending.push(x: BBToOrder[s]);
        }
      }
    }

    // Move the backedge-reached blocks into place for the next iteration.
    Worklist.swap(pq&: Pending);
    std::swap(LHS&: OnPending, RHS&: OnWorklist);
    OnPending.clear();
    // At this point, pending must be empty, since it was just the empty
    // worklist
    assert(Pending.empty() && "Pending should be empty");
  }

  // Once all the live-ins don't change on mlocJoin(), we've eliminated all
  // redundant PHIs.
}
2749
2750void InstrRefBasedLDV::BlockPHIPlacement(
2751 const SmallPtrSetImpl<MachineBasicBlock *> &AllBlocks,
2752 const SmallPtrSetImpl<MachineBasicBlock *> &DefBlocks,
2753 SmallVectorImpl<MachineBasicBlock *> &PHIBlocks) {
2754 // Apply IDF calculator to the designated set of location defs, storing
2755 // required PHIs into PHIBlocks. Uses the dominator tree stored in the
2756 // InstrRefBasedLDV object.
2757 IDFCalculatorBase<MachineBasicBlock, false> IDF(DomTree->getBase());
2758
2759 IDF.setLiveInBlocks(AllBlocks);
2760 IDF.setDefiningBlocks(DefBlocks);
2761 IDF.calculate(IDFBlocks&: PHIBlocks);
2762}
2763
// For a candidate variable-value PHI at the start of MBB, try to find for
// each location operand a machine location on which all predecessors'
// live-out values agree. On success, appends the joined operands to
// OutValues and returns true; on any failure (predecessor out of scope,
// unjoinable properties, mismatched constants, or no common machine
// location) returns false and leaves OutValues untouched.
bool InstrRefBasedLDV::pickVPHILoc(
    SmallVectorImpl<DbgOpID> &OutValues, const MachineBasicBlock &MBB,
    const LiveIdxT &LiveOuts, FuncValueTable &MOutLocs,
    const SmallVectorImpl<const MachineBasicBlock *> &BlockOrders) {

  // No predecessors means no PHIs.
  if (BlockOrders.empty())
    return false;

  // All the location operands that do not already agree need to be joined,
  // track the indices of each such location operand here.
  SmallDenseSet<unsigned> LocOpsToJoin;

  auto FirstValueIt = LiveOuts.find(Val: BlockOrders[0]);
  if (FirstValueIt == LiveOuts.end())
    return false;
  const DbgValue &FirstValue = *FirstValueIt->second;

  // Compare every predecessor's live-out against the first one, recording
  // which operand indices disagree and therefore need joining.
  for (const auto p : BlockOrders) {
    auto OutValIt = LiveOuts.find(Val: p);
    if (OutValIt == LiveOuts.end())
      // If we have a predecessor not in scope, we'll never find a PHI position.
      return false;
    const DbgValue &OutVal = *OutValIt->second;

    // No-values cannot have locations we can join on.
    if (OutVal.Kind == DbgValue::NoVal)
      return false;

    // For unjoined VPHIs where we don't know the location, we definitely
    // can't find a join loc unless the VPHI is a backedge.
    if (OutVal.isUnjoinedPHI() && OutVal.BlockNo != MBB.getNumber())
      return false;

    if (!FirstValue.Properties.isJoinable(Other: OutVal.Properties))
      return false;

    for (unsigned Idx = 0; Idx < FirstValue.getLocationOpCount(); ++Idx) {
      // An unjoined PHI has no defined locations, and so a shared location must
      // be found for every operand.
      if (OutVal.isUnjoinedPHI()) {
        LocOpsToJoin.insert(V: Idx);
        continue;
      }
      DbgOpID FirstValOp = FirstValue.getDbgOpID(Index: Idx);
      DbgOpID OutValOp = OutVal.getDbgOpID(Index: Idx);
      if (FirstValOp != OutValOp) {
        // We can never join constant ops - the ops must either both be equal
        // constant ops or non-const ops.
        if (FirstValOp.isConst() || OutValOp.isConst())
          return false;
        else
          LocOpsToJoin.insert(V: Idx);
      }
    }
  }

  // Build the joined operand list; on any failure below, bail without
  // modifying OutValues.
  SmallVector<DbgOpID> NewDbgOps;

  for (unsigned Idx = 0; Idx < FirstValue.getLocationOpCount(); ++Idx) {
    // If this op doesn't need to be joined because the values agree, use that
    // already-agreed value.
    if (!LocOpsToJoin.contains(V: Idx)) {
      NewDbgOps.push_back(Elt: FirstValue.getDbgOpID(Index: Idx));
      continue;
    }

    std::optional<ValueIDNum> JoinedOpLoc =
        pickOperandPHILoc(DbgOpIdx: Idx, MBB, LiveOuts, MOutLocs, BlockOrders);

    if (!JoinedOpLoc)
      return false;

    NewDbgOps.push_back(Elt: DbgOpStore.insert(Op: *JoinedOpLoc));
  }

  // Success: publish the joined operands.
  OutValues.append(RHS: NewDbgOps);
  return true;
}
2843
2844std::optional<ValueIDNum> InstrRefBasedLDV::pickOperandPHILoc(
2845 unsigned DbgOpIdx, const MachineBasicBlock &MBB, const LiveIdxT &LiveOuts,
2846 FuncValueTable &MOutLocs,
2847 const SmallVectorImpl<const MachineBasicBlock *> &BlockOrders) {
2848
2849 // Collect a set of locations from predecessor where its live-out value can
2850 // be found.
2851 SmallVector<SmallVector<LocIdx, 4>, 8> Locs;
2852 unsigned NumLocs = MTracker->getNumLocs();
2853
2854 for (const auto p : BlockOrders) {
2855 auto OutValIt = LiveOuts.find(Val: p);
2856 assert(OutValIt != LiveOuts.end());
2857 const DbgValue &OutVal = *OutValIt->second;
2858 DbgOpID OutValOpID = OutVal.getDbgOpID(Index: DbgOpIdx);
2859 DbgOp OutValOp = DbgOpStore.find(ID: OutValOpID);
2860 assert(!OutValOp.IsConst);
2861
2862 // Create new empty vector of locations.
2863 Locs.resize(N: Locs.size() + 1);
2864
2865 // If the live-in value is a def, find the locations where that value is
2866 // present. Do the same for VPHIs where we know the VPHI value.
2867 if (OutVal.Kind == DbgValue::Def ||
2868 (OutVal.Kind == DbgValue::VPHI && OutVal.BlockNo != MBB.getNumber() &&
2869 !OutValOp.isUndef())) {
2870 ValueIDNum ValToLookFor = OutValOp.ID;
2871 // Search the live-outs of the predecessor for the specified value.
2872 for (unsigned int I = 0; I < NumLocs; ++I) {
2873 if (MOutLocs[*p][I] == ValToLookFor)
2874 Locs.back().push_back(Elt: LocIdx(I));
2875 }
2876 } else {
2877 assert(OutVal.Kind == DbgValue::VPHI);
2878 // Otherwise: this is a VPHI on a backedge feeding back into itself, i.e.
2879 // a value that's live-through the whole loop. (It has to be a backedge,
2880 // because a block can't dominate itself). We can accept as a PHI location
2881 // any location where the other predecessors agree, _and_ the machine
2882 // locations feed back into themselves. Therefore, add all self-looping
2883 // machine-value PHI locations.
2884 for (unsigned int I = 0; I < NumLocs; ++I) {
2885 ValueIDNum MPHI(MBB.getNumber(), 0, LocIdx(I));
2886 if (MOutLocs[*p][I] == MPHI)
2887 Locs.back().push_back(Elt: LocIdx(I));
2888 }
2889 }
2890 }
2891 // We should have found locations for all predecessors, or returned.
2892 assert(Locs.size() == BlockOrders.size());
2893
2894 // Starting with the first set of locations, take the intersection with
2895 // subsequent sets.
2896 SmallVector<LocIdx, 4> CandidateLocs = Locs[0];
2897 for (unsigned int I = 1; I < Locs.size(); ++I) {
2898 auto &LocVec = Locs[I];
2899 SmallVector<LocIdx, 4> NewCandidates;
2900 std::set_intersection(first1: CandidateLocs.begin(), last1: CandidateLocs.end(),
2901 first2: LocVec.begin(), last2: LocVec.end(), result: std::inserter(x&: NewCandidates, i: NewCandidates.begin()));
2902 CandidateLocs = NewCandidates;
2903 }
2904 if (CandidateLocs.empty())
2905 return std::nullopt;
2906
2907 // We now have a set of LocIdxes that contain the right output value in
2908 // each of the predecessors. Pick the lowest; if there's a register loc,
2909 // that'll be it.
2910 LocIdx L = *CandidateLocs.begin();
2911
2912 // Return a PHI-value-number for the found location.
2913 ValueIDNum PHIVal = {(unsigned)MBB.getNumber(), 0, L};
2914 return PHIVal;
2915}
2916
// Join the variable value live into MBB from the predecessors' live-out
// DbgValues in VLOCOutLocs. If LiveIn holds a VPHI for this block, attempt
// to eliminate it: when every incoming value agrees (or only a backedge
// feeds the VPHI back into itself), the joined value replaces the VPHI.
// Returns true iff LiveIn was changed.
bool InstrRefBasedLDV::vlocJoin(
    MachineBasicBlock &MBB, LiveIdxT &VLOCOutLocs,
    SmallPtrSet<const MachineBasicBlock *, 8> &BlocksToExplore,
    DbgValue &LiveIn) {
  LLVM_DEBUG(dbgs() << "join MBB: " << MBB.getNumber() << "\n");
  bool Changed = false;

  // Order predecessors by RPOT order, for exploring them in that order.
  SmallVector<MachineBasicBlock *, 8> BlockOrders(MBB.predecessors());

  auto Cmp = [&](MachineBasicBlock *A, MachineBasicBlock *B) {
    return BBToOrder[A] < BBToOrder[B];
  };

  llvm::sort(C&: BlockOrders, Comp: Cmp);

  unsigned CurBlockRPONum = BBToOrder[&MBB];

  // Collect all the incoming DbgValues for this variable, from predecessor
  // live-out values.
  SmallVector<InValueT, 8> Values;
  bool Bail = false;
  // Backedge predecessors occupy Values[BackEdgesStart..end), because
  // BlockOrders is sorted by RPO number.
  int BackEdgesStart = 0;
  for (auto *p : BlockOrders) {
    // If the predecessor isn't in scope / to be explored, we'll never be
    // able to join any locations.
    if (!BlocksToExplore.contains(Ptr: p)) {
      Bail = true;
      break;
    }

    // All Live-outs will have been initialized.
    DbgValue &OutLoc = *VLOCOutLocs.find(Val: p)->second;

    // Keep track of where back-edges begin in the Values vector. Relies on
    // BlockOrders being sorted by RPO.
    unsigned ThisBBRPONum = BBToOrder[p];
    if (ThisBBRPONum < CurBlockRPONum)
      ++BackEdgesStart;

    Values.push_back(Elt: std::make_pair(x&: p, y: &OutLoc));
  }

  // If there were no values, or one of the predecessors couldn't have a
  // value, then give up immediately. It's not safe to produce a live-in
  // value. Leave as whatever it was before.
  if (Bail || Values.size() == 0)
    return false;

  // All (non-entry) blocks have at least one non-backedge predecessor.
  // Pick the variable value from the first of these, to compare against
  // all others.
  const DbgValue &FirstVal = *Values[0].second;

  // If the old live-in value is not a PHI then either a) no PHI is needed
  // here, or b) we eliminated the PHI that was here. If so, we can just
  // propagate in the first parent's incoming value.
  if (LiveIn.Kind != DbgValue::VPHI || LiveIn.BlockNo != MBB.getNumber()) {
    Changed = LiveIn != FirstVal;
    if (Changed)
      LiveIn = FirstVal;
    return Changed;
  }

  // Scan for variable values that can never be resolved: if they have
  // different DIExpressions, different indirectness, or are mixed constants /
  // non-constants.
  for (const auto &V : Values) {
    if (!V.second->Properties.isJoinable(Other: FirstVal.Properties))
      return false;
    if (V.second->Kind == DbgValue::NoVal)
      return false;
    if (!V.second->hasJoinableLocOps(Other: FirstVal))
      return false;
  }

  // Try to eliminate this PHI. Do the incoming values all agree?
  bool Disagree = false;
  for (auto &V : Values) {
    if (*V.second == FirstVal)
      continue; // No disagreement.

    // If both values are not equal but have equal non-empty IDs then they refer
    // to the same value from different sources (e.g. one is VPHI and the other
    // is Def), which does not cause disagreement.
    if (V.second->hasIdenticalValidLocOps(Other: FirstVal))
      continue;

    // Eliminate if a backedge feeds a VPHI back into itself.
    if (V.second->Kind == DbgValue::VPHI &&
        V.second->BlockNo == MBB.getNumber() &&
        // Is this a backedge?
        std::distance(first: Values.begin(), last: &V) >= BackEdgesStart)
      continue;

    Disagree = true;
  }

  // No disagreement -> live-through value.
  if (!Disagree) {
    Changed = LiveIn != FirstVal;
    if (Changed)
      LiveIn = FirstVal;
    return Changed;
  } else {
    // Otherwise use a VPHI.
    DbgValue VPHI(MBB.getNumber(), FirstVal.Properties, DbgValue::VPHI);
    Changed = LiveIn != VPHI;
    if (Changed)
      LiveIn = VPHI;
    return Changed;
  }
}
3030
3031void InstrRefBasedLDV::getBlocksForScope(
3032 const DILocation *DILoc,
3033 SmallPtrSetImpl<const MachineBasicBlock *> &BlocksToExplore,
3034 const SmallPtrSetImpl<MachineBasicBlock *> &AssignBlocks) {
3035 // Get the set of "normal" in-lexical-scope blocks.
3036 LS.getMachineBasicBlocks(DL: DILoc, MBBs&: BlocksToExplore);
3037
3038 // VarLoc LiveDebugValues tracks variable locations that are defined in
3039 // blocks not in scope. This is something we could legitimately ignore, but
3040 // lets allow it for now for the sake of coverage.
3041 BlocksToExplore.insert(I: AssignBlocks.begin(), E: AssignBlocks.end());
3042
3043 // Storage for artificial blocks we intend to add to BlocksToExplore.
3044 DenseSet<const MachineBasicBlock *> ToAdd;
3045
3046 // To avoid needlessly dropping large volumes of variable locations, propagate
3047 // variables through aritifical blocks, i.e. those that don't have any
3048 // instructions in scope at all. To accurately replicate VarLoc
3049 // LiveDebugValues, this means exploring all artificial successors too.
3050 // Perform a depth-first-search to enumerate those blocks.
3051 for (const auto *MBB : BlocksToExplore) {
3052 // Depth-first-search state: each node is a block and which successor
3053 // we're currently exploring.
3054 SmallVector<std::pair<const MachineBasicBlock *,
3055 MachineBasicBlock::const_succ_iterator>,
3056 8>
3057 DFS;
3058
3059 // Find any artificial successors not already tracked.
3060 for (auto *succ : MBB->successors()) {
3061 if (BlocksToExplore.count(Ptr: succ))
3062 continue;
3063 if (!ArtificialBlocks.count(Ptr: succ))
3064 continue;
3065 ToAdd.insert(V: succ);
3066 DFS.push_back(Elt: {succ, succ->succ_begin()});
3067 }
3068
3069 // Search all those blocks, depth first.
3070 while (!DFS.empty()) {
3071 const MachineBasicBlock *CurBB = DFS.back().first;
3072 MachineBasicBlock::const_succ_iterator &CurSucc = DFS.back().second;
3073 // Walk back if we've explored this blocks successors to the end.
3074 if (CurSucc == CurBB->succ_end()) {
3075 DFS.pop_back();
3076 continue;
3077 }
3078
3079 // If the current successor is artificial and unexplored, descend into
3080 // it.
3081 if (!ToAdd.count(V: *CurSucc) && ArtificialBlocks.count(Ptr: *CurSucc)) {
3082 ToAdd.insert(V: *CurSucc);
3083 DFS.push_back(Elt: {*CurSucc, (*CurSucc)->succ_begin()});
3084 continue;
3085 }
3086
3087 ++CurSucc;
3088 }
3089 };
3090
3091 BlocksToExplore.insert(I: ToAdd.begin(), E: ToAdd.end());
3092}
3093
3094void InstrRefBasedLDV::buildVLocValueMap(
3095 const DILocation *DILoc, const SmallSet<DebugVariable, 4> &VarsWeCareAbout,
3096 SmallPtrSetImpl<MachineBasicBlock *> &AssignBlocks, LiveInsT &Output,
3097 FuncValueTable &MOutLocs, FuncValueTable &MInLocs,
3098 SmallVectorImpl<VLocTracker> &AllTheVLocs) {
3099 // This method is much like buildMLocValueMap: but focuses on a single
3100 // LexicalScope at a time. Pick out a set of blocks and variables that are
3101 // to have their value assignments solved, then run our dataflow algorithm
3102 // until a fixedpoint is reached.
3103 std::priority_queue<unsigned int, std::vector<unsigned int>,
3104 std::greater<unsigned int>>
3105 Worklist, Pending;
3106 SmallPtrSet<MachineBasicBlock *, 16> OnWorklist, OnPending;
3107
3108 // The set of blocks we'll be examining.
3109 SmallPtrSet<const MachineBasicBlock *, 8> BlocksToExplore;
3110
3111 // The order in which to examine them (RPO).
3112 SmallVector<MachineBasicBlock *, 8> BlockOrders;
3113
3114 // RPO ordering function.
3115 auto Cmp = [&](MachineBasicBlock *A, MachineBasicBlock *B) {
3116 return BBToOrder[A] < BBToOrder[B];
3117 };
3118
3119 getBlocksForScope(DILoc, BlocksToExplore, AssignBlocks);
3120
3121 // Single block scope: not interesting! No propagation at all. Note that
3122 // this could probably go above ArtificialBlocks without damage, but
3123 // that then produces output differences from original-live-debug-values,
3124 // which propagates from a single block into many artificial ones.
3125 if (BlocksToExplore.size() == 1)
3126 return;
3127
3128 // Convert a const set to a non-const set. LexicalScopes
3129 // getMachineBasicBlocks returns const MBB pointers, IDF wants mutable ones.
3130 // (Neither of them mutate anything).
3131 SmallPtrSet<MachineBasicBlock *, 8> MutBlocksToExplore;
3132 for (const auto *MBB : BlocksToExplore)
3133 MutBlocksToExplore.insert(Ptr: const_cast<MachineBasicBlock *>(MBB));
3134
3135 // Picks out relevants blocks RPO order and sort them.
3136 for (const auto *MBB : BlocksToExplore)
3137 BlockOrders.push_back(Elt: const_cast<MachineBasicBlock *>(MBB));
3138
3139 llvm::sort(C&: BlockOrders, Comp: Cmp);
3140 unsigned NumBlocks = BlockOrders.size();
3141
3142 // Allocate some vectors for storing the live ins and live outs. Large.
3143 SmallVector<DbgValue, 32> LiveIns, LiveOuts;
3144 LiveIns.reserve(N: NumBlocks);
3145 LiveOuts.reserve(N: NumBlocks);
3146
3147 // Initialize all values to start as NoVals. This signifies "it's live
3148 // through, but we don't know what it is".
3149 DbgValueProperties EmptyProperties(EmptyExpr, false, false);
3150 for (unsigned int I = 0; I < NumBlocks; ++I) {
3151 DbgValue EmptyDbgValue(I, EmptyProperties, DbgValue::NoVal);
3152 LiveIns.push_back(Elt: EmptyDbgValue);
3153 LiveOuts.push_back(Elt: EmptyDbgValue);
3154 }
3155
3156 // Produce by-MBB indexes of live-in/live-outs, to ease lookup within
3157 // vlocJoin.
3158 LiveIdxT LiveOutIdx, LiveInIdx;
3159 LiveOutIdx.reserve(NumEntries: NumBlocks);
3160 LiveInIdx.reserve(NumEntries: NumBlocks);
3161 for (unsigned I = 0; I < NumBlocks; ++I) {
3162 LiveOutIdx[BlockOrders[I]] = &LiveOuts[I];
3163 LiveInIdx[BlockOrders[I]] = &LiveIns[I];
3164 }
3165
3166 // Loop over each variable and place PHIs for it, then propagate values
3167 // between blocks. This keeps the locality of working on one lexical scope at
  // a time, but avoids re-processing variable values because some other
3169 // variable has been assigned.
3170 for (const auto &Var : VarsWeCareAbout) {
3171 // Re-initialize live-ins and live-outs, to clear the remains of previous
3172 // variables live-ins / live-outs.
3173 for (unsigned int I = 0; I < NumBlocks; ++I) {
3174 DbgValue EmptyDbgValue(I, EmptyProperties, DbgValue::NoVal);
3175 LiveIns[I] = EmptyDbgValue;
3176 LiveOuts[I] = EmptyDbgValue;
3177 }
3178
3179 // Place PHIs for variable values, using the LLVM IDF calculator.
3180 // Collect the set of blocks where variables are def'd.
3181 SmallPtrSet<MachineBasicBlock *, 32> DefBlocks;
3182 for (const MachineBasicBlock *ExpMBB : BlocksToExplore) {
3183 auto &TransferFunc = AllTheVLocs[ExpMBB->getNumber()].Vars;
3184 if (TransferFunc.contains(Key: Var))
3185 DefBlocks.insert(Ptr: const_cast<MachineBasicBlock *>(ExpMBB));
3186 }
3187
3188 SmallVector<MachineBasicBlock *, 32> PHIBlocks;
3189
3190 // Request the set of PHIs we should insert for this variable. If there's
3191 // only one value definition, things are very simple.
3192 if (DefBlocks.size() == 1) {
3193 placePHIsForSingleVarDefinition(InScopeBlocks: MutBlocksToExplore, MBB: *DefBlocks.begin(),
3194 AllTheVLocs, Var, Output);
3195 continue;
3196 }
3197
3198 // Otherwise: we need to place PHIs through SSA and propagate values.
3199 BlockPHIPlacement(AllBlocks: MutBlocksToExplore, DefBlocks, PHIBlocks);
3200
3201 // Insert PHIs into the per-block live-in tables for this variable.
3202 for (MachineBasicBlock *PHIMBB : PHIBlocks) {
3203 unsigned BlockNo = PHIMBB->getNumber();
3204 DbgValue *LiveIn = LiveInIdx[PHIMBB];
3205 *LiveIn = DbgValue(BlockNo, EmptyProperties, DbgValue::VPHI);
3206 }
3207
3208 for (auto *MBB : BlockOrders) {
3209 Worklist.push(x: BBToOrder[MBB]);
3210 OnWorklist.insert(Ptr: MBB);
3211 }
3212
3213 // Iterate over all the blocks we selected, propagating the variables value.
3214 // This loop does two things:
3215 // * Eliminates un-necessary VPHIs in vlocJoin,
3216 // * Evaluates the blocks transfer function (i.e. variable assignments) and
3217 // stores the result to the blocks live-outs.
3218 // Always evaluate the transfer function on the first iteration, and when
3219 // the live-ins change thereafter.
3220 bool FirstTrip = true;
3221 while (!Worklist.empty() || !Pending.empty()) {
3222 while (!Worklist.empty()) {
3223 auto *MBB = OrderToBB[Worklist.top()];
3224 CurBB = MBB->getNumber();
3225 Worklist.pop();
3226
3227 auto LiveInsIt = LiveInIdx.find(Val: MBB);
3228 assert(LiveInsIt != LiveInIdx.end());
3229 DbgValue *LiveIn = LiveInsIt->second;
3230
3231 // Join values from predecessors. Updates LiveInIdx, and writes output
3232 // into JoinedInLocs.
3233 bool InLocsChanged =
3234 vlocJoin(MBB&: *MBB, VLOCOutLocs&: LiveOutIdx, BlocksToExplore, LiveIn&: *LiveIn);
3235
3236 SmallVector<const MachineBasicBlock *, 8> Preds;
3237 for (const auto *Pred : MBB->predecessors())
3238 Preds.push_back(Elt: Pred);
3239
3240 // If this block's live-in value is a VPHI, try to pick a machine-value
3241 // for it. This makes the machine-value available and propagated
3242 // through all blocks by the time value propagation finishes. We can't
3243 // do this any earlier as it needs to read the block live-outs.
3244 if (LiveIn->Kind == DbgValue::VPHI && LiveIn->BlockNo == (int)CurBB) {
        // There's a small possibility that on a preceding path, a VPHI is
3246 // eliminated and transitions from VPHI-with-location to
3247 // live-through-value. As a result, the selected location of any VPHI
3248 // might change, so we need to re-compute it on each iteration.
3249 SmallVector<DbgOpID> JoinedOps;
3250
3251 if (pickVPHILoc(OutValues&: JoinedOps, MBB: *MBB, LiveOuts: LiveOutIdx, MOutLocs, BlockOrders: Preds)) {
3252 bool NewLocPicked = !equal(LRange: LiveIn->getDbgOpIDs(), RRange&: JoinedOps);
3253 InLocsChanged |= NewLocPicked;
3254 if (NewLocPicked)
3255 LiveIn->setDbgOpIDs(JoinedOps);
3256 }
3257 }
3258
3259 if (!InLocsChanged && !FirstTrip)
3260 continue;
3261
3262 DbgValue *LiveOut = LiveOutIdx[MBB];
3263 bool OLChanged = false;
3264
3265 // Do transfer function.
3266 auto &VTracker = AllTheVLocs[MBB->getNumber()];
3267 auto TransferIt = VTracker.Vars.find(Key: Var);
3268 if (TransferIt != VTracker.Vars.end()) {
3269 // Erase on empty transfer (DBG_VALUE $noreg).
3270 if (TransferIt->second.Kind == DbgValue::Undef) {
3271 DbgValue NewVal(MBB->getNumber(), EmptyProperties, DbgValue::NoVal);
3272 if (*LiveOut != NewVal) {
3273 *LiveOut = NewVal;
3274 OLChanged = true;
3275 }
3276 } else {
3277 // Insert new variable value; or overwrite.
3278 if (*LiveOut != TransferIt->second) {
3279 *LiveOut = TransferIt->second;
3280 OLChanged = true;
3281 }
3282 }
3283 } else {
3284 // Just copy live-ins to live-outs, for anything not transferred.
3285 if (*LiveOut != *LiveIn) {
3286 *LiveOut = *LiveIn;
3287 OLChanged = true;
3288 }
3289 }
3290
3291 // If no live-out value changed, there's no need to explore further.
3292 if (!OLChanged)
3293 continue;
3294
3295 // We should visit all successors. Ensure we'll visit any non-backedge
3296 // successors during this dataflow iteration; book backedge successors
3297 // to be visited next time around.
3298 for (auto *s : MBB->successors()) {
3299 // Ignore out of scope / not-to-be-explored successors.
3300 if (!LiveInIdx.contains(Val: s))
3301 continue;
3302
3303 if (BBToOrder[s] > BBToOrder[MBB]) {
3304 if (OnWorklist.insert(Ptr: s).second)
3305 Worklist.push(x: BBToOrder[s]);
3306 } else if (OnPending.insert(Ptr: s).second && (FirstTrip || OLChanged)) {
3307 Pending.push(x: BBToOrder[s]);
3308 }
3309 }
3310 }
3311 Worklist.swap(pq&: Pending);
3312 std::swap(LHS&: OnWorklist, RHS&: OnPending);
3313 OnPending.clear();
3314 assert(Pending.empty());
3315 FirstTrip = false;
3316 }
3317
3318 // Save live-ins to output vector. Ignore any that are still marked as being
3319 // VPHIs with no location -- those are variables that we know the value of,
3320 // but are not actually available in the register file.
3321 for (auto *MBB : BlockOrders) {
3322 DbgValue *BlockLiveIn = LiveInIdx[MBB];
3323 if (BlockLiveIn->Kind == DbgValue::NoVal)
3324 continue;
3325 if (BlockLiveIn->isUnjoinedPHI())
3326 continue;
3327 if (BlockLiveIn->Kind == DbgValue::VPHI)
3328 BlockLiveIn->Kind = DbgValue::Def;
3329 assert(BlockLiveIn->Properties.DIExpr->getFragmentInfo() ==
3330 Var.getFragment() && "Fragment info missing during value prop");
3331 Output[MBB->getNumber()].push_back(Elt: std::make_pair(x: Var, y&: *BlockLiveIn));
3332 }
3333 } // Per-variable loop.
3334
3335 BlockOrders.clear();
3336 BlocksToExplore.clear();
3337}
3338
3339void InstrRefBasedLDV::placePHIsForSingleVarDefinition(
3340 const SmallPtrSetImpl<MachineBasicBlock *> &InScopeBlocks,
3341 MachineBasicBlock *AssignMBB, SmallVectorImpl<VLocTracker> &AllTheVLocs,
3342 const DebugVariable &Var, LiveInsT &Output) {
3343 // If there is a single definition of the variable, then working out it's
3344 // value everywhere is very simple: it's every block dominated by the
3345 // definition. At the dominance frontier, the usual algorithm would:
3346 // * Place PHIs,
3347 // * Propagate values into them,
3348 // * Find there's no incoming variable value from the other incoming branches
3349 // of the dominance frontier,
3350 // * Specify there's no variable value in blocks past the frontier.
3351 // This is a common case, hence it's worth special-casing it.
3352
3353 // Pick out the variables value from the block transfer function.
3354 VLocTracker &VLocs = AllTheVLocs[AssignMBB->getNumber()];
3355 auto ValueIt = VLocs.Vars.find(Key: Var);
3356 const DbgValue &Value = ValueIt->second;
3357
3358 // If it's an explicit assignment of "undef", that means there is no location
3359 // anyway, anywhere.
3360 if (Value.Kind == DbgValue::Undef)
3361 return;
3362
3363 // Assign the variable value to entry to each dominated block that's in scope.
3364 // Skip the definition block -- it's assigned the variable value in the middle
3365 // of the block somewhere.
3366 for (auto *ScopeBlock : InScopeBlocks) {
3367 if (!DomTree->properlyDominates(A: AssignMBB, B: ScopeBlock))
3368 continue;
3369
3370 Output[ScopeBlock->getNumber()].push_back(Elt: {Var, Value});
3371 }
3372
3373 // All blocks that aren't dominated have no live-in value, thus no variable
3374 // value will be given to them.
3375}
3376
3377#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3378void InstrRefBasedLDV::dump_mloc_transfer(
3379 const MLocTransferMap &mloc_transfer) const {
3380 for (const auto &P : mloc_transfer) {
3381 std::string foo = MTracker->LocIdxToName(Idx: P.first);
3382 std::string bar = MTracker->IDAsString(Num: P.second);
3383 dbgs() << "Loc " << foo << " --> " << bar << "\n";
3384 }
3385}
3386#endif
3387
3388void InstrRefBasedLDV::initialSetup(MachineFunction &MF) {
3389 // Build some useful data structures.
3390
3391 LLVMContext &Context = MF.getFunction().getContext();
3392 EmptyExpr = DIExpression::get(Context, Elements: {});
3393
3394 auto hasNonArtificialLocation = [](const MachineInstr &MI) -> bool {
3395 if (const DebugLoc &DL = MI.getDebugLoc())
3396 return DL.getLine() != 0;
3397 return false;
3398 };
3399 // Collect a set of all the artificial blocks.
3400 for (auto &MBB : MF)
3401 if (none_of(Range: MBB.instrs(), P: hasNonArtificialLocation))
3402 ArtificialBlocks.insert(Ptr: &MBB);
3403
3404 // Compute mappings of block <=> RPO order.
3405 ReversePostOrderTraversal<MachineFunction *> RPOT(&MF);
3406 unsigned int RPONumber = 0;
3407 auto processMBB = [&](MachineBasicBlock *MBB) {
3408 OrderToBB[RPONumber] = MBB;
3409 BBToOrder[MBB] = RPONumber;
3410 BBNumToRPO[MBB->getNumber()] = RPONumber;
3411 ++RPONumber;
3412 };
3413 for (MachineBasicBlock *MBB : RPOT)
3414 processMBB(MBB);
3415 for (MachineBasicBlock &MBB : MF)
3416 if (!BBToOrder.contains(Val: &MBB))
3417 processMBB(&MBB);
3418
3419 // Order value substitutions by their "source" operand pair, for quick lookup.
3420 llvm::sort(C&: MF.DebugValueSubstitutions);
3421
3422#ifdef EXPENSIVE_CHECKS
3423 // As an expensive check, test whether there are any duplicate substitution
3424 // sources in the collection.
3425 if (MF.DebugValueSubstitutions.size() > 2) {
3426 for (auto It = MF.DebugValueSubstitutions.begin();
3427 It != std::prev(MF.DebugValueSubstitutions.end()); ++It) {
3428 assert(It->Src != std::next(It)->Src && "Duplicate variable location "
3429 "substitution seen");
3430 }
3431 }
3432#endif
3433}
3434
// Produce an "ejection map" for blocks, i.e., what's the highest-numbered
// lexical scope it's used in. When exploring in DFS order and we pass that
// scope, the block can be processed and any tracking information freed.
//
// \param EjectionMap Output: indexed by block number, set to the DFSOut of
//        the last (reverse-DFS-first) scope that uses the block. Entries
//        still holding 0 are never claimed by any scope.
// \param ScopeToDILocation Maps each scope to a DILocation describing it;
//        scopes absent from this map tracked no assignments and are skipped.
// \param ScopeToAssignBlocks Maps each scope to the blocks where assignments
//        for it occur, fed to getBlocksForScope.
void InstrRefBasedLDV::makeDepthFirstEjectionMap(
    SmallVectorImpl<unsigned> &EjectionMap,
    const ScopeToDILocT &ScopeToDILocation,
    ScopeToAssignBlocksT &ScopeToAssignBlocks) {
  SmallPtrSet<const MachineBasicBlock *, 8> BlocksToExplore;
  // Explicit DFS stack of (scope, index of next child to visit). The index
  // counts *down*, as children are visited in reverse order.
  SmallVector<std::pair<LexicalScope *, ssize_t>, 4> WorkStack;
  auto *TopScope = LS.getCurrentFunctionScope();

  // Unlike lexical scope explorers, we explore in reverse order, to find the
  // "last" lexical scope used for each block early.
  // NOTE(review): for a childless scope, size() - 1 wraps as size_t before
  // converting to ssize_t; this appears to rely on the conversion yielding -1
  // ("no children") -- confirm against the upstream implementation.
  WorkStack.push_back(Elt: {TopScope, TopScope->getChildren().size() - 1});

  while (!WorkStack.empty()) {
    auto &ScopePosition = WorkStack.back();
    LexicalScope *WS = ScopePosition.first;
    // Post-decrement: process this child index now, next iteration for this
    // scope sees the previous one.
    ssize_t ChildNum = ScopePosition.second--;

    const SmallVectorImpl<LexicalScope *> &Children = WS->getChildren();
    if (ChildNum >= 0) {
      // If ChildNum is non-negative, there are remaining children to explore.
      // Push the child and its children-count onto the stack.
      auto &ChildScope = Children[ChildNum];
      WorkStack.push_back(
          Elt: std::make_pair(x: ChildScope, y: ChildScope->getChildren().size() - 1));
    } else {
      // All children done: pop, then claim this scope's blocks.
      WorkStack.pop_back();

      // We've explored all children and any later blocks: examine all blocks
      // in our scope. If they haven't yet had an ejection number set, then
      // this scope will be the last to use that block.
      auto DILocationIt = ScopeToDILocation.find(Val: WS);
      if (DILocationIt != ScopeToDILocation.end()) {
        getBlocksForScope(DILoc: DILocationIt->second, BlocksToExplore,
                          AssignBlocks: ScopeToAssignBlocks.find(Val: WS)->second);
        for (const auto *MBB : BlocksToExplore) {
          unsigned BBNum = MBB->getNumber();
          // First scope to reach the block (in reverse-DFS order) wins.
          if (EjectionMap[BBNum] == 0)
            EjectionMap[BBNum] = WS->getDFSOut();
        }

        BlocksToExplore.clear();
      }
    }
  }
}
3483
// Solve the variable-value dataflow problem one lexical scope at a time, in
// depth-first scope order, and emit DBG_VALUE transfers as soon as a block is
// no longer needed by any remaining scope ("ejection"). Working depth-first
// and ejecting eagerly bounds peak memory: each block's machine-value tables
// and live-in variable values are freed at ejection. Returns the result of
// emitTransfers, i.e. whether any DBG_VALUEs were inserted.
bool InstrRefBasedLDV::depthFirstVLocAndEmit(
    unsigned MaxNumBlocks, const ScopeToDILocT &ScopeToDILocation,
    const ScopeToVarsT &ScopeToVars, ScopeToAssignBlocksT &ScopeToAssignBlocks,
    LiveInsT &Output, FuncValueTable &MOutLocs, FuncValueTable &MInLocs,
    SmallVectorImpl<VLocTracker> &AllTheVLocs, MachineFunction &MF,
    DenseMap<DebugVariable, unsigned> &AllVarsNumbering,
    const TargetPassConfig &TPC) {
  TTracker = new TransferTracker(TII, MTracker, MF, *TRI, CalleeSavedRegs, TPC);
  unsigned NumLocs = MTracker->getNumLocs();
  VTracker = nullptr;

  // No scopes? No variable locations.
  if (!LS.getCurrentFunctionScope())
    return false;

  // Build map from block number to the last scope that uses the block.
  SmallVector<unsigned, 16> EjectionMap;
  EjectionMap.resize(N: MaxNumBlocks, NV: 0);
  makeDepthFirstEjectionMap(EjectionMap, ScopeToDILocation,
                            ScopeToAssignBlocks);

  // Helper lambda for ejecting a block -- if nothing is going to use the block,
  // we can translate the variable location information into DBG_VALUEs and then
  // free all of InstrRefBasedLDV's data structures.
  auto EjectBlock = [&](MachineBasicBlock &MBB) -> void {
    unsigned BBNum = MBB.getNumber();
    AllTheVLocs[BBNum].clear();

    // Prime the transfer-tracker, and then step through all the block
    // instructions, installing transfers.
    MTracker->reset();
    MTracker->loadFromArray(Locs&: MInLocs[MBB], NewCurBB: BBNum);
    TTracker->loadInlocs(MBB, MLocs&: MInLocs[MBB], DbgOpStore, VLocs: Output[BBNum], NumLocs);

    CurBB = BBNum;
    CurInst = 1;
    for (auto &MI : MBB) {
      process(MI, MLiveOuts: &MOutLocs, MLiveIns: &MInLocs);
      TTracker->checkInstForNewValues(Inst: CurInst, pos: MI.getIterator());
      ++CurInst;
    }

    // Free machine-location tables for this block.
    MInLocs.ejectTableForBlock(MBB);
    MOutLocs.ejectTableForBlock(MBB);
    // We don't need live-in variable values for this block either.
    Output[BBNum].clear();
    AllTheVLocs[BBNum].clear();
  };

  SmallPtrSet<const MachineBasicBlock *, 8> BlocksToExplore;
  // Explicit DFS stack of (scope, index of next child to visit); here the
  // index counts *up*, children are visited in forward order.
  SmallVector<std::pair<LexicalScope *, ssize_t>, 4> WorkStack;
  WorkStack.push_back(Elt: {LS.getCurrentFunctionScope(), 0});
  unsigned HighestDFSIn = 0;

  // Proceed to explore in depth first order.
  while (!WorkStack.empty()) {
    auto &ScopePosition = WorkStack.back();
    LexicalScope *WS = ScopePosition.first;
    ssize_t ChildNum = ScopePosition.second++;

    // We observe scopes with children twice here, once descending in, once
    // ascending out of the scope nest. Use HighestDFSIn as a ratchet to ensure
    // we don't process a scope twice. Additionally, ignore scopes that don't
    // have a DILocation -- by proxy, this means we never tracked any variable
    // assignments in that scope.
    auto DILocIt = ScopeToDILocation.find(Val: WS);
    if (HighestDFSIn <= WS->getDFSIn() && DILocIt != ScopeToDILocation.end()) {
      const DILocation *DILoc = DILocIt->second;
      auto &VarsWeCareAbout = ScopeToVars.find(Val: WS)->second;
      auto &BlocksInScope = ScopeToAssignBlocks.find(Val: WS)->second;

      // Solve the variable-value problem for this scope's variables.
      buildVLocValueMap(DILoc, VarsWeCareAbout, AssignBlocks&: BlocksInScope, Output, MOutLocs,
                        MInLocs, AllTheVLocs);
    }

    HighestDFSIn = std::max(a: HighestDFSIn, b: WS->getDFSIn());

    // Descend into any scope nests.
    const SmallVectorImpl<LexicalScope *> &Children = WS->getChildren();
    if (ChildNum < (ssize_t)Children.size()) {
      // There are children to explore -- push onto stack and continue.
      auto &ChildScope = Children[ChildNum];
      WorkStack.push_back(Elt: std::make_pair(x: ChildScope, y: 0));
    } else {
      WorkStack.pop_back();

      // We've explored a leaf, or have explored all the children of a scope.
      // Try to eject any blocks where this is the last scope it's relevant to.
      auto DILocationIt = ScopeToDILocation.find(Val: WS);
      if (DILocationIt == ScopeToDILocation.end())
        continue;

      getBlocksForScope(DILoc: DILocationIt->second, BlocksToExplore,
                        AssignBlocks: ScopeToAssignBlocks.find(Val: WS)->second);
      // Eject blocks whose ejection number matches this scope's DFSOut --
      // set by makeDepthFirstEjectionMap for the last scope to use the block.
      for (const auto *MBB : BlocksToExplore)
        if (WS->getDFSOut() == EjectionMap[MBB->getNumber()])
          EjectBlock(const_cast<MachineBasicBlock &>(*MBB));

      BlocksToExplore.clear();
    }
  }

  // Some artificial blocks may not have been ejected, meaning they're not
  // connected to an actual legitimate scope. This can technically happen
  // with things like the entry block. In theory, we shouldn't need to do
  // anything for such out-of-scope blocks, but for the sake of being similar
  // to VarLocBasedLDV, eject these too.
  for (auto *MBB : ArtificialBlocks)
    if (MInLocs.hasTableFor(MBB&: *MBB))
      EjectBlock(*MBB);

  return emitTransfers(AllVarsNumbering);
}
3598
3599bool InstrRefBasedLDV::emitTransfers(
3600 DenseMap<DebugVariable, unsigned> &AllVarsNumbering) {
3601 // Go through all the transfers recorded in the TransferTracker -- this is
3602 // both the live-ins to a block, and any movements of values that happen
3603 // in the middle.
3604 for (const auto &P : TTracker->Transfers) {
3605 // We have to insert DBG_VALUEs in a consistent order, otherwise they
3606 // appear in DWARF in different orders. Use the order that they appear
3607 // when walking through each block / each instruction, stored in
3608 // AllVarsNumbering.
3609 SmallVector<std::pair<unsigned, MachineInstr *>> Insts;
3610 for (MachineInstr *MI : P.Insts) {
3611 DebugVariable Var(MI->getDebugVariable(), MI->getDebugExpression(),
3612 MI->getDebugLoc()->getInlinedAt());
3613 Insts.emplace_back(Args&: AllVarsNumbering.find(Val: Var)->second, Args&: MI);
3614 }
3615 llvm::sort(C&: Insts, Comp: llvm::less_first());
3616
3617 // Insert either before or after the designated point...
3618 if (P.MBB) {
3619 MachineBasicBlock &MBB = *P.MBB;
3620 for (const auto &Pair : Insts)
3621 MBB.insert(I: P.Pos, M: Pair.second);
3622 } else {
3623 // Terminators, like tail calls, can clobber things. Don't try and place
3624 // transfers after them.
3625 if (P.Pos->isTerminator())
3626 continue;
3627
3628 MachineBasicBlock &MBB = *P.Pos->getParent();
3629 for (const auto &Pair : Insts)
3630 MBB.insertAfterBundle(I: P.Pos, MI: Pair.second);
3631 }
3632 }
3633
3634 return TTracker->Transfers.size() != 0;
3635}
3636
/// Calculate the liveness information for the given machine function and
/// extend ranges across basic blocks.
///
/// Top-level driver: solves the machine-value dataflow problem, collects
/// per-block variable assignments, then (unless the function exceeds both
/// \p InputBBLimit and \p InputDbgValLimit) solves the variable-value problem
/// scope-by-scope and emits DBG_VALUEs. Returns true if any DBG_VALUEs were
/// inserted. All per-function state is torn down before returning.
bool InstrRefBasedLDV::ExtendRanges(MachineFunction &MF,
                                    MachineDominatorTree *DomTree,
                                    TargetPassConfig *TPC,
                                    unsigned InputBBLimit,
                                    unsigned InputDbgValLimit) {
  // No subprogram means this function contains no debuginfo.
  if (!MF.getFunction().getSubprogram())
    return false;

  LLVM_DEBUG(dbgs() << "\nDebug Range Extension\n");
  this->TPC = TPC;

  // Cache target hooks and analyses for the duration of this run.
  this->DomTree = DomTree;
  TRI = MF.getSubtarget().getRegisterInfo();
  MRI = &MF.getRegInfo();
  TII = MF.getSubtarget().getInstrInfo();
  TFI = MF.getSubtarget().getFrameLowering();
  TFI->getCalleeSaves(MF, SavedRegs&: CalleeSavedRegs);
  MFI = &MF.getFrameInfo();
  LS.initialize(MF);

  // Remember the stack-probe symbol if probe calls modify SP, so such calls
  // can be recognised later.
  const auto &STI = MF.getSubtarget();
  AdjustsStackInCalls = MFI->adjustsStack() &&
                        STI.getFrameLowering()->stackProbeFunctionModifiesSP();
  if (AdjustsStackInCalls)
    StackProbeSymbolName = STI.getTargetLowering()->getStackProbeSymbolName(MF);

  MTracker =
      new MLocTracker(MF, *TII, *TRI, *MF.getSubtarget().getTargetLowering());
  VTracker = nullptr;
  TTracker = nullptr;

  SmallVector<MLocTransferMap, 32> MLocTransfer;
  SmallVector<VLocTracker, 8> vlocs;
  LiveInsT SavedLiveIns;

  // Per-block containers below are indexed by block number, which may be
  // sparse: size them to the highest block number plus one.
  int MaxNumBlocks = -1;
  for (auto &MBB : MF)
    MaxNumBlocks = std::max(a: MBB.getNumber(), b: MaxNumBlocks);
  assert(MaxNumBlocks >= 0);
  ++MaxNumBlocks;

  initialSetup(MF);

  MLocTransfer.resize(N: MaxNumBlocks);
  vlocs.resize(N: MaxNumBlocks, NV: VLocTracker(OverlapFragments, EmptyExpr));
  SavedLiveIns.resize(N: MaxNumBlocks);

  produceMLocTransferFunction(MF, MLocTransfer, MaxNumBlocks);

  // Allocate and initialize two array-of-arrays for the live-in and live-out
  // machine values. The outer dimension is the block number; while the inner
  // dimension is a LocIdx from MLocTracker.
  unsigned NumLocs = MTracker->getNumLocs();
  FuncValueTable MOutLocs(MaxNumBlocks, NumLocs);
  FuncValueTable MInLocs(MaxNumBlocks, NumLocs);

  // Solve the machine value dataflow problem using the MLocTransfer function,
  // storing the computed live-ins / live-outs into the array-of-arrays. We use
  // both live-ins and live-outs for decision making in the variable value
  // dataflow problem.
  buildMLocValueMap(MF, MInLocs, MOutLocs, MLocTransfer);

  // Patch up debug phi numbers, turning unknown block-live-in values into
  // either live-through machine values, or PHIs.
  for (auto &DBG_PHI : DebugPHINumToValue) {
    // Identify unresolved block-live-ins.
    if (!DBG_PHI.ValueRead)
      continue;

    ValueIDNum &Num = *DBG_PHI.ValueRead;
    if (!Num.isPHI())
      continue;

    unsigned BlockNo = Num.getBlock();
    LocIdx LocNo = Num.getLoc();
    ValueIDNum ResolvedValue = MInLocs[BlockNo][LocNo.asU64()];
    // If there is no resolved value for this live-in then it is not directly
    // reachable from the entry block -- model it as a PHI on entry to this
    // block, which means we leave the ValueIDNum unchanged.
    if (ResolvedValue != ValueIDNum::EmptyValue)
      Num = ResolvedValue;
  }
  // Later, we'll be looking up ranges of instruction numbers.
  llvm::sort(C&: DebugPHINumToValue);

  // Walk back through each block / instruction, collecting DBG_VALUE
  // instructions and recording what machine value their operands refer to.
  for (auto &OrderPair : OrderToBB) {
    MachineBasicBlock &MBB = *OrderPair.second;
    CurBB = MBB.getNumber();
    VTracker = &vlocs[CurBB];
    VTracker->MBB = &MBB;
    // Seed the machine-location tracker with this block's computed live-ins.
    MTracker->loadFromArray(Locs&: MInLocs[MBB], NewCurBB: CurBB);
    CurInst = 1;
    for (auto &MI : MBB) {
      process(MI, MLiveOuts: &MOutLocs, MLiveIns: &MInLocs);
      ++CurInst;
    }
    MTracker->reset();
  }

  // Number all variables in the order that they appear, to be used as a stable
  // insertion order later.
  DenseMap<DebugVariable, unsigned> AllVarsNumbering;

  // Map from one LexicalScope to all the variables in that scope.
  ScopeToVarsT ScopeToVars;

  // Map from One lexical scope to all blocks where assignments happen for
  // that scope.
  ScopeToAssignBlocksT ScopeToAssignBlocks;

  // Store map of DILocations that describes scopes.
  ScopeToDILocT ScopeToDILocation;

  // To mirror old LiveDebugValues, enumerate variables in RPOT order. Otherwise
  // the order is unimportant, it just has to be stable.
  unsigned VarAssignCount = 0;
  for (unsigned int I = 0; I < OrderToBB.size(); ++I) {
    auto *MBB = OrderToBB[I];
    auto *VTracker = &vlocs[MBB->getNumber()];
    // Collect each variable with a DBG_VALUE in this block.
    for (auto &idx : VTracker->Vars) {
      const auto &Var = idx.first;
      const DILocation *ScopeLoc = VTracker->Scopes[Var];
      assert(ScopeLoc != nullptr);
      auto *Scope = LS.findLexicalScope(DL: ScopeLoc);

      // No insts in scope -> shouldn't have been recorded.
      assert(Scope != nullptr);

      // insert() is a no-op for already-seen variables, so the value is the
      // index of the variable's first appearance.
      AllVarsNumbering.insert(KV: std::make_pair(x: Var, y: AllVarsNumbering.size()));
      ScopeToVars[Scope].insert(V: Var);
      ScopeToAssignBlocks[Scope].insert(Ptr: VTracker->MBB);
      ScopeToDILocation[Scope] = ScopeLoc;
      ++VarAssignCount;
    }
  }

  bool Changed = false;

  // If we have an extremely large number of variable assignments and blocks,
  // bail out at this point. We've burnt some time doing analysis already,
  // however we should cut our losses.
  if ((unsigned)MaxNumBlocks > InputBBLimit &&
      VarAssignCount > InputDbgValLimit) {
    LLVM_DEBUG(dbgs() << "Disabling InstrRefBasedLDV: " << MF.getName()
                      << " has " << MaxNumBlocks << " basic blocks and "
                      << VarAssignCount
                      << " variable assignments, exceeding limits.\n");
  } else {
    // Solve the variable value problem and emit DBG_VALUEs to blocks using a
    // lexical-scope-depth search, ejecting per-block state as soon as it is
    // no longer needed.
    Changed = depthFirstVLocAndEmit(
        MaxNumBlocks, ScopeToDILocation, ScopeToVars, ScopeToAssignBlocks,
        Output&: SavedLiveIns, MOutLocs, MInLocs, AllTheVLocs&: vlocs, MF, AllVarsNumbering, TPC: *TPC);
  }

  // Tear down all per-function state.
  delete MTracker;
  delete TTracker;
  MTracker = nullptr;
  VTracker = nullptr;
  TTracker = nullptr;

  ArtificialBlocks.clear();
  OrderToBB.clear();
  BBToOrder.clear();
  BBNumToRPO.clear();
  DebugInstrNumToInstr.clear();
  DebugPHINumToValue.clear();
  OverlapFragments.clear();
  SeenFragments.clear();
  SeenDbgPHIs.clear();
  DbgOpStore.clear();

  return Changed;
}
3818
3819LDVImpl *llvm::makeInstrRefBasedLiveDebugValues() {
3820 return new InstrRefBasedLDV();
3821}
3822
3823namespace {
3824class LDVSSABlock;
3825class LDVSSAUpdater;
3826
// Pick a type to identify incoming block values as we construct SSA. We
// can't use anything more robust than an integer unfortunately, as SSAUpdater
// expects to zero-initialize the type.
using BlockValueNum = uint64_t;
3831
/// Represents an SSA PHI node for the SSA updater class. Contains the block
/// this PHI is in, the value number it would have, and the expected incoming
/// values from parent blocks.
class LDVSSAPhi {
public:
  /// (Predecessor block, incoming value number) pairs for this PHI.
  SmallVector<std::pair<LDVSSABlock *, BlockValueNum>, 4> IncomingValues;
  /// The block this PHI node lives in.
  LDVSSABlock *ParentBlock;
  /// The value number this PHI node produces.
  BlockValueNum PHIValNum;
  LDVSSAPhi(BlockValueNum PHIValNum, LDVSSABlock *ParentBlock)
      : ParentBlock(ParentBlock), PHIValNum(PHIValNum) {}

  /// Return the block containing this PHI (SSAUpdater interface).
  LDVSSABlock *getParent() { return ParentBlock; }
};
3845
/// Thin wrapper around a block predecessor iterator. Only difference from a
/// normal block iterator is that it dereferences to an LDVSSABlock.
class LDVSSABlockIterator {
public:
  /// The underlying machine-CFG predecessor iterator.
  MachineBasicBlock::pred_iterator PredIt;
  /// Updater used to translate each MachineBasicBlock into its LDVSSABlock
  /// wrapper on dereference.
  LDVSSAUpdater &Updater;

  LDVSSABlockIterator(MachineBasicBlock::pred_iterator PredIt,
                      LDVSSAUpdater &Updater)
      : PredIt(PredIt), Updater(Updater) {}

  bool operator!=(const LDVSSABlockIterator &OtherIt) const {
    return OtherIt.PredIt != PredIt;
  }

  LDVSSABlockIterator &operator++() {
    ++PredIt;
    return *this;
  }

  /// Dereference to the wrapper block; defined out-of-line as it needs
  /// LDVSSAUpdater to be complete.
  LDVSSABlock *operator*();
};
3868
3869/// Thin wrapper around a block for SSA Updater interface. Necessary because
3870/// we need to track the PHI value(s) that we may have observed as necessary
3871/// in this block.
3872class LDVSSABlock {
3873public:
3874 MachineBasicBlock &BB;
3875 LDVSSAUpdater &Updater;
3876 using PHIListT = SmallVector<LDVSSAPhi, 1>;
3877 /// List of PHIs in this block. There should only ever be one.
3878 PHIListT PHIList;
3879
3880 LDVSSABlock(MachineBasicBlock &BB, LDVSSAUpdater &Updater)
3881 : BB(BB), Updater(Updater) {}
3882
3883 LDVSSABlockIterator succ_begin() {
3884 return LDVSSABlockIterator(BB.succ_begin(), Updater);
3885 }
3886
3887 LDVSSABlockIterator succ_end() {
3888 return LDVSSABlockIterator(BB.succ_end(), Updater);
3889 }
3890
3891 /// SSAUpdater has requested a PHI: create that within this block record.
3892 LDVSSAPhi *newPHI(BlockValueNum Value) {
3893 PHIList.emplace_back(Args&: Value, Args: this);
3894 return &PHIList.back();
3895 }
3896
3897 /// SSAUpdater wishes to know what PHIs already exist in this block.
3898 PHIListT &phis() { return PHIList; }
3899};
3900
3901/// Utility class for the SSAUpdater interface: tracks blocks, PHIs and values
3902/// while SSAUpdater is exploring the CFG. It's passed as a handle / baton to
3903// SSAUpdaterTraits<LDVSSAUpdater>.
3904class LDVSSAUpdater {
3905public:
3906 /// Map of value numbers to PHI records.
3907 DenseMap<BlockValueNum, LDVSSAPhi *> PHIs;
3908 /// Map of which blocks generate Undef values -- blocks that are not
3909 /// dominated by any Def.
3910 DenseMap<MachineBasicBlock *, BlockValueNum> UndefMap;
3911 /// Map of machine blocks to our own records of them.
3912 DenseMap<MachineBasicBlock *, LDVSSABlock *> BlockMap;
3913 /// Machine location where any PHI must occur.
3914 LocIdx Loc;
3915 /// Table of live-in machine value numbers for blocks / locations.
3916 const FuncValueTable &MLiveIns;
3917
3918 LDVSSAUpdater(LocIdx L, const FuncValueTable &MLiveIns)
3919 : Loc(L), MLiveIns(MLiveIns) {}
3920
3921 void reset() {
3922 for (auto &Block : BlockMap)
3923 delete Block.second;
3924
3925 PHIs.clear();
3926 UndefMap.clear();
3927 BlockMap.clear();
3928 }
3929
3930 ~LDVSSAUpdater() { reset(); }
3931
3932 /// For a given MBB, create a wrapper block for it. Stores it in the
3933 /// LDVSSAUpdater block map.
3934 LDVSSABlock *getSSALDVBlock(MachineBasicBlock *BB) {
3935 auto it = BlockMap.find(Val: BB);
3936 if (it == BlockMap.end()) {
3937 BlockMap[BB] = new LDVSSABlock(*BB, *this);
3938 it = BlockMap.find(Val: BB);
3939 }
3940 return it->second;
3941 }
3942
3943 /// Find the live-in value number for the given block. Looks up the value at
3944 /// the PHI location on entry.
3945 BlockValueNum getValue(LDVSSABlock *LDVBB) {
3946 return MLiveIns[LDVBB->BB][Loc.asU64()].asU64();
3947 }
3948};
3949
// Dereference: fetch (creating on first access) the LDVSSABlock wrapper for
// the predecessor block this iterator points at.
LDVSSABlock *LDVSSABlockIterator::operator*() {
  return Updater.getSSALDVBlock(BB: *PredIt);
}
3953
3954#ifndef NDEBUG
3955
3956raw_ostream &operator<<(raw_ostream &out, const LDVSSAPhi &PHI) {
3957 out << "SSALDVPHI " << PHI.PHIValNum;
3958 return out;
3959}
3960
3961#endif
3962
3963} // namespace
3964
3965namespace llvm {
3966
3967/// Template specialization to give SSAUpdater access to CFG and value
3968/// information. SSAUpdater calls methods in these traits, passing in the
3969/// LDVSSAUpdater object, to learn about blocks and the values they define.
3970/// It also provides methods to create PHI nodes and track them.
3971template <> class SSAUpdaterTraits<LDVSSAUpdater> {
3972public:
3973 using BlkT = LDVSSABlock;
3974 using ValT = BlockValueNum;
3975 using PhiT = LDVSSAPhi;
3976 using BlkSucc_iterator = LDVSSABlockIterator;
3977
3978 // Methods to access block successors -- dereferencing to our wrapper class.
3979 static BlkSucc_iterator BlkSucc_begin(BlkT *BB) { return BB->succ_begin(); }
3980 static BlkSucc_iterator BlkSucc_end(BlkT *BB) { return BB->succ_end(); }
3981
3982 /// Iterator for PHI operands.
3983 class PHI_iterator {
3984 private:
3985 LDVSSAPhi *PHI;
3986 unsigned Idx;
3987
3988 public:
3989 explicit PHI_iterator(LDVSSAPhi *P) // begin iterator
3990 : PHI(P), Idx(0) {}
3991 PHI_iterator(LDVSSAPhi *P, bool) // end iterator
3992 : PHI(P), Idx(PHI->IncomingValues.size()) {}
3993
3994 PHI_iterator &operator++() {
3995 Idx++;
3996 return *this;
3997 }
3998 bool operator==(const PHI_iterator &X) const { return Idx == X.Idx; }
3999 bool operator!=(const PHI_iterator &X) const { return !operator==(X); }
4000
4001 BlockValueNum getIncomingValue() { return PHI->IncomingValues[Idx].second; }
4002
4003 LDVSSABlock *getIncomingBlock() { return PHI->IncomingValues[Idx].first; }
4004 };
4005
4006 static inline PHI_iterator PHI_begin(PhiT *PHI) { return PHI_iterator(PHI); }
4007
4008 static inline PHI_iterator PHI_end(PhiT *PHI) {
4009 return PHI_iterator(PHI, true);
4010 }
4011
4012 /// FindPredecessorBlocks - Put the predecessors of BB into the Preds
4013 /// vector.
4014 static void FindPredecessorBlocks(LDVSSABlock *BB,
4015 SmallVectorImpl<LDVSSABlock *> *Preds) {
4016 for (MachineBasicBlock *Pred : BB->BB.predecessors())
4017 Preds->push_back(Elt: BB->Updater.getSSALDVBlock(BB: Pred));
4018 }
4019
4020 /// GetUndefVal - Normally creates an IMPLICIT_DEF instruction with a new
4021 /// register. For LiveDebugValues, represents a block identified as not having
4022 /// any DBG_PHI predecessors.
4023 static BlockValueNum GetUndefVal(LDVSSABlock *BB, LDVSSAUpdater *Updater) {
4024 // Create a value number for this block -- it needs to be unique and in the
4025 // "undef" collection, so that we know it's not real. Use a number
4026 // representing a PHI into this block.
4027 BlockValueNum Num = ValueIDNum(BB->BB.getNumber(), 0, Updater->Loc).asU64();
4028 Updater->UndefMap[&BB->BB] = Num;
4029 return Num;
4030 }
4031
4032 /// CreateEmptyPHI - Create a (representation of a) PHI in the given block.
4033 /// SSAUpdater will populate it with information about incoming values. The
4034 /// value number of this PHI is whatever the machine value number problem
4035 /// solution determined it to be. This includes non-phi values if SSAUpdater
4036 /// tries to create a PHI where the incoming values are identical.
4037 static BlockValueNum CreateEmptyPHI(LDVSSABlock *BB, unsigned NumPreds,
4038 LDVSSAUpdater *Updater) {
4039 BlockValueNum PHIValNum = Updater->getValue(LDVBB: BB);
4040 LDVSSAPhi *PHI = BB->newPHI(Value: PHIValNum);
4041 Updater->PHIs[PHIValNum] = PHI;
4042 return PHIValNum;
4043 }
4044
4045 /// AddPHIOperand - Add the specified value as an operand of the PHI for
4046 /// the specified predecessor block.
4047 static void AddPHIOperand(LDVSSAPhi *PHI, BlockValueNum Val, LDVSSABlock *Pred) {
4048 PHI->IncomingValues.push_back(Elt: std::make_pair(x&: Pred, y&: Val));
4049 }
4050
4051 /// ValueIsPHI - Check if the instruction that defines the specified value
4052 /// is a PHI instruction.
4053 static LDVSSAPhi *ValueIsPHI(BlockValueNum Val, LDVSSAUpdater *Updater) {
4054 return Updater->PHIs.lookup(Val);
4055 }
4056
4057 /// ValueIsNewPHI - Like ValueIsPHI but also check if the PHI has no source
4058 /// operands, i.e., it was just added.
4059 static LDVSSAPhi *ValueIsNewPHI(BlockValueNum Val, LDVSSAUpdater *Updater) {
4060 LDVSSAPhi *PHI = ValueIsPHI(Val, Updater);
4061 if (PHI && PHI->IncomingValues.size() == 0)
4062 return PHI;
4063 return nullptr;
4064 }
4065
4066 /// GetPHIValue - For the specified PHI instruction, return the value
4067 /// that it defines.
4068 static BlockValueNum GetPHIValue(LDVSSAPhi *PHI) { return PHI->PHIValNum; }
4069};
4070
4071} // end namespace llvm
4072
4073std::optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIs(
4074 MachineFunction &MF, const FuncValueTable &MLiveOuts,
4075 const FuncValueTable &MLiveIns, MachineInstr &Here, uint64_t InstrNum) {
4076 // This function will be called twice per DBG_INSTR_REF, and might end up
4077 // computing lots of SSA information: memoize it.
4078 auto SeenDbgPHIIt = SeenDbgPHIs.find(Val: std::make_pair(x: &Here, y&: InstrNum));
4079 if (SeenDbgPHIIt != SeenDbgPHIs.end())
4080 return SeenDbgPHIIt->second;
4081
4082 std::optional<ValueIDNum> Result =
4083 resolveDbgPHIsImpl(MF, MLiveOuts, MLiveIns, Here, InstrNum);
4084 SeenDbgPHIs.insert(KV: {std::make_pair(x: &Here, y&: InstrNum), Result});
4085 return Result;
4086}
4087
4088std::optional<ValueIDNum> InstrRefBasedLDV::resolveDbgPHIsImpl(
4089 MachineFunction &MF, const FuncValueTable &MLiveOuts,
4090 const FuncValueTable &MLiveIns, MachineInstr &Here, uint64_t InstrNum) {
4091 // Pick out records of DBG_PHI instructions that have been observed. If there
4092 // are none, then we cannot compute a value number.
4093 auto RangePair = std::equal_range(first: DebugPHINumToValue.begin(),
4094 last: DebugPHINumToValue.end(), val: InstrNum);
4095 auto LowerIt = RangePair.first;
4096 auto UpperIt = RangePair.second;
4097
4098 // No DBG_PHI means there can be no location.
4099 if (LowerIt == UpperIt)
4100 return std::nullopt;
4101
4102 // If any DBG_PHIs referred to a location we didn't understand, don't try to
4103 // compute a value. There might be scenarios where we could recover a value
4104 // for some range of DBG_INSTR_REFs, but at this point we can have high
4105 // confidence that we've seen a bug.
4106 auto DBGPHIRange = make_range(x: LowerIt, y: UpperIt);
4107 for (const DebugPHIRecord &DBG_PHI : DBGPHIRange)
4108 if (!DBG_PHI.ValueRead)
4109 return std::nullopt;
4110
4111 // If there's only one DBG_PHI, then that is our value number.
4112 if (std::distance(first: LowerIt, last: UpperIt) == 1)
4113 return *LowerIt->ValueRead;
4114
4115 // Pick out the location (physreg, slot) where any PHIs must occur. It's
4116 // technically possible for us to merge values in different registers in each
4117 // block, but highly unlikely that LLVM will generate such code after register
4118 // allocation.
4119 LocIdx Loc = *LowerIt->ReadLoc;
4120
4121 // We have several DBG_PHIs, and a use position (the Here inst). All each
4122 // DBG_PHI does is identify a value at a program position. We can treat each
4123 // DBG_PHI like it's a Def of a value, and the use position is a Use of a
4124 // value, just like SSA. We use the bulk-standard LLVM SSA updater class to
4125 // determine which Def is used at the Use, and any PHIs that happen along
4126 // the way.
4127 // Adapted LLVM SSA Updater:
4128 LDVSSAUpdater Updater(Loc, MLiveIns);
4129 // Map of which Def or PHI is the current value in each block.
4130 DenseMap<LDVSSABlock *, BlockValueNum> AvailableValues;
4131 // Set of PHIs that we have created along the way.
4132 SmallVector<LDVSSAPhi *, 8> CreatedPHIs;
4133
4134 // Each existing DBG_PHI is a Def'd value under this model. Record these Defs
4135 // for the SSAUpdater.
4136 for (const auto &DBG_PHI : DBGPHIRange) {
4137 LDVSSABlock *Block = Updater.getSSALDVBlock(BB: DBG_PHI.MBB);
4138 const ValueIDNum &Num = *DBG_PHI.ValueRead;
4139 AvailableValues.insert(KV: std::make_pair(x&: Block, y: Num.asU64()));
4140 }
4141
4142 LDVSSABlock *HereBlock = Updater.getSSALDVBlock(BB: Here.getParent());
4143 const auto &AvailIt = AvailableValues.find(Val: HereBlock);
4144 if (AvailIt != AvailableValues.end()) {
4145 // Actually, we already know what the value is -- the Use is in the same
4146 // block as the Def.
4147 return ValueIDNum::fromU64(v: AvailIt->second);
4148 }
4149
4150 // Otherwise, we must use the SSA Updater. It will identify the value number
4151 // that we are to use, and the PHIs that must happen along the way.
4152 SSAUpdaterImpl<LDVSSAUpdater> Impl(&Updater, &AvailableValues, &CreatedPHIs);
4153 BlockValueNum ResultInt = Impl.GetValue(BB: Updater.getSSALDVBlock(BB: Here.getParent()));
4154 ValueIDNum Result = ValueIDNum::fromU64(v: ResultInt);
4155
4156 // We have the number for a PHI, or possibly live-through value, to be used
4157 // at this Use. There are a number of things we have to check about it though:
4158 // * Does any PHI use an 'Undef' (like an IMPLICIT_DEF) value? If so, this
4159 // Use was not completely dominated by DBG_PHIs and we should abort.
4160 // * Are the Defs or PHIs clobbered in a block? SSAUpdater isn't aware that
4161 // we've left SSA form. Validate that the inputs to each PHI are the
4162 // expected values.
4163 // * Is a PHI we've created actually a merging of values, or are all the
4164 // predecessor values the same, leading to a non-PHI machine value number?
4165 // (SSAUpdater doesn't know that either). Remap validated PHIs into the
4166 // the ValidatedValues collection below to sort this out.
4167 DenseMap<LDVSSABlock *, ValueIDNum> ValidatedValues;
4168
4169 // Define all the input DBG_PHI values in ValidatedValues.
4170 for (const auto &DBG_PHI : DBGPHIRange) {
4171 LDVSSABlock *Block = Updater.getSSALDVBlock(BB: DBG_PHI.MBB);
4172 const ValueIDNum &Num = *DBG_PHI.ValueRead;
4173 ValidatedValues.insert(KV: std::make_pair(x&: Block, y: Num));
4174 }
4175
4176 // Sort PHIs to validate into RPO-order.
4177 SmallVector<LDVSSAPhi *, 8> SortedPHIs;
4178 for (auto &PHI : CreatedPHIs)
4179 SortedPHIs.push_back(Elt: PHI);
4180
4181 llvm::sort(C&: SortedPHIs, Comp: [&](LDVSSAPhi *A, LDVSSAPhi *B) {
4182 return BBToOrder[&A->getParent()->BB] < BBToOrder[&B->getParent()->BB];
4183 });
4184
4185 for (auto &PHI : SortedPHIs) {
4186 ValueIDNum ThisBlockValueNum = MLiveIns[PHI->ParentBlock->BB][Loc.asU64()];
4187
4188 // Are all these things actually defined?
4189 for (auto &PHIIt : PHI->IncomingValues) {
4190 // Any undef input means DBG_PHIs didn't dominate the use point.
4191 if (Updater.UndefMap.contains(Val: &PHIIt.first->BB))
4192 return std::nullopt;
4193
4194 ValueIDNum ValueToCheck;
4195 const ValueTable &BlockLiveOuts = MLiveOuts[PHIIt.first->BB];
4196
4197 auto VVal = ValidatedValues.find(Val: PHIIt.first);
4198 if (VVal == ValidatedValues.end()) {
4199 // We cross a loop, and this is a backedge. LLVMs tail duplication
4200 // happens so late that DBG_PHI instructions should not be able to
4201 // migrate into loops -- meaning we can only be live-through this
4202 // loop.
4203 ValueToCheck = ThisBlockValueNum;
4204 } else {
4205 // Does the block have as a live-out, in the location we're examining,
4206 // the value that we expect? If not, it's been moved or clobbered.
4207 ValueToCheck = VVal->second;
4208 }
4209
4210 if (BlockLiveOuts[Loc.asU64()] != ValueToCheck)
4211 return std::nullopt;
4212 }
4213
4214 // Record this value as validated.
4215 ValidatedValues.insert(KV: {PHI->ParentBlock, ThisBlockValueNum});
4216 }
4217
4218 // All the PHIs are valid: we can return what the SSAUpdater said our value
4219 // number was.
4220 return Result;
4221}
4222

source code of llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp