//===- llvm/CodeGen/DwarfExpression.cpp - Dwarf Debug Framework -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for writing dwarf debug info into asm files.
//
//===----------------------------------------------------------------------===//

#include "DwarfExpression.h"
#include "DwarfCompileUnit.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "dwarfdebug"

void DwarfExpression::emitConstu(uint64_t Value) {
  if (Value < 32)
    emitOp(dwarf::DW_OP_lit0 + Value);
  else if (Value == std::numeric_limits<uint64_t>::max()) {
    // Only do this for 64-bit values as the DWARF expression stack uses
    // target-address-size values.
    emitOp(dwarf::DW_OP_lit0);
    emitOp(dwarf::DW_OP_not);
  } else {
    emitOp(dwarf::DW_OP_constu);
    emitUnsigned(Value);
  }
}
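// For illustration: emitConstu(5) emits DW_OP_lit5, emitConstu(1000) emits
// DW_OP_constu 1000, and emitConstu(UINT64_MAX) emits DW_OP_lit0, DW_OP_not.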

void DwarfExpression::addReg(int DwarfReg, const char *Comment) {
  assert(DwarfReg >= 0 && "invalid negative dwarf register number");
  assert((isUnknownLocation() || isRegisterLocation()) &&
         "location description already locked down");
  LocationKind = Register;
  if (DwarfReg < 32) {
    emitOp(dwarf::DW_OP_reg0 + DwarfReg, Comment);
  } else {
    emitOp(dwarf::DW_OP_regx, Comment);
    emitUnsigned(DwarfReg);
  }
}

void DwarfExpression::addBReg(int DwarfReg, int Offset) {
  assert(DwarfReg >= 0 && "invalid negative dwarf register number");
  assert(!isRegisterLocation() && "location description already locked down");
  if (DwarfReg < 32) {
    emitOp(dwarf::DW_OP_breg0 + DwarfReg);
  } else {
    emitOp(dwarf::DW_OP_bregx);
    emitUnsigned(DwarfReg);
  }
  emitSigned(Offset);
}

void DwarfExpression::addFBReg(int Offset) {
  emitOp(dwarf::DW_OP_fbreg);
  emitSigned(Offset);
}

void DwarfExpression::addOpPiece(unsigned SizeInBits, unsigned OffsetInBits) {
  if (!SizeInBits)
    return;

  const unsigned SizeOfByte = 8;
  if (OffsetInBits > 0 || SizeInBits % SizeOfByte) {
    emitOp(dwarf::DW_OP_bit_piece);
    emitUnsigned(SizeInBits);
    emitUnsigned(OffsetInBits);
  } else {
    emitOp(dwarf::DW_OP_piece);
    unsigned ByteSize = SizeInBits / SizeOfByte;
    emitUnsigned(ByteSize);
  }
  this->OffsetInBits += SizeInBits;
}
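// For illustration: addOpPiece(32) emits DW_OP_piece 4, while addOpPiece(12, 4)
// emits DW_OP_bit_piece 12 4, since that piece is neither byte-sized nor
// byte-aligned.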

void DwarfExpression::addShr(unsigned ShiftBy) {
  emitConstu(ShiftBy);
  emitOp(dwarf::DW_OP_shr);
}

void DwarfExpression::addAnd(unsigned Mask) {
  emitConstu(Mask);
  emitOp(dwarf::DW_OP_and);
}

bool DwarfExpression::addMachineReg(const TargetRegisterInfo &TRI,
                                    llvm::Register MachineReg,
                                    unsigned MaxSize) {
  if (!MachineReg.isPhysical()) {
    if (isFrameRegister(TRI, MachineReg)) {
      DwarfRegs.push_back(Register::createRegister(-1, nullptr));
      return true;
    }
    return false;
  }

  int Reg = TRI.getDwarfRegNum(MachineReg, false);

  // If this is a valid register number, emit it.
  if (Reg >= 0) {
    DwarfRegs.push_back(Register::createRegister(Reg, nullptr));
    return true;
  }

  // Walk up the super-register chain until we find a valid number.
  // For example, EAX on x86_64 is a 32-bit fragment of RAX with offset 0.
  for (MCPhysReg SR : TRI.superregs(MachineReg)) {
    Reg = TRI.getDwarfRegNum(SR, false);
    if (Reg >= 0) {
      unsigned Idx = TRI.getSubRegIndex(SR, MachineReg);
      unsigned Size = TRI.getSubRegIdxSize(Idx);
      unsigned RegOffset = TRI.getSubRegIdxOffset(Idx);
      DwarfRegs.push_back(Register::createRegister(Reg, "super-register"));
      // Use a DW_OP_bit_piece to describe the sub-register.
      setSubRegisterPiece(Size, RegOffset);
      return true;
    }
  }

  // Otherwise, attempt to find a covering set of sub-register numbers.
  // For example, Q0 on ARM is a composition of D0+D1.
  unsigned CurPos = 0;
  // The size of the register in bits.
  const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(MachineReg);
  unsigned RegSize = TRI.getRegSizeInBits(*RC);
  // Keep track of the bits in the register we already emitted, so we
  // can avoid emitting redundant aliasing subregs. Because this is
  // just doing a greedy scan of all subregisters, it is possible that
  // this doesn't find a combination of subregisters that fully cover
  // the register (even though one may exist).
  SmallBitVector Coverage(RegSize, false);
  for (MCPhysReg SR : TRI.subregs(MachineReg)) {
    unsigned Idx = TRI.getSubRegIndex(MachineReg, SR);
    unsigned Size = TRI.getSubRegIdxSize(Idx);
    unsigned Offset = TRI.getSubRegIdxOffset(Idx);
    Reg = TRI.getDwarfRegNum(SR, false);
    if (Reg < 0)
      continue;

    // Used to build the intersection between the bits we already
    // emitted and the bits covered by this subregister.
    SmallBitVector CurSubReg(RegSize, false);
    CurSubReg.set(Offset, Offset + Size);

    // If this sub-register has a DWARF number and we haven't covered
    // its range, and its range covers the value, emit a DWARF piece for it.
    if (Offset < MaxSize && CurSubReg.test(Coverage)) {
      // Emit a piece for any gap in the coverage.
      if (Offset > CurPos)
        DwarfRegs.push_back(Register::createSubRegister(
            -1, Offset - CurPos, "no DWARF register encoding"));
      if (Offset == 0 && Size >= MaxSize)
        DwarfRegs.push_back(Register::createRegister(Reg, "sub-register"));
      else
        DwarfRegs.push_back(Register::createSubRegister(
            Reg, std::min<unsigned>(Size, MaxSize - Offset), "sub-register"));
    }
    // Mark it as emitted.
    Coverage.set(Offset, Offset + Size);
    CurPos = Offset + Size;
  }
  // Failed to find any DWARF encoding.
  if (CurPos == 0)
    return false;
  // Found a partial or complete DWARF encoding.
  if (CurPos < RegSize)
    DwarfRegs.push_back(Register::createSubRegister(
        -1, RegSize - CurPos, "no DWARF register encoding"));
  return true;
}
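// For illustration (following the examples in the comments above): asking for
// x86_64's EAX goes through the super-register walk and records RAX together
// with a 32-bit sub-register piece at offset 0, while asking for ARM's Q0
// falls through to the sub-register scan and records D0 and D1 as two 64-bit
// pieces.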

void DwarfExpression::addStackValue() {
  if (DwarfVersion >= 4)
    emitOp(dwarf::DW_OP_stack_value);
}

void DwarfExpression::addSignedConstant(int64_t Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;
  emitOp(dwarf::DW_OP_consts);
  emitSigned(Value);
}

void DwarfExpression::addUnsignedConstant(uint64_t Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;
  emitConstu(Value);
}

void DwarfExpression::addUnsignedConstant(const APInt &Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;

  unsigned Size = Value.getBitWidth();
  const uint64_t *Data = Value.getRawData();

  // Chop it up into 64-bit pieces, because that's the maximum that
  // addUnsignedConstant takes.
  unsigned Offset = 0;
  while (Offset < Size) {
    addUnsignedConstant(*Data++);
    if (Offset == 0 && Size <= 64)
      break;
    addStackValue();
    addOpPiece(std::min(Size - Offset, 64u), Offset);
    Offset += 64;
  }
}
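// For illustration: a 128-bit APInt is emitted as two 64-bit halves, roughly
// <low word>, DW_OP_stack_value, DW_OP_piece 8, <high word>,
// DW_OP_stack_value, DW_OP_bit_piece 64 64 (the constants themselves go
// through emitConstu above).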

void DwarfExpression::addConstantFP(const APFloat &APF, const AsmPrinter &AP) {
  assert(isImplicitLocation() || isUnknownLocation());
  APInt API = APF.bitcastToAPInt();
  int NumBytes = API.getBitWidth() / 8;
  if (NumBytes == 4 /*float*/ || NumBytes == 8 /*double*/) {
    // FIXME: Add support for `long double`.
    emitOp(dwarf::DW_OP_implicit_value);
    emitUnsigned(NumBytes /*Size of the block in bytes*/);

    // The loop below is emitting the value starting at least significant byte,
    // so we need to perform a byte-swap to get the byte order correct in case
    // of a big-endian target.
    if (AP.getDataLayout().isBigEndian())
      API = API.byteSwap();

    for (int i = 0; i < NumBytes; ++i) {
      emitData1(API.getZExtValue() & 0xFF);
      API = API.lshr(8);
    }

    return;
  }
  LLVM_DEBUG(
      dbgs() << "Skipped DW_OP_implicit_value creation for ConstantFP of size: "
             << API.getBitWidth() << " bits\n");
}
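// For illustration: a float constant is emitted as DW_OP_implicit_value 4
// followed by its four raw bytes, least significant byte of the (possibly
// byte-swapped) value first, so the emitted order matches the target's
// in-memory representation.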

bool DwarfExpression::addMachineRegExpression(const TargetRegisterInfo &TRI,
                                              DIExpressionCursor &ExprCursor,
                                              llvm::Register MachineReg,
                                              unsigned FragmentOffsetInBits) {
  auto Fragment = ExprCursor.getFragmentInfo();
  if (!addMachineReg(TRI, MachineReg, Fragment ? Fragment->SizeInBits : ~1U)) {
    LocationKind = Unknown;
    return false;
  }

  bool HasComplexExpression = false;
  auto Op = ExprCursor.peek();
  if (Op && Op->getOp() != dwarf::DW_OP_LLVM_fragment)
    HasComplexExpression = true;

  // If the register can only be described by a complex expression (i.e.,
  // multiple subregisters) it doesn't safely compose with another complex
  // expression. For example, it is not possible to apply a DW_OP_deref
  // operation to multiple DW_OP_pieces, since composite location descriptions
  // do not push anything on the DWARF stack.
  //
  // DW_OP_entry_value operations can only hold a DWARF expression or a
  // register location description, so we can't emit a single entry value
  // covering a composite location description. In the future we may want to
  // emit entry value operations for each register location in the composite
  // location, but until that is supported do not emit anything.
  if ((HasComplexExpression || IsEmittingEntryValue) && DwarfRegs.size() > 1) {
    if (IsEmittingEntryValue)
      cancelEntryValue();
    DwarfRegs.clear();
    LocationKind = Unknown;
    return false;
  }

  // Handle simple register locations. If we are supposed to emit a call site
  // parameter expression and if that expression is just a register location,
  // emit it with addBReg and offset 0, because we should emit a DWARF
  // expression representing a value, rather than a location.
  if ((!isParameterValue() && !isMemoryLocation() && !HasComplexExpression) ||
      isEntryValue()) {
    auto FragmentInfo = ExprCursor.getFragmentInfo();
    unsigned RegSize = 0;
    for (auto &Reg : DwarfRegs) {
      RegSize += Reg.SubRegSize;
      if (Reg.DwarfRegNo >= 0)
        addReg(Reg.DwarfRegNo, Reg.Comment);
      if (FragmentInfo)
        if (RegSize > FragmentInfo->SizeInBits)
          // If the register is larger than the current fragment stop
          // once the fragment is covered.
          break;
      addOpPiece(Reg.SubRegSize);
    }

    if (isEntryValue()) {
      finalizeEntryValue();

      if (!isIndirect() && !isParameterValue() && !HasComplexExpression &&
          DwarfVersion >= 4)
        emitOp(dwarf::DW_OP_stack_value);
    }

    DwarfRegs.clear();
    // If we need to mask out a subregister, do it now, unless the next
    // operation would emit an OpPiece anyway.
    auto NextOp = ExprCursor.peek();
    if (SubRegisterSizeInBits && NextOp &&
        (NextOp->getOp() != dwarf::DW_OP_LLVM_fragment))
      maskSubRegister();
    return true;
  }

  // Don't emit locations that cannot be expressed without DW_OP_stack_value.
  if (DwarfVersion < 4)
    if (any_of(ExprCursor, [](DIExpression::ExprOperand Op) -> bool {
          return Op.getOp() == dwarf::DW_OP_stack_value;
        })) {
      DwarfRegs.clear();
      LocationKind = Unknown;
      return false;
    }

  // TODO: We should not give up here but the following code needs to be
  // changed to deal with multiple (sub)registers first.
  if (DwarfRegs.size() > 1) {
    LLVM_DEBUG(dbgs() << "TODO: giving up on debug information due to "
                         "multi-register usage.\n");
    DwarfRegs.clear();
    LocationKind = Unknown;
    return false;
  }

  auto Reg = DwarfRegs[0];
  bool FBReg = isFrameRegister(TRI, MachineReg);
  int SignedOffset = 0;
  assert(!Reg.isSubRegister() && "full register expected");

  // Pattern-match combinations for which more efficient representations exist.
  // [Reg, DW_OP_plus_uconst, Offset] --> [DW_OP_breg, Offset].
  if (Op && (Op->getOp() == dwarf::DW_OP_plus_uconst)) {
    uint64_t Offset = Op->getArg(0);
    uint64_t IntMax = static_cast<uint64_t>(std::numeric_limits<int>::max());
    if (Offset <= IntMax) {
      SignedOffset = Offset;
      ExprCursor.take();
    }
  }

  // [Reg, DW_OP_constu, Offset, DW_OP_plus] --> [DW_OP_breg, Offset]
  // [Reg, DW_OP_constu, Offset, DW_OP_minus] --> [DW_OP_breg,-Offset]
  // If Reg is a subregister we need to mask it out before subtracting.
  if (Op && Op->getOp() == dwarf::DW_OP_constu) {
    uint64_t Offset = Op->getArg(0);
    uint64_t IntMax = static_cast<uint64_t>(std::numeric_limits<int>::max());
    auto N = ExprCursor.peekNext();
    if (N && N->getOp() == dwarf::DW_OP_plus && Offset <= IntMax) {
      SignedOffset = Offset;
      ExprCursor.consume(2);
    } else if (N && N->getOp() == dwarf::DW_OP_minus &&
               !SubRegisterSizeInBits && Offset <= IntMax + 1) {
      SignedOffset = -static_cast<int64_t>(Offset);
      ExprCursor.consume(2);
    }
  }

  if (FBReg)
    addFBReg(SignedOffset);
  else
    addBReg(Reg.DwarfRegNo, SignedOffset);
  DwarfRegs.clear();

  // If we need to mask out a subregister, do it now, unless the next
  // operation would emit an OpPiece anyway.
  auto NextOp = ExprCursor.peek();
  if (SubRegisterSizeInBits && NextOp &&
      (NextOp->getOp() != dwarf::DW_OP_LLVM_fragment))
    maskSubRegister();

  return true;
}
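// For illustration: given a base register with DWARF number 6 and a trailing
// DIExpression of [DW_OP_plus_uconst, 8], the pattern match above folds the
// offset into the register operation and emits DW_OP_breg6 +8 rather than a
// register location followed by separate arithmetic.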

void DwarfExpression::setEntryValueFlags(const MachineLocation &Loc) {
  LocationFlags |= EntryValue;
  if (Loc.isIndirect())
    LocationFlags |= Indirect;
}

void DwarfExpression::setLocation(const MachineLocation &Loc,
                                  const DIExpression *DIExpr) {
  if (Loc.isIndirect())
    setMemoryLocationKind();

  if (DIExpr->isEntryValue())
    setEntryValueFlags(Loc);
}

void DwarfExpression::beginEntryValueExpression(
    DIExpressionCursor &ExprCursor) {
  auto Op = ExprCursor.take();
  (void)Op;
  assert(Op && Op->getOp() == dwarf::DW_OP_LLVM_entry_value);
  assert(!IsEmittingEntryValue && "Already emitting entry value?");
  assert(Op->getArg(0) == 1 &&
         "Can currently only emit entry values covering a single operation");

  SavedLocationKind = LocationKind;
  LocationKind = Register;
  LocationFlags |= EntryValue;
  IsEmittingEntryValue = true;
  enableTemporaryBuffer();
}

void DwarfExpression::finalizeEntryValue() {
  assert(IsEmittingEntryValue && "Entry value not open?");
  disableTemporaryBuffer();

  emitOp(CU.getDwarf5OrGNULocationAtom(dwarf::DW_OP_entry_value));

  // Emit the entry value's size operand.
  unsigned Size = getTemporaryBufferSize();
  emitUnsigned(Size);

  // Emit the entry value's DWARF block operand.
  commitTemporaryBuffer();

  LocationFlags &= ~EntryValue;
  LocationKind = SavedLocationKind;
  IsEmittingEntryValue = false;
}
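// For illustration: if the buffered block is the single byte DW_OP_reg5, this
// finalizes to DW_OP_entry_value 1 DW_OP_reg5 (or the DW_OP_GNU_entry_value
// form, depending on what getDwarf5OrGNULocationAtom selects for this CU).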

void DwarfExpression::cancelEntryValue() {
  assert(IsEmittingEntryValue && "Entry value not open?");
  disableTemporaryBuffer();

  // The temporary buffer can't be emptied, so for now just assert that nothing
  // has been emitted to it.
  assert(getTemporaryBufferSize() == 0 &&
         "Began emitting entry value block before cancelling entry value");

  LocationKind = SavedLocationKind;
  IsEmittingEntryValue = false;
}

unsigned DwarfExpression::getOrCreateBaseType(unsigned BitSize,
                                              dwarf::TypeKind Encoding) {
  // Reuse the base_type if we already have one in this CU otherwise we
  // create a new one.
  unsigned I = 0, E = CU.ExprRefedBaseTypes.size();
  for (; I != E; ++I)
    if (CU.ExprRefedBaseTypes[I].BitSize == BitSize &&
        CU.ExprRefedBaseTypes[I].Encoding == Encoding)
      break;

  if (I == E)
    CU.ExprRefedBaseTypes.emplace_back(BitSize, Encoding);
  return I;
}

/// Assuming a well-formed expression, match "DW_OP_deref*
/// DW_OP_LLVM_fragment?".
static bool isMemoryLocation(DIExpressionCursor ExprCursor) {
  while (ExprCursor) {
    auto Op = ExprCursor.take();
    switch (Op->getOp()) {
    case dwarf::DW_OP_deref:
    case dwarf::DW_OP_LLVM_fragment:
      break;
    default:
      return false;
    }
  }
  return true;
}

void DwarfExpression::addExpression(DIExpressionCursor &&ExprCursor) {
  addExpression(std::move(ExprCursor),
                [](unsigned Idx, DIExpressionCursor &Cursor) -> bool {
                  llvm_unreachable("unhandled opcode found in expression");
                });
}

bool DwarfExpression::addExpression(
    DIExpressionCursor &&ExprCursor,
    llvm::function_ref<bool(unsigned, DIExpressionCursor &)> InsertArg) {
  // Entry values can currently only cover the initial register location,
  // and not any other parts of the following DWARF expression.
  assert(!IsEmittingEntryValue && "Can't emit entry value around expression");

  std::optional<DIExpression::ExprOperand> PrevConvertOp;

  while (ExprCursor) {
    auto Op = ExprCursor.take();
    uint64_t OpNum = Op->getOp();

    if (OpNum >= dwarf::DW_OP_reg0 && OpNum <= dwarf::DW_OP_reg31) {
      emitOp(OpNum);
      continue;
    } else if (OpNum >= dwarf::DW_OP_breg0 && OpNum <= dwarf::DW_OP_breg31) {
      addBReg(OpNum - dwarf::DW_OP_breg0, Op->getArg(0));
      continue;
    }

    switch (OpNum) {
    case dwarf::DW_OP_LLVM_arg:
      if (!InsertArg(Op->getArg(0), ExprCursor)) {
        LocationKind = Unknown;
        return false;
      }
      break;
    case dwarf::DW_OP_LLVM_fragment: {
      unsigned SizeInBits = Op->getArg(1);
      unsigned FragmentOffset = Op->getArg(0);
      // The fragment offset must have already been adjusted by emitting an
      // empty DW_OP_piece / DW_OP_bit_piece before we emitted the base
      // location.
      assert(OffsetInBits >= FragmentOffset && "fragment offset not added?");
      assert(SizeInBits >= OffsetInBits - FragmentOffset && "size underflow");

      // If addMachineReg already emitted DW_OP_piece operations to represent
      // a super-register by splicing together sub-registers, subtract the size
      // of the pieces that was already emitted.
      SizeInBits -= OffsetInBits - FragmentOffset;

      // If addMachineReg requested a DW_OP_bit_piece to stencil out a
      // sub-register that is smaller than the current fragment's size, use it.
      if (SubRegisterSizeInBits)
        SizeInBits = std::min<unsigned>(SizeInBits, SubRegisterSizeInBits);

      // Emit a DW_OP_stack_value for implicit location descriptions.
      if (isImplicitLocation())
        addStackValue();

      // Emit the DW_OP_piece.
      addOpPiece(SizeInBits, SubRegisterOffsetInBits);
      setSubRegisterPiece(0, 0);
      // Reset the location description kind.
      LocationKind = Unknown;
      return true;
    }
    case dwarf::DW_OP_plus_uconst:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_plus_uconst);
      emitUnsigned(Op->getArg(0));
      break;
    case dwarf::DW_OP_plus:
    case dwarf::DW_OP_minus:
    case dwarf::DW_OP_mul:
    case dwarf::DW_OP_div:
    case dwarf::DW_OP_mod:
    case dwarf::DW_OP_or:
    case dwarf::DW_OP_and:
    case dwarf::DW_OP_xor:
    case dwarf::DW_OP_shl:
    case dwarf::DW_OP_shr:
    case dwarf::DW_OP_shra:
    case dwarf::DW_OP_lit0:
    case dwarf::DW_OP_not:
    case dwarf::DW_OP_dup:
    case dwarf::DW_OP_push_object_address:
    case dwarf::DW_OP_over:
    case dwarf::DW_OP_eq:
    case dwarf::DW_OP_ne:
    case dwarf::DW_OP_gt:
    case dwarf::DW_OP_ge:
    case dwarf::DW_OP_lt:
    case dwarf::DW_OP_le:
      emitOp(OpNum);
      break;
    case dwarf::DW_OP_deref:
      assert(!isRegisterLocation());
      if (!isMemoryLocation() && ::isMemoryLocation(ExprCursor))
        // Turning this into a memory location description makes the deref
        // implicit.
        LocationKind = Memory;
      else
        emitOp(dwarf::DW_OP_deref);
      break;
    case dwarf::DW_OP_constu:
      assert(!isRegisterLocation());
      emitConstu(Op->getArg(0));
      break;
    case dwarf::DW_OP_consts:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_consts);
      emitSigned(Op->getArg(0));
      break;
    case dwarf::DW_OP_LLVM_convert: {
      unsigned BitSize = Op->getArg(0);
      dwarf::TypeKind Encoding = static_cast<dwarf::TypeKind>(Op->getArg(1));
      if (DwarfVersion >= 5 && CU.getDwarfDebug().useOpConvert()) {
        emitOp(dwarf::DW_OP_convert);
        // If targeting a location-list; simply emit the index into the raw
        // byte stream as ULEB128, DwarfDebug::emitDebugLocEntry has been
        // fitted with means to extract it later.
        // If targeting an inlined DW_AT_location; insert a DIEBaseTypeRef
        // (containing the index and a resolve mechanism during emit) into the
        // DIE value list.
        emitBaseTypeRef(getOrCreateBaseType(BitSize, Encoding));
      } else {
        if (PrevConvertOp && PrevConvertOp->getArg(0) < BitSize) {
          if (Encoding == dwarf::DW_ATE_signed)
            emitLegacySExt(PrevConvertOp->getArg(0));
          else if (Encoding == dwarf::DW_ATE_unsigned)
            emitLegacyZExt(PrevConvertOp->getArg(0));
          PrevConvertOp = std::nullopt;
        } else {
          PrevConvertOp = Op;
        }
      }
      break;
    }
    case dwarf::DW_OP_stack_value:
      LocationKind = Implicit;
      break;
    case dwarf::DW_OP_swap:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_swap);
      break;
    case dwarf::DW_OP_xderef:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_xderef);
      break;
    case dwarf::DW_OP_deref_size:
      emitOp(dwarf::DW_OP_deref_size);
      emitData1(Op->getArg(0));
      break;
    case dwarf::DW_OP_LLVM_tag_offset:
      TagOffset = Op->getArg(0);
      break;
    case dwarf::DW_OP_regx:
      emitOp(dwarf::DW_OP_regx);
      emitUnsigned(Op->getArg(0));
      break;
    case dwarf::DW_OP_bregx:
      emitOp(dwarf::DW_OP_bregx);
      emitUnsigned(Op->getArg(0));
      emitSigned(Op->getArg(1));
      break;
    default:
      llvm_unreachable("unhandled opcode found in expression");
    }
  }

  if (isImplicitLocation() && !isParameterValue())
    // Turn this into an implicit location description.
    addStackValue();

  return true;
}
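// For illustration: lowering the IR-level expression
// !DIExpression(DW_OP_constu, 4, DW_OP_plus, DW_OP_stack_value) through the
// loop above emits DW_OP_lit4 (emitConstu folds small constants into literal
// ops) and DW_OP_plus, then records the implicit location kind so the
// trailing DW_OP_stack_value is appended after the loop.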

/// Add masking operations to stencil out a subregister.
void DwarfExpression::maskSubRegister() {
  assert(SubRegisterSizeInBits && "no subregister was registered");
  if (SubRegisterOffsetInBits > 0)
    addShr(SubRegisterOffsetInBits);
  uint64_t Mask = (1ULL << (uint64_t)SubRegisterSizeInBits) - 1ULL;
  addAnd(Mask);
}
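// For illustration: for a 16-bit sub-register recorded at bit offset 8 this
// emits DW_OP_lit8, DW_OP_shr, DW_OP_constu 65535, DW_OP_and.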

void DwarfExpression::finalize() {
  assert(DwarfRegs.size() == 0 && "dwarf registers not emitted");
  // Emit any outstanding DW_OP_piece operations to mask out subregisters.
  if (SubRegisterSizeInBits == 0)
    return;
  // Don't emit a DW_OP_piece for a subregister at offset 0.
  if (SubRegisterOffsetInBits == 0)
    return;
  addOpPiece(SubRegisterSizeInBits, SubRegisterOffsetInBits);
}

void DwarfExpression::addFragmentOffset(const DIExpression *Expr) {
  if (!Expr || !Expr->isFragment())
    return;

  uint64_t FragmentOffset = Expr->getFragmentInfo()->OffsetInBits;
  assert(FragmentOffset >= OffsetInBits &&
         "overlapping or duplicate fragments");
  if (FragmentOffset > OffsetInBits)
    addOpPiece(FragmentOffset - OffsetInBits);
  OffsetInBits = FragmentOffset;
}

void DwarfExpression::emitLegacySExt(unsigned FromBits) {
  // (((X >> (FromBits - 1)) * (~0)) << FromBits) | X
  emitOp(dwarf::DW_OP_dup);
  emitOp(dwarf::DW_OP_constu);
  emitUnsigned(FromBits - 1);
  emitOp(dwarf::DW_OP_shr);
  emitOp(dwarf::DW_OP_lit0);
  emitOp(dwarf::DW_OP_not);
  emitOp(dwarf::DW_OP_mul);
  emitOp(dwarf::DW_OP_constu);
  emitUnsigned(FromBits);
  emitOp(dwarf::DW_OP_shl);
  emitOp(dwarf::DW_OP_or);
}
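// For illustration: emitLegacySExt(8) emits DW_OP_dup, DW_OP_constu 7,
// DW_OP_shr, DW_OP_lit0, DW_OP_not, DW_OP_mul, DW_OP_constu 8, DW_OP_shl,
// DW_OP_or, which replicates the sign bit of the low 8 bits across the upper
// bits of the stack value.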

void DwarfExpression::emitLegacyZExt(unsigned FromBits) {
  // Heuristic to decide the most efficient encoding.
  // A ULEB can encode 7 1-bits per byte.
  if (FromBits / 7 < 1+1+1+1+1) {
    // (X & (1 << FromBits - 1))
    emitOp(dwarf::DW_OP_constu);
    emitUnsigned((1ULL << FromBits) - 1);
  } else {
    // Note that the DWARF 4 stack consists of pointer-sized elements,
    // so technically it doesn't make sense to shift left more than 64
    // bits. We leave that for the consumer to decide though. LLDB for
    // example uses APInt for the stack elements and can still deal
    // with this.
    emitOp(dwarf::DW_OP_lit1);
    emitOp(dwarf::DW_OP_constu);
    emitUnsigned(FromBits);
    emitOp(dwarf::DW_OP_shl);
    emitOp(dwarf::DW_OP_lit1);
    emitOp(dwarf::DW_OP_minus);
  }
  emitOp(dwarf::DW_OP_and);
}
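// For illustration: emitLegacyZExt(16) takes the short form and emits
// DW_OP_constu 65535, DW_OP_and, while emitLegacyZExt(64) builds the mask on
// the stack as DW_OP_lit1, DW_OP_constu 64, DW_OP_shl, DW_OP_lit1,
// DW_OP_minus, DW_OP_and.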

void DwarfExpression::addWasmLocation(unsigned Index, uint64_t Offset) {
  emitOp(dwarf::DW_OP_WASM_location);
  emitUnsigned(Index == 4/*TI_LOCAL_INDIRECT*/ ? 0/*TI_LOCAL*/ : Index);
  emitUnsigned(Offset);
  if (Index == 4 /*TI_LOCAL_INDIRECT*/) {
    assert(LocationKind == Unknown);
    LocationKind = Memory;
  } else {
    assert(LocationKind == Implicit || LocationKind == Unknown);
    LocationKind = Implicit;
  }
}