1 | //===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ----------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | |
9 | #include "llvm/MC/MCAssembler.h" |
10 | #include "llvm/ADT/ArrayRef.h" |
11 | #include "llvm/ADT/SmallString.h" |
12 | #include "llvm/ADT/SmallVector.h" |
13 | #include "llvm/ADT/Statistic.h" |
14 | #include "llvm/ADT/StringRef.h" |
15 | #include "llvm/ADT/Twine.h" |
16 | #include "llvm/MC/MCAsmBackend.h" |
17 | #include "llvm/MC/MCAsmInfo.h" |
18 | #include "llvm/MC/MCAsmLayout.h" |
19 | #include "llvm/MC/MCCodeEmitter.h" |
20 | #include "llvm/MC/MCCodeView.h" |
21 | #include "llvm/MC/MCContext.h" |
22 | #include "llvm/MC/MCDwarf.h" |
23 | #include "llvm/MC/MCExpr.h" |
24 | #include "llvm/MC/MCFixup.h" |
25 | #include "llvm/MC/MCFixupKindInfo.h" |
26 | #include "llvm/MC/MCFragment.h" |
27 | #include "llvm/MC/MCInst.h" |
28 | #include "llvm/MC/MCObjectWriter.h" |
29 | #include "llvm/MC/MCSection.h" |
30 | #include "llvm/MC/MCSymbol.h" |
31 | #include "llvm/MC/MCValue.h" |
32 | #include "llvm/Support/Alignment.h" |
33 | #include "llvm/Support/Casting.h" |
34 | #include "llvm/Support/Debug.h" |
35 | #include "llvm/Support/EndianStream.h" |
36 | #include "llvm/Support/ErrorHandling.h" |
37 | #include "llvm/Support/LEB128.h" |
38 | #include "llvm/Support/raw_ostream.h" |
39 | #include <cassert> |
40 | #include <cstdint> |
41 | #include <tuple> |
42 | #include <utility> |
43 | |
44 | using namespace llvm; |
45 | |
46 | namespace llvm { |
47 | class MCSubtargetInfo; |
48 | } |
49 | |
50 | #define DEBUG_TYPE "assembler" |
51 | |
52 | namespace { |
53 | namespace stats { |
54 | |
STATISTIC(EmittedFragments, "Number of emitted assembler fragments - total");
STATISTIC(EmittedRelaxableFragments,
          "Number of emitted assembler fragments - relaxable");
STATISTIC(EmittedDataFragments,
          "Number of emitted assembler fragments - data");
STATISTIC(EmittedCompactEncodedInstFragments,
          "Number of emitted assembler fragments - compact encoded inst");
STATISTIC(EmittedAlignFragments,
          "Number of emitted assembler fragments - align");
STATISTIC(EmittedFillFragments,
          "Number of emitted assembler fragments - fill");
STATISTIC(EmittedNopsFragments, "Number of emitted assembler fragments - nops");
STATISTIC(EmittedOrgFragments, "Number of emitted assembler fragments - org");
STATISTIC(evaluateFixup, "Number of evaluated fixups");
STATISTIC(FragmentLayouts, "Number of fragment layouts");
STATISTIC(ObjectBytes, "Number of emitted object file bytes");
STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps");
STATISTIC(RelaxedInstructions, "Number of relaxed instructions");
73 | |
74 | } // end namespace stats |
75 | } // end anonymous namespace |
76 | |
// FIXME FIXME FIXME: There are a number of places in this file where we
// convert what is a 64-bit assembler value used for computation into a value
// in the object file, which may truncate it. We should detect that truncation
// where invalid and report errors back.
81 | |
82 | /* *** */ |
83 | |
84 | MCAssembler::MCAssembler(MCContext &Context, |
85 | std::unique_ptr<MCAsmBackend> Backend, |
86 | std::unique_ptr<MCCodeEmitter> Emitter, |
87 | std::unique_ptr<MCObjectWriter> Writer) |
88 | : Context(Context), Backend(std::move(Backend)), |
89 | Emitter(std::move(Emitter)), Writer(std::move(Writer)), |
90 | BundleAlignSize(0), RelaxAll(false), SubsectionsViaSymbols(false), |
91 | IncrementalLinkerCompatible(false), ELFHeaderEFlags(0) { |
92 | VersionInfo.Major = 0; // Major version == 0 for "none specified" |
93 | DarwinTargetVariantVersionInfo.Major = 0; |
94 | } |
95 | |
96 | MCAssembler::~MCAssembler() = default; |
97 | |
98 | void MCAssembler::reset() { |
99 | Sections.clear(); |
100 | Symbols.clear(); |
101 | IndirectSymbols.clear(); |
102 | DataRegions.clear(); |
103 | LinkerOptions.clear(); |
104 | FileNames.clear(); |
105 | ThumbFuncs.clear(); |
106 | BundleAlignSize = 0; |
107 | RelaxAll = false; |
108 | SubsectionsViaSymbols = false; |
109 | IncrementalLinkerCompatible = false; |
110 | ELFHeaderEFlags = 0; |
111 | LOHContainer.reset(); |
112 | VersionInfo.Major = 0; |
113 | VersionInfo.SDKVersion = VersionTuple(); |
114 | DarwinTargetVariantVersionInfo.Major = 0; |
115 | DarwinTargetVariantVersionInfo.SDKVersion = VersionTuple(); |
116 | |
117 | // reset objects owned by us |
118 | if (getBackendPtr()) |
119 | getBackendPtr()->reset(); |
120 | if (getEmitterPtr()) |
121 | getEmitterPtr()->reset(); |
122 | if (getWriterPtr()) |
123 | getWriterPtr()->reset(); |
124 | getLOHContainer().reset(); |
125 | } |
126 | |
127 | bool MCAssembler::registerSection(MCSection &Section) { |
128 | if (Section.isRegistered()) |
129 | return false; |
130 | Sections.push_back(x: &Section); |
131 | Section.setIsRegistered(true); |
132 | return true; |
133 | } |
134 | |
135 | bool MCAssembler::isThumbFunc(const MCSymbol *Symbol) const { |
136 | if (ThumbFuncs.count(Ptr: Symbol)) |
137 | return true; |
138 | |
139 | if (!Symbol->isVariable()) |
140 | return false; |
141 | |
142 | const MCExpr *Expr = Symbol->getVariableValue(); |
143 | |
144 | MCValue V; |
145 | if (!Expr->evaluateAsRelocatable(Res&: V, Layout: nullptr, Fixup: nullptr)) |
146 | return false; |
147 | |
148 | if (V.getSymB() || V.getRefKind() != MCSymbolRefExpr::VK_None) |
149 | return false; |
150 | |
151 | const MCSymbolRefExpr *Ref = V.getSymA(); |
152 | if (!Ref) |
153 | return false; |
154 | |
155 | if (Ref->getKind() != MCSymbolRefExpr::VK_None) |
156 | return false; |
157 | |
158 | const MCSymbol &Sym = Ref->getSymbol(); |
159 | if (!isThumbFunc(Symbol: &Sym)) |
160 | return false; |
161 | |
162 | ThumbFuncs.insert(Ptr: Symbol); // Cache it. |
163 | return true; |
164 | } |
165 | |
166 | bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const { |
167 | // Non-temporary labels should always be visible to the linker. |
168 | if (!Symbol.isTemporary()) |
169 | return true; |
170 | |
171 | if (Symbol.isUsedInReloc()) |
172 | return true; |
173 | |
174 | return false; |
175 | } |
176 | |
177 | const MCSymbol *MCAssembler::getAtom(const MCSymbol &S) const { |
178 | // Linker visible symbols define atoms. |
179 | if (isSymbolLinkerVisible(Symbol: S)) |
180 | return &S; |
181 | |
182 | // Absolute and undefined symbols have no defining atom. |
183 | if (!S.isInSection()) |
184 | return nullptr; |
185 | |
186 | // Non-linker visible symbols in sections which can't be atomized have no |
187 | // defining atom. |
188 | if (!getContext().getAsmInfo()->isSectionAtomizableBySymbols( |
189 | Section: *S.getFragment()->getParent())) |
190 | return nullptr; |
191 | |
192 | // Otherwise, return the atom for the containing fragment. |
193 | return S.getFragment()->getAtom(); |
194 | } |
195 | |
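/// Evaluate the fixup \p Fixup against its containing fragment \p DF under
/// \p Layout. On return, \p Target holds the relocatable value, \p Value the
/// computed fixed value, and \p WasForced whether the backend forced a
/// relocation. Returns true if the fixup is fully resolved.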
196 | bool MCAssembler::evaluateFixup(const MCAsmLayout &Layout, const MCFixup &Fixup, |
197 | const MCFragment *DF, MCValue &Target, |
198 | const MCSubtargetInfo *STI, uint64_t &Value, |
199 | bool &WasForced) const { |
200 | ++stats::evaluateFixup; |
201 | |
202 | // FIXME: This code has some duplication with recordRelocation. We should |
203 | // probably merge the two into a single callback that tries to evaluate a |
204 | // fixup and records a relocation if one is needed. |
205 | |
206 | // On error claim to have completely evaluated the fixup, to prevent any |
207 | // further processing from being done. |
208 | const MCExpr *Expr = Fixup.getValue(); |
209 | MCContext &Ctx = getContext(); |
210 | Value = 0; |
211 | WasForced = false; |
212 | if (!Expr->evaluateAsRelocatable(Res&: Target, Layout: &Layout, Fixup: &Fixup)) { |
213 | Ctx.reportError(L: Fixup.getLoc(), Msg: "expected relocatable expression" ); |
214 | return true; |
215 | } |
216 | if (const MCSymbolRefExpr *RefB = Target.getSymB()) { |
217 | if (RefB->getKind() != MCSymbolRefExpr::VK_None) { |
218 | Ctx.reportError(L: Fixup.getLoc(), |
219 | Msg: "unsupported subtraction of qualified symbol" ); |
220 | return true; |
221 | } |
222 | } |
223 | |
224 | assert(getBackendPtr() && "Expected assembler backend" ); |
225 | bool IsTarget = getBackendPtr()->getFixupKindInfo(Kind: Fixup.getKind()).Flags & |
226 | MCFixupKindInfo::FKF_IsTarget; |
227 | |
228 | if (IsTarget) |
229 | return getBackend().evaluateTargetFixup(Asm: *this, Layout, Fixup, DF, Target, |
230 | STI, Value, WasForced); |
231 | |
232 | unsigned FixupFlags = getBackendPtr()->getFixupKindInfo(Kind: Fixup.getKind()).Flags; |
233 | bool IsPCRel = getBackendPtr()->getFixupKindInfo(Kind: Fixup.getKind()).Flags & |
234 | MCFixupKindInfo::FKF_IsPCRel; |
235 | |
236 | bool IsResolved = false; |
237 | if (IsPCRel) { |
238 | if (Target.getSymB()) { |
239 | IsResolved = false; |
240 | } else if (!Target.getSymA()) { |
241 | IsResolved = false; |
242 | } else { |
243 | const MCSymbolRefExpr *A = Target.getSymA(); |
244 | const MCSymbol &SA = A->getSymbol(); |
245 | if (A->getKind() != MCSymbolRefExpr::VK_None || SA.isUndefined()) { |
246 | IsResolved = false; |
247 | } else if (auto *Writer = getWriterPtr()) { |
248 | IsResolved = (FixupFlags & MCFixupKindInfo::FKF_Constant) || |
249 | Writer->isSymbolRefDifferenceFullyResolvedImpl( |
250 | Asm: *this, SymA: SA, FB: *DF, InSet: false, IsPCRel: true); |
251 | } |
252 | } |
253 | } else { |
254 | IsResolved = Target.isAbsolute(); |
255 | } |
256 | |
257 | Value = Target.getConstant(); |
258 | |
259 | if (const MCSymbolRefExpr *A = Target.getSymA()) { |
260 | const MCSymbol &Sym = A->getSymbol(); |
261 | if (Sym.isDefined()) |
262 | Value += Layout.getSymbolOffset(S: Sym); |
263 | } |
264 | if (const MCSymbolRefExpr *B = Target.getSymB()) { |
265 | const MCSymbol &Sym = B->getSymbol(); |
266 | if (Sym.isDefined()) |
267 | Value -= Layout.getSymbolOffset(S: Sym); |
268 | } |
269 | |
270 | bool ShouldAlignPC = getBackend().getFixupKindInfo(Kind: Fixup.getKind()).Flags & |
271 | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits; |
272 | assert((ShouldAlignPC ? IsPCRel : true) && |
273 | "FKF_IsAlignedDownTo32Bits is only allowed on PC-relative fixups!" ); |
274 | |
275 | if (IsPCRel) { |
276 | uint64_t Offset = Layout.getFragmentOffset(F: DF) + Fixup.getOffset(); |
277 | |
278 | // A number of ARM fixups in Thumb mode require that the effective PC |
279 | // address be determined as the 32-bit aligned version of the actual offset. |
280 | if (ShouldAlignPC) Offset &= ~0x3; |
281 | Value -= Offset; |
282 | } |
283 | |
284 | // Let the backend force a relocation if needed. |
285 | if (IsResolved && |
286 | getBackend().shouldForceRelocation(Asm: *this, Fixup, Target, STI)) { |
287 | IsResolved = false; |
288 | WasForced = true; |
289 | } |
290 | |
291 | // A linker relaxation target may emit ADD/SUB relocations for A-B+C. Let |
292 | // recordRelocation handle non-VK_None cases like A@plt-B+C. |
293 | if (!IsResolved && Target.getSymA() && Target.getSymB() && |
294 | Target.getSymA()->getKind() == MCSymbolRefExpr::VK_None && |
295 | getBackend().handleAddSubRelocations(Layout, F: *DF, Fixup, Target, FixedValue&: Value)) |
296 | return true; |
297 | |
298 | return IsResolved; |
299 | } |
300 | |
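/// Compute the effective size in bytes of fragment \p F, given the offsets
/// assigned so far in \p Layout. Reports an error and returns 0 when a size
/// expression cannot be evaluated.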
301 | uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout, |
302 | const MCFragment &F) const { |
303 | assert(getBackendPtr() && "Requires assembler backend" ); |
304 | switch (F.getKind()) { |
305 | case MCFragment::FT_Data: |
306 | return cast<MCDataFragment>(Val: F).getContents().size(); |
307 | case MCFragment::FT_Relaxable: |
308 | return cast<MCRelaxableFragment>(Val: F).getContents().size(); |
309 | case MCFragment::FT_CompactEncodedInst: |
310 | return cast<MCCompactEncodedInstFragment>(Val: F).getContents().size(); |
311 | case MCFragment::FT_Fill: { |
312 | auto &FF = cast<MCFillFragment>(Val: F); |
313 | int64_t NumValues = 0; |
314 | if (!FF.getNumValues().evaluateKnownAbsolute(Res&: NumValues, Layout)) { |
315 | getContext().reportError(L: FF.getLoc(), |
316 | Msg: "expected assembly-time absolute expression" ); |
317 | return 0; |
318 | } |
319 | int64_t Size = NumValues * FF.getValueSize(); |
320 | if (Size < 0) { |
321 | getContext().reportError(L: FF.getLoc(), Msg: "invalid number of bytes" ); |
322 | return 0; |
323 | } |
324 | return Size; |
325 | } |
326 | |
327 | case MCFragment::FT_Nops: |
328 | return cast<MCNopsFragment>(Val: F).getNumBytes(); |
329 | |
330 | case MCFragment::FT_LEB: |
331 | return cast<MCLEBFragment>(Val: F).getContents().size(); |
332 | |
333 | case MCFragment::FT_BoundaryAlign: |
334 | return cast<MCBoundaryAlignFragment>(Val: F).getSize(); |
335 | |
336 | case MCFragment::FT_SymbolId: |
337 | return 4; |
338 | |
339 | case MCFragment::FT_Align: { |
340 | const MCAlignFragment &AF = cast<MCAlignFragment>(Val: F); |
341 | unsigned Offset = Layout.getFragmentOffset(F: &AF); |
342 | unsigned Size = offsetToAlignment(Value: Offset, Alignment: AF.getAlignment()); |
343 | |
    // Insert extra nops for code alignment if the target defines the
    // shouldInsertExtraNopBytesForCodeAlign hook.
346 | if (AF.getParent()->useCodeAlign() && AF.hasEmitNops() && |
347 | getBackend().shouldInsertExtraNopBytesForCodeAlign(AF, Size)) |
348 | return Size; |
349 | |
    // If we are padding with nops, force the padding to be a multiple of the
    // minimum nop size.
352 | if (Size > 0 && AF.hasEmitNops()) { |
353 | while (Size % getBackend().getMinimumNopSize()) |
354 | Size += AF.getAlignment().value(); |
355 | } |
356 | if (Size > AF.getMaxBytesToEmit()) |
357 | return 0; |
358 | return Size; |
359 | } |
360 | |
361 | case MCFragment::FT_Org: { |
362 | const MCOrgFragment &OF = cast<MCOrgFragment>(Val: F); |
363 | MCValue Value; |
364 | if (!OF.getOffset().evaluateAsValue(Res&: Value, Layout)) { |
365 | getContext().reportError(L: OF.getLoc(), |
366 | Msg: "expected assembly-time absolute expression" ); |
367 | return 0; |
368 | } |
369 | |
370 | uint64_t FragmentOffset = Layout.getFragmentOffset(F: &OF); |
371 | int64_t TargetLocation = Value.getConstant(); |
372 | if (const MCSymbolRefExpr *A = Value.getSymA()) { |
373 | uint64_t Val; |
374 | if (!Layout.getSymbolOffset(S: A->getSymbol(), Val)) { |
375 | getContext().reportError(L: OF.getLoc(), Msg: "expected absolute expression" ); |
376 | return 0; |
377 | } |
378 | TargetLocation += Val; |
379 | } |
380 | int64_t Size = TargetLocation - FragmentOffset; |
381 | if (Size < 0 || Size >= 0x40000000) { |
382 | getContext().reportError( |
383 | L: OF.getLoc(), Msg: "invalid .org offset '" + Twine(TargetLocation) + |
384 | "' (at offset '" + Twine(FragmentOffset) + "')" ); |
385 | return 0; |
386 | } |
387 | return Size; |
388 | } |
389 | |
390 | case MCFragment::FT_Dwarf: |
391 | return cast<MCDwarfLineAddrFragment>(Val: F).getContents().size(); |
392 | case MCFragment::FT_DwarfFrame: |
393 | return cast<MCDwarfCallFrameFragment>(Val: F).getContents().size(); |
394 | case MCFragment::FT_CVInlineLines: |
395 | return cast<MCCVInlineLineTableFragment>(Val: F).getContents().size(); |
396 | case MCFragment::FT_CVDefRange: |
397 | return cast<MCCVDefRangeFragment>(Val: F).getContents().size(); |
398 | case MCFragment::FT_PseudoProbe: |
399 | return cast<MCPseudoProbeAddrFragment>(Val: F).getContents().size(); |
400 | case MCFragment::FT_Dummy: |
401 | llvm_unreachable("Should not have been added" ); |
402 | } |
403 | |
404 | llvm_unreachable("invalid fragment kind" ); |
405 | } |
406 | |
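/// Assign \p F its offset immediately after its predecessor, then account for
/// any bundle padding the fragment requires when bundling is enabled.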
407 | void MCAsmLayout::layoutFragment(MCFragment *F) { |
408 | MCFragment *Prev = F->getPrevNode(); |
409 | |
410 | // We should never try to recompute something which is valid. |
411 | assert(!isFragmentValid(F) && "Attempt to recompute a valid fragment!" ); |
412 | // We should never try to compute the fragment layout if its predecessor |
413 | // isn't valid. |
414 | assert((!Prev || isFragmentValid(Prev)) && |
415 | "Attempt to compute fragment before its predecessor!" ); |
416 | |
417 | assert(!F->IsBeingLaidOut && "Already being laid out!" ); |
418 | F->IsBeingLaidOut = true; |
419 | |
420 | ++stats::FragmentLayouts; |
421 | |
422 | // Compute fragment offset and size. |
423 | if (Prev) |
424 | F->Offset = Prev->Offset + getAssembler().computeFragmentSize(Layout: *this, F: *Prev); |
425 | else |
426 | F->Offset = 0; |
427 | F->IsBeingLaidOut = false; |
428 | LastValidFragment[F->getParent()] = F; |
429 | |
430 | // If bundling is enabled and this fragment has instructions in it, it has to |
431 | // obey the bundling restrictions. With padding, we'll have: |
432 | // |
433 | // |
  //        BundlePadding
  //             |||
  // -------------------------------------
  //   Prev  |##########|       F        |
  // -------------------------------------
  //                      ^
  //                      |
  //                    F->Offset
442 | // |
443 | // The fragment's offset will point to after the padding, and its computed |
444 | // size won't include the padding. |
445 | // |
  // When the -mc-relax-all flag is used, we optimize bundling by writing the
447 | // padding directly into fragments when the instructions are emitted inside |
448 | // the streamer. When the fragment is larger than the bundle size, we need to |
449 | // ensure that it's bundle aligned. This means that if we end up with |
450 | // multiple fragments, we must emit bundle padding between fragments. |
451 | // |
452 | // ".align N" is an example of a directive that introduces multiple |
453 | // fragments. We could add a special case to handle ".align N" by emitting |
454 | // within-fragment padding (which would produce less padding when N is less |
455 | // than the bundle size), but for now we don't. |
456 | // |
457 | if (Assembler.isBundlingEnabled() && F->hasInstructions()) { |
458 | assert(isa<MCEncodedFragment>(F) && |
459 | "Only MCEncodedFragment implementations have instructions" ); |
460 | MCEncodedFragment *EF = cast<MCEncodedFragment>(Val: F); |
461 | uint64_t FSize = Assembler.computeFragmentSize(Layout: *this, F: *EF); |
462 | |
463 | if (!Assembler.getRelaxAll() && FSize > Assembler.getBundleAlignSize()) |
464 | report_fatal_error(reason: "Fragment can't be larger than a bundle size" ); |
465 | |
466 | uint64_t RequiredBundlePadding = |
467 | computeBundlePadding(Assembler, F: EF, FOffset: EF->Offset, FSize); |
468 | if (RequiredBundlePadding > UINT8_MAX) |
469 | report_fatal_error(reason: "Padding cannot exceed 255 bytes" ); |
470 | EF->setBundlePadding(static_cast<uint8_t>(RequiredBundlePadding)); |
471 | EF->Offset += RequiredBundlePadding; |
472 | } |
473 | } |
474 | |
475 | bool MCAssembler::registerSymbol(const MCSymbol &Symbol) { |
476 | bool Changed = !Symbol.isRegistered(); |
477 | if (Changed) { |
478 | Symbol.setIsRegistered(true); |
479 | Symbols.push_back(x: &Symbol); |
480 | } |
481 | return Changed; |
482 | } |
483 | |
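/// Write out any NOP bundle padding that must precede the encoded fragment
/// \p EF, whose encoded size is \p FSize, splitting the padding in two when
/// it would otherwise cross a bundle boundary.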
484 | void MCAssembler::writeFragmentPadding(raw_ostream &OS, |
485 | const MCEncodedFragment &EF, |
486 | uint64_t FSize) const { |
487 | assert(getBackendPtr() && "Expected assembler backend" ); |
488 | // Should NOP padding be written out before this fragment? |
489 | unsigned BundlePadding = EF.getBundlePadding(); |
490 | if (BundlePadding > 0) { |
491 | assert(isBundlingEnabled() && |
492 | "Writing bundle padding with disabled bundling" ); |
493 | assert(EF.hasInstructions() && |
494 | "Writing bundle padding for a fragment without instructions" ); |
495 | |
496 | unsigned TotalLength = BundlePadding + static_cast<unsigned>(FSize); |
497 | const MCSubtargetInfo *STI = EF.getSubtargetInfo(); |
498 | if (EF.alignToBundleEnd() && TotalLength > getBundleAlignSize()) { |
499 | // If the padding itself crosses a bundle boundary, it must be emitted |
500 | // in 2 pieces, since even nop instructions must not cross boundaries. |
      //  v--------------v   <- BundleAlignSize
      //        v---------v  <- BundlePadding
      // ----------------------------
      // | Prev |####|####|    F    |
      // ----------------------------
      //        ^-------------------^ <- TotalLength
507 | unsigned DistanceToBoundary = TotalLength - getBundleAlignSize(); |
508 | if (!getBackend().writeNopData(OS, Count: DistanceToBoundary, STI)) |
509 | report_fatal_error(reason: "unable to write NOP sequence of " + |
510 | Twine(DistanceToBoundary) + " bytes" ); |
511 | BundlePadding -= DistanceToBoundary; |
512 | } |
513 | if (!getBackend().writeNopData(OS, Count: BundlePadding, STI)) |
514 | report_fatal_error(reason: "unable to write NOP sequence of " + |
515 | Twine(BundlePadding) + " bytes" ); |
516 | } |
517 | } |
518 | |
519 | /// Write the fragment \p F to the output file. |
520 | static void writeFragment(raw_ostream &OS, const MCAssembler &Asm, |
521 | const MCAsmLayout &Layout, const MCFragment &F) { |
522 | // FIXME: Embed in fragments instead? |
523 | uint64_t FragmentSize = Asm.computeFragmentSize(Layout, F); |
524 | |
525 | llvm::endianness Endian = Asm.getBackend().Endian; |
526 | |
527 | if (const MCEncodedFragment *EF = dyn_cast<MCEncodedFragment>(Val: &F)) |
528 | Asm.writeFragmentPadding(OS, EF: *EF, FSize: FragmentSize); |
529 | |
530 | // This variable (and its dummy usage) is to participate in the assert at |
531 | // the end of the function. |
532 | uint64_t Start = OS.tell(); |
533 | (void) Start; |
534 | |
535 | ++stats::EmittedFragments; |
536 | |
537 | switch (F.getKind()) { |
538 | case MCFragment::FT_Align: { |
539 | ++stats::EmittedAlignFragments; |
540 | const MCAlignFragment &AF = cast<MCAlignFragment>(Val: F); |
541 | assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!" ); |
542 | |
543 | uint64_t Count = FragmentSize / AF.getValueSize(); |
544 | |
545 | // FIXME: This error shouldn't actually occur (the front end should emit |
546 | // multiple .align directives to enforce the semantics it wants), but is |
547 | // severe enough that we want to report it. How to handle this? |
548 | if (Count * AF.getValueSize() != FragmentSize) |
549 | report_fatal_error(reason: "undefined .align directive, value size '" + |
550 | Twine(AF.getValueSize()) + |
551 | "' is not a divisor of padding size '" + |
552 | Twine(FragmentSize) + "'" ); |
553 | |
    // If we are aligning with nops, ask the target to emit the right data to
    // fill all Count bytes. Otherwise, write out the fill value below in
    // multiples of the value size.
558 | if (AF.hasEmitNops()) { |
559 | if (!Asm.getBackend().writeNopData(OS, Count, STI: AF.getSubtargetInfo())) |
560 | report_fatal_error(reason: "unable to write nop sequence of " + |
561 | Twine(Count) + " bytes" ); |
562 | break; |
563 | } |
564 | |
565 | // Otherwise, write out in multiples of the value size. |
566 | for (uint64_t i = 0; i != Count; ++i) { |
567 | switch (AF.getValueSize()) { |
568 | default: llvm_unreachable("Invalid size!" ); |
569 | case 1: OS << char(AF.getValue()); break; |
570 | case 2: |
571 | support::endian::write<uint16_t>(os&: OS, value: AF.getValue(), endian: Endian); |
572 | break; |
573 | case 4: |
574 | support::endian::write<uint32_t>(os&: OS, value: AF.getValue(), endian: Endian); |
575 | break; |
576 | case 8: |
577 | support::endian::write<uint64_t>(os&: OS, value: AF.getValue(), endian: Endian); |
578 | break; |
579 | } |
580 | } |
581 | break; |
582 | } |
583 | |
584 | case MCFragment::FT_Data: |
585 | ++stats::EmittedDataFragments; |
586 | OS << cast<MCDataFragment>(Val: F).getContents(); |
587 | break; |
588 | |
589 | case MCFragment::FT_Relaxable: |
590 | ++stats::EmittedRelaxableFragments; |
591 | OS << cast<MCRelaxableFragment>(Val: F).getContents(); |
592 | break; |
593 | |
594 | case MCFragment::FT_CompactEncodedInst: |
595 | ++stats::EmittedCompactEncodedInstFragments; |
596 | OS << cast<MCCompactEncodedInstFragment>(Val: F).getContents(); |
597 | break; |
598 | |
599 | case MCFragment::FT_Fill: { |
600 | ++stats::EmittedFillFragments; |
601 | const MCFillFragment &FF = cast<MCFillFragment>(Val: F); |
602 | uint64_t V = FF.getValue(); |
603 | unsigned VSize = FF.getValueSize(); |
604 | const unsigned MaxChunkSize = 16; |
605 | char Data[MaxChunkSize]; |
606 | assert(0 < VSize && VSize <= MaxChunkSize && "Illegal fragment fill size" ); |
    // Duplicate V into Data as a byte vector to reduce the number of writes
    // done; do the endian conversion here as well.
609 | for (unsigned I = 0; I != VSize; ++I) { |
610 | unsigned index = Endian == llvm::endianness::little ? I : (VSize - I - 1); |
611 | Data[I] = uint8_t(V >> (index * 8)); |
612 | } |
613 | for (unsigned I = VSize; I < MaxChunkSize; ++I) |
614 | Data[I] = Data[I - VSize]; |
615 | |
    // NumPerChunk is the number of whole values that fit in one chunk.
    const unsigned NumPerChunk = MaxChunkSize / VSize;
    // ChunkSize is the largest multiple of VSize that fits in Data.
    const unsigned ChunkSize = VSize * NumPerChunk;
620 | |
621 | // Do copies by chunk. |
622 | StringRef Ref(Data, ChunkSize); |
623 | for (uint64_t I = 0, E = FragmentSize / ChunkSize; I != E; ++I) |
624 | OS << Ref; |
625 | |
626 | // do remainder if needed. |
627 | unsigned TrailingCount = FragmentSize % ChunkSize; |
628 | if (TrailingCount) |
629 | OS.write(Ptr: Data, Size: TrailingCount); |
630 | break; |
631 | } |
632 | |
633 | case MCFragment::FT_Nops: { |
634 | ++stats::EmittedNopsFragments; |
635 | const MCNopsFragment &NF = cast<MCNopsFragment>(Val: F); |
636 | |
637 | int64_t NumBytes = NF.getNumBytes(); |
638 | int64_t ControlledNopLength = NF.getControlledNopLength(); |
639 | int64_t MaximumNopLength = |
640 | Asm.getBackend().getMaximumNopSize(STI: *NF.getSubtargetInfo()); |
641 | |
642 | assert(NumBytes > 0 && "Expected positive NOPs fragment size" ); |
643 | assert(ControlledNopLength >= 0 && "Expected non-negative NOP size" ); |
644 | |
645 | if (ControlledNopLength > MaximumNopLength) { |
646 | Asm.getContext().reportError(L: NF.getLoc(), |
647 | Msg: "illegal NOP size " + |
648 | std::to_string(val: ControlledNopLength) + |
649 | ". (expected within [0, " + |
650 | std::to_string(val: MaximumNopLength) + "])" ); |
651 | // Clamp the NOP length as reportError does not stop the execution |
652 | // immediately. |
653 | ControlledNopLength = MaximumNopLength; |
654 | } |
655 | |
656 | // Use maximum value if the size of each NOP is not specified |
657 | if (!ControlledNopLength) |
658 | ControlledNopLength = MaximumNopLength; |
659 | |
660 | while (NumBytes) { |
661 | uint64_t NumBytesToEmit = |
662 | (uint64_t)std::min(a: NumBytes, b: ControlledNopLength); |
663 | assert(NumBytesToEmit && "try to emit empty NOP instruction" ); |
664 | if (!Asm.getBackend().writeNopData(OS, Count: NumBytesToEmit, |
665 | STI: NF.getSubtargetInfo())) { |
666 | report_fatal_error(reason: "unable to write nop sequence of the remaining " + |
667 | Twine(NumBytesToEmit) + " bytes" ); |
668 | break; |
669 | } |
670 | NumBytes -= NumBytesToEmit; |
671 | } |
672 | break; |
673 | } |
674 | |
675 | case MCFragment::FT_LEB: { |
676 | const MCLEBFragment &LF = cast<MCLEBFragment>(Val: F); |
677 | OS << LF.getContents(); |
678 | break; |
679 | } |
680 | |
681 | case MCFragment::FT_BoundaryAlign: { |
682 | const MCBoundaryAlignFragment &BF = cast<MCBoundaryAlignFragment>(Val: F); |
683 | if (!Asm.getBackend().writeNopData(OS, Count: FragmentSize, STI: BF.getSubtargetInfo())) |
684 | report_fatal_error(reason: "unable to write nop sequence of " + |
685 | Twine(FragmentSize) + " bytes" ); |
686 | break; |
687 | } |
688 | |
689 | case MCFragment::FT_SymbolId: { |
690 | const MCSymbolIdFragment &SF = cast<MCSymbolIdFragment>(Val: F); |
691 | support::endian::write<uint32_t>(os&: OS, value: SF.getSymbol()->getIndex(), endian: Endian); |
692 | break; |
693 | } |
694 | |
695 | case MCFragment::FT_Org: { |
696 | ++stats::EmittedOrgFragments; |
697 | const MCOrgFragment &OF = cast<MCOrgFragment>(Val: F); |
698 | |
699 | for (uint64_t i = 0, e = FragmentSize; i != e; ++i) |
700 | OS << char(OF.getValue()); |
701 | |
702 | break; |
703 | } |
704 | |
705 | case MCFragment::FT_Dwarf: { |
706 | const MCDwarfLineAddrFragment &OF = cast<MCDwarfLineAddrFragment>(Val: F); |
707 | OS << OF.getContents(); |
708 | break; |
709 | } |
710 | case MCFragment::FT_DwarfFrame: { |
711 | const MCDwarfCallFrameFragment &CF = cast<MCDwarfCallFrameFragment>(Val: F); |
712 | OS << CF.getContents(); |
713 | break; |
714 | } |
715 | case MCFragment::FT_CVInlineLines: { |
716 | const auto &OF = cast<MCCVInlineLineTableFragment>(Val: F); |
717 | OS << OF.getContents(); |
718 | break; |
719 | } |
720 | case MCFragment::FT_CVDefRange: { |
721 | const auto &DRF = cast<MCCVDefRangeFragment>(Val: F); |
722 | OS << DRF.getContents(); |
723 | break; |
724 | } |
725 | case MCFragment::FT_PseudoProbe: { |
726 | const MCPseudoProbeAddrFragment &PF = cast<MCPseudoProbeAddrFragment>(Val: F); |
727 | OS << PF.getContents(); |
728 | break; |
729 | } |
730 | case MCFragment::FT_Dummy: |
731 | llvm_unreachable("Should not have been added" ); |
732 | } |
733 | |
734 | assert(OS.tell() - Start == FragmentSize && |
735 | "The stream should advance by fragment size" ); |
736 | } |
737 | |
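/// Write the contents of section \p Sec to \p OS using the final \p Layout.
/// Virtual sections are only checked for illegal (non-zero) contents and are
/// not written.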
738 | void MCAssembler::writeSectionData(raw_ostream &OS, const MCSection *Sec, |
739 | const MCAsmLayout &Layout) const { |
740 | assert(getBackendPtr() && "Expected assembler backend" ); |
741 | |
742 | // Ignore virtual sections. |
743 | if (Sec->isVirtualSection()) { |
744 | assert(Layout.getSectionFileSize(Sec) == 0 && "Invalid size for section!" ); |
745 | |
746 | // Check that contents are only things legal inside a virtual section. |
747 | for (const MCFragment &F : *Sec) { |
748 | switch (F.getKind()) { |
749 | default: llvm_unreachable("Invalid fragment in virtual section!" ); |
750 | case MCFragment::FT_Data: { |
        // Check that we aren't trying to write non-zero contents (or fixups)
        // into a virtual section. This is to support clients which use
        // standard directives to fill the contents of virtual sections.
754 | const MCDataFragment &DF = cast<MCDataFragment>(Val: F); |
755 | if (DF.fixup_begin() != DF.fixup_end()) |
756 | getContext().reportError(L: SMLoc(), Msg: Sec->getVirtualSectionKind() + |
757 | " section '" + Sec->getName() + |
758 | "' cannot have fixups" ); |
759 | for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i) |
760 | if (DF.getContents()[i]) { |
761 | getContext().reportError(L: SMLoc(), |
762 | Msg: Sec->getVirtualSectionKind() + |
763 | " section '" + Sec->getName() + |
764 | "' cannot have non-zero initializers" ); |
765 | break; |
766 | } |
767 | break; |
768 | } |
769 | case MCFragment::FT_Align: |
770 | // Check that we aren't trying to write a non-zero value into a virtual |
771 | // section. |
772 | assert((cast<MCAlignFragment>(F).getValueSize() == 0 || |
773 | cast<MCAlignFragment>(F).getValue() == 0) && |
774 | "Invalid align in virtual section!" ); |
775 | break; |
776 | case MCFragment::FT_Fill: |
777 | assert((cast<MCFillFragment>(F).getValue() == 0) && |
778 | "Invalid fill in virtual section!" ); |
779 | break; |
780 | case MCFragment::FT_Org: |
781 | break; |
782 | } |
783 | } |
784 | |
785 | return; |
786 | } |
787 | |
788 | uint64_t Start = OS.tell(); |
789 | (void)Start; |
790 | |
791 | for (const MCFragment &F : *Sec) |
792 | writeFragment(OS, Asm: *this, Layout, F); |
793 | |
794 | assert(getContext().hadError() || |
795 | OS.tell() - Start == Layout.getSectionAddressSize(Sec)); |
796 | } |
797 | |
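/// Evaluate a single fixup and, if it is not fully resolved, ask the object
/// writer to record a relocation. Returns the evaluated target, the fixed
/// value to apply, and whether the fixup was resolved.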
798 | std::tuple<MCValue, uint64_t, bool> |
799 | MCAssembler::handleFixup(const MCAsmLayout &Layout, MCFragment &F, |
800 | const MCFixup &Fixup, const MCSubtargetInfo *STI) { |
801 | // Evaluate the fixup. |
802 | MCValue Target; |
803 | uint64_t FixedValue; |
804 | bool WasForced; |
805 | bool IsResolved = |
806 | evaluateFixup(Layout, Fixup, DF: &F, Target, STI, Value&: FixedValue, WasForced); |
807 | if (!IsResolved) { |
808 | // The fixup was unresolved, we need a relocation. Inform the object |
809 | // writer of the relocation, and give it an opportunity to adjust the |
810 | // fixup value if need be. |
811 | getWriter().recordRelocation(Asm&: *this, Layout, Fragment: &F, Fixup, Target, FixedValue); |
812 | } |
813 | return std::make_tuple(args&: Target, args&: FixedValue, args&: IsResolved); |
814 | } |
815 | |
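/// Drive layout: assign section and fragment ordinals, relax fragments until
/// the layout reaches a fixed point, finalize it, and then evaluate and apply
/// all fixups, emitting relocations where needed.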
816 | void MCAssembler::layout(MCAsmLayout &Layout) { |
817 | assert(getBackendPtr() && "Expected assembler backend" ); |
818 | DEBUG_WITH_TYPE("mc-dump" , { |
819 | errs() << "assembler backend - pre-layout\n--\n" ; |
820 | dump(); }); |
821 | |
822 | // Create dummy fragments and assign section ordinals. |
823 | unsigned SectionIndex = 0; |
824 | for (MCSection &Sec : *this) { |
825 | // Create dummy fragments to eliminate any empty sections, this simplifies |
826 | // layout. |
827 | if (Sec.getFragmentList().empty()) |
828 | new MCDataFragment(&Sec); |
829 | |
830 | Sec.setOrdinal(SectionIndex++); |
831 | } |
832 | |
833 | // Assign layout order indices to sections and fragments. |
834 | for (unsigned i = 0, e = Layout.getSectionOrder().size(); i != e; ++i) { |
835 | MCSection *Sec = Layout.getSectionOrder()[i]; |
836 | Sec->setLayoutOrder(i); |
837 | |
838 | unsigned FragmentIndex = 0; |
839 | for (MCFragment &Frag : *Sec) |
840 | Frag.setLayoutOrder(FragmentIndex++); |
841 | } |
842 | |
843 | // Layout until everything fits. |
844 | while (layoutOnce(Layout)) { |
845 | if (getContext().hadError()) |
846 | return; |
847 | // Size of fragments in one section can depend on the size of fragments in |
848 | // another. If any fragment has changed size, we have to re-layout (and |
849 | // as a result possibly further relax) all. |
850 | for (MCSection &Sec : *this) |
851 | Layout.invalidateFragmentsFrom(F: &*Sec.begin()); |
852 | } |
853 | |
854 | DEBUG_WITH_TYPE("mc-dump" , { |
855 | errs() << "assembler backend - post-relaxation\n--\n" ; |
856 | dump(); }); |
857 | |
858 | // Finalize the layout, including fragment lowering. |
859 | finishLayout(Layout); |
860 | |
861 | DEBUG_WITH_TYPE("mc-dump" , { |
862 | errs() << "assembler backend - final-layout\n--\n" ; |
863 | dump(); }); |
864 | |
865 | // Allow the object writer a chance to perform post-layout binding (for |
866 | // example, to set the index fields in the symbol data). |
867 | getWriter().executePostLayoutBinding(Asm&: *this, Layout); |
868 | |
869 | // Evaluate and apply the fixups, generating relocation entries as necessary. |
870 | for (MCSection &Sec : *this) { |
871 | for (MCFragment &Frag : Sec) { |
872 | ArrayRef<MCFixup> Fixups; |
873 | MutableArrayRef<char> Contents; |
874 | const MCSubtargetInfo *STI = nullptr; |
875 | |
876 | // Process MCAlignFragment and MCEncodedFragmentWithFixups here. |
877 | switch (Frag.getKind()) { |
878 | default: |
879 | continue; |
880 | case MCFragment::FT_Align: { |
881 | MCAlignFragment &AF = cast<MCAlignFragment>(Val&: Frag); |
        // Insert a fixup for code alignment if the target defines the
        // shouldInsertFixupForCodeAlign hook.
884 | if (Sec.useCodeAlign() && AF.hasEmitNops()) |
885 | getBackend().shouldInsertFixupForCodeAlign(Asm&: *this, Layout, AF); |
886 | continue; |
887 | } |
888 | case MCFragment::FT_Data: { |
889 | MCDataFragment &DF = cast<MCDataFragment>(Val&: Frag); |
890 | Fixups = DF.getFixups(); |
891 | Contents = DF.getContents(); |
892 | STI = DF.getSubtargetInfo(); |
893 | assert(!DF.hasInstructions() || STI != nullptr); |
894 | break; |
895 | } |
896 | case MCFragment::FT_Relaxable: { |
897 | MCRelaxableFragment &RF = cast<MCRelaxableFragment>(Val&: Frag); |
898 | Fixups = RF.getFixups(); |
899 | Contents = RF.getContents(); |
900 | STI = RF.getSubtargetInfo(); |
901 | assert(!RF.hasInstructions() || STI != nullptr); |
902 | break; |
903 | } |
904 | case MCFragment::FT_CVDefRange: { |
905 | MCCVDefRangeFragment &CF = cast<MCCVDefRangeFragment>(Val&: Frag); |
906 | Fixups = CF.getFixups(); |
907 | Contents = CF.getContents(); |
908 | break; |
909 | } |
910 | case MCFragment::FT_Dwarf: { |
911 | MCDwarfLineAddrFragment &DF = cast<MCDwarfLineAddrFragment>(Val&: Frag); |
912 | Fixups = DF.getFixups(); |
913 | Contents = DF.getContents(); |
914 | break; |
915 | } |
916 | case MCFragment::FT_DwarfFrame: { |
917 | MCDwarfCallFrameFragment &DF = cast<MCDwarfCallFrameFragment>(Val&: Frag); |
918 | Fixups = DF.getFixups(); |
919 | Contents = DF.getContents(); |
920 | break; |
921 | } |
922 | case MCFragment::FT_LEB: { |
923 | auto &LF = cast<MCLEBFragment>(Val&: Frag); |
924 | Fixups = LF.getFixups(); |
925 | Contents = LF.getContents(); |
926 | break; |
927 | } |
928 | case MCFragment::FT_PseudoProbe: { |
929 | MCPseudoProbeAddrFragment &PF = cast<MCPseudoProbeAddrFragment>(Val&: Frag); |
930 | Fixups = PF.getFixups(); |
931 | Contents = PF.getContents(); |
932 | break; |
933 | } |
934 | } |
935 | for (const MCFixup &Fixup : Fixups) { |
936 | uint64_t FixedValue; |
937 | bool IsResolved; |
938 | MCValue Target; |
939 | std::tie(args&: Target, args&: FixedValue, args&: IsResolved) = |
940 | handleFixup(Layout, F&: Frag, Fixup, STI); |
941 | getBackend().applyFixup(Asm: *this, Fixup, Target, Data: Contents, Value: FixedValue, |
942 | IsResolved, STI); |
943 | } |
944 | } |
945 | } |
946 | } |
947 | |
948 | void MCAssembler::Finish() { |
949 | // Create the layout object. |
950 | MCAsmLayout Layout(*this); |
951 | layout(Layout); |
952 | |
953 | // Write the object file. |
954 | stats::ObjectBytes += getWriter().writeObject(Asm&: *this, Layout); |
955 | } |
956 | |
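/// Return true if the given fixup cannot be satisfied by the current encoding
/// of \p DF, meaning the containing instruction must be relaxed.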
957 | bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup, |
958 | const MCRelaxableFragment *DF, |
959 | const MCAsmLayout &Layout) const { |
960 | assert(getBackendPtr() && "Expected assembler backend" ); |
961 | MCValue Target; |
962 | uint64_t Value; |
963 | bool WasForced; |
964 | bool Resolved = evaluateFixup(Layout, Fixup, DF, Target, |
965 | STI: DF->getSubtargetInfo(), Value, WasForced); |
966 | if (Target.getSymA() && |
967 | Target.getSymA()->getKind() == MCSymbolRefExpr::VK_X86_ABS8 && |
968 | Fixup.getKind() == FK_Data_1) |
969 | return false; |
970 | return getBackend().fixupNeedsRelaxationAdvanced(Fixup, Resolved, Value, DF, |
971 | Layout, WasForced); |
972 | } |
973 | |
974 | bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment *F, |
975 | const MCAsmLayout &Layout) const { |
976 | assert(getBackendPtr() && "Expected assembler backend" ); |
977 | // If this inst doesn't ever need relaxation, ignore it. This occurs when we |
978 | // are intentionally pushing out inst fragments, or because we relaxed a |
979 | // previous instruction to one that doesn't need relaxation. |
980 | if (!getBackend().mayNeedRelaxation(Inst: F->getInst(), STI: *F->getSubtargetInfo())) |
981 | return false; |
982 | |
983 | for (const MCFixup &Fixup : F->getFixups()) |
984 | if (fixupNeedsRelaxation(Fixup, DF: F, Layout)) |
985 | return true; |
986 | |
987 | return false; |
988 | } |
989 | |
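/// Relax the instruction in \p F to its larger form and re-encode it in
/// place. Returns true if the fragment was changed.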
990 | bool MCAssembler::relaxInstruction(MCAsmLayout &Layout, |
991 | MCRelaxableFragment &F) { |
992 | assert(getEmitterPtr() && |
993 | "Expected CodeEmitter defined for relaxInstruction" ); |
994 | if (!fragmentNeedsRelaxation(F: &F, Layout)) |
995 | return false; |
996 | |
997 | ++stats::RelaxedInstructions; |
998 | |
999 | // FIXME-PERF: We could immediately lower out instructions if we can tell |
1000 | // they are fully resolved, to avoid retesting on later passes. |
1001 | |
1002 | // Relax the fragment. |
1003 | |
1004 | MCInst Relaxed = F.getInst(); |
1005 | getBackend().relaxInstruction(Inst&: Relaxed, STI: *F.getSubtargetInfo()); |
1006 | |
1007 | // Encode the new instruction. |
1008 | F.setInst(Relaxed); |
1009 | F.getFixups().clear(); |
1010 | F.getContents().clear(); |
1011 | getEmitter().encodeInstruction(Inst: Relaxed, CB&: F.getContents(), Fixups&: F.getFixups(), |
1012 | STI: *F.getSubtargetInfo()); |
1013 | return true; |
1014 | } |
1015 | |
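/// Re-encode the LEB128 value held by \p LF. The encoding may only grow, not
/// shrink, so that later alignment padding stays valid (see PR35809). Returns
/// true if the fragment's size changed.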
1016 | bool MCAssembler::relaxLEB(MCAsmLayout &Layout, MCLEBFragment &LF) { |
1017 | const unsigned OldSize = static_cast<unsigned>(LF.getContents().size()); |
1018 | unsigned PadTo = OldSize; |
1019 | int64_t Value; |
1020 | SmallVectorImpl<char> &Data = LF.getContents(); |
1021 | LF.getFixups().clear(); |
1022 | // Use evaluateKnownAbsolute for Mach-O as a hack: .subsections_via_symbols |
1023 | // requires that .uleb128 A-B is foldable where A and B reside in different |
1024 | // fragments. This is used by __gcc_except_table. |
1025 | bool Abs = getSubsectionsViaSymbols() |
1026 | ? LF.getValue().evaluateKnownAbsolute(Res&: Value, Layout) |
1027 | : LF.getValue().evaluateAsAbsolute(Res&: Value, Layout); |
1028 | if (!Abs) { |
1029 | bool Relaxed, UseZeroPad; |
1030 | std::tie(args&: Relaxed, args&: UseZeroPad) = getBackend().relaxLEB128(LF, Layout, Value); |
1031 | if (!Relaxed) { |
1032 | getContext().reportError(L: LF.getValue().getLoc(), |
1033 | Msg: Twine(LF.isSigned() ? ".s" : ".u" ) + |
1034 | "leb128 expression is not absolute" ); |
1035 | LF.setValue(MCConstantExpr::create(Value: 0, Ctx&: Context)); |
1036 | } |
1037 | uint8_t Tmp[10]; // maximum size: ceil(64/7) |
1038 | PadTo = std::max(a: PadTo, b: encodeULEB128(Value: uint64_t(Value), p: Tmp)); |
1039 | if (UseZeroPad) |
1040 | Value = 0; |
1041 | } |
1042 | Data.clear(); |
1043 | raw_svector_ostream OSE(Data); |
1044 | // The compiler can generate EH table assembly that is impossible to assemble |
1045 | // without either adding padding to an LEB fragment or adding extra padding |
1046 | // to a later alignment fragment. To accommodate such tables, relaxation can |
1047 | // only increase an LEB fragment size here, not decrease it. See PR35809. |
1048 | if (LF.isSigned()) |
1049 | encodeSLEB128(Value, OS&: OSE, PadTo); |
1050 | else |
1051 | encodeULEB128(Value, OS&: OSE, PadTo); |
1052 | return OldSize != LF.getContents().size(); |
1053 | } |
1054 | |
1055 | /// Check if the branch crosses the boundary. |
1056 | /// |
1057 | /// \param StartAddr start address of the fused/unfused branch. |
1058 | /// \param Size size of the fused/unfused branch. |
1059 | /// \param BoundaryAlignment alignment requirement of the branch. |
1060 | /// \returns true if the branch cross the boundary. |
1061 | static bool mayCrossBoundary(uint64_t StartAddr, uint64_t Size, |
1062 | Align BoundaryAlignment) { |
1063 | uint64_t EndAddr = StartAddr + Size; |
1064 | return (StartAddr >> Log2(A: BoundaryAlignment)) != |
1065 | ((EndAddr - 1) >> Log2(A: BoundaryAlignment)); |
1066 | } |
1067 | |
1068 | /// Check if the branch is against the boundary. |
1069 | /// |
1070 | /// \param StartAddr start address of the fused/unfused branch. |
1071 | /// \param Size size of the fused/unfused branch. |
1072 | /// \param BoundaryAlignment alignment requirement of the branch. |
1073 | /// \returns true if the branch is against the boundary. |
1074 | static bool isAgainstBoundary(uint64_t StartAddr, uint64_t Size, |
1075 | Align BoundaryAlignment) { |
1076 | uint64_t EndAddr = StartAddr + Size; |
1077 | return (EndAddr & (BoundaryAlignment.value() - 1)) == 0; |
1078 | } |
1079 | |
1080 | /// Check if the branch needs padding. |
1081 | /// |
1082 | /// \param StartAddr start address of the fused/unfused branch. |
1083 | /// \param Size size of the fused/unfused branch. |
1084 | /// \param BoundaryAlignment alignment requirement of the branch. |
1085 | /// \returns true if the branch needs padding. |
1086 | static bool needPadding(uint64_t StartAddr, uint64_t Size, |
1087 | Align BoundaryAlignment) { |
1088 | return mayCrossBoundary(StartAddr, Size, BoundaryAlignment) || |
1089 | isAgainstBoundary(StartAddr, Size, BoundaryAlignment); |
1090 | } |
1091 | |
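/// Recompute the padding a boundary-align fragment must emit so that the
/// fragments it protects neither cross nor end exactly on the alignment
/// boundary. Returns true if the padding size changed.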
1092 | bool MCAssembler::relaxBoundaryAlign(MCAsmLayout &Layout, |
1093 | MCBoundaryAlignFragment &BF) { |
  // A BoundaryAlignFragment that doesn't need to align any fragment should
  // not be relaxed.
1096 | if (!BF.getLastFragment()) |
1097 | return false; |
1098 | |
1099 | uint64_t AlignedOffset = Layout.getFragmentOffset(F: &BF); |
1100 | uint64_t AlignedSize = 0; |
1101 | for (const MCFragment *F = BF.getLastFragment(); F != &BF; |
1102 | F = F->getPrevNode()) |
1103 | AlignedSize += computeFragmentSize(Layout, F: *F); |
1104 | |
1105 | Align BoundaryAlignment = BF.getAlignment(); |
1106 | uint64_t NewSize = needPadding(StartAddr: AlignedOffset, Size: AlignedSize, BoundaryAlignment) |
1107 | ? offsetToAlignment(Value: AlignedOffset, Alignment: BoundaryAlignment) |
1108 | : 0U; |
1109 | if (NewSize == BF.getSize()) |
1110 | return false; |
1111 | BF.setSize(NewSize); |
1112 | Layout.invalidateFragmentsFrom(F: &BF); |
1113 | return true; |
1114 | } |
1115 | |
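/// Re-encode the DWARF line/address delta held by \p DF, either through the
/// backend hook or with MCDwarfLineAddr::encode. Returns true if the
/// fragment's size changed.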
1116 | bool MCAssembler::relaxDwarfLineAddr(MCAsmLayout &Layout, |
1117 | MCDwarfLineAddrFragment &DF) { |
1118 | |
1119 | bool WasRelaxed; |
1120 | if (getBackend().relaxDwarfLineAddr(DF, Layout, WasRelaxed)) |
1121 | return WasRelaxed; |
1122 | |
1123 | MCContext &Context = Layout.getAssembler().getContext(); |
1124 | uint64_t OldSize = DF.getContents().size(); |
1125 | int64_t AddrDelta; |
1126 | bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(Res&: AddrDelta, Layout); |
1127 | assert(Abs && "We created a line delta with an invalid expression" ); |
1128 | (void)Abs; |
  int64_t LineDelta = DF.getLineDelta();
1131 | SmallVectorImpl<char> &Data = DF.getContents(); |
1132 | Data.clear(); |
1133 | DF.getFixups().clear(); |
1134 | |
1135 | MCDwarfLineAddr::encode(Context, Params: getDWARFLinetableParams(), LineDelta, |
1136 | AddrDelta, OS&: Data); |
1137 | return OldSize != Data.size(); |
1138 | } |
1139 | |
1140 | bool MCAssembler::relaxDwarfCallFrameFragment(MCAsmLayout &Layout, |
1141 | MCDwarfCallFrameFragment &DF) { |
1142 | bool WasRelaxed; |
1143 | if (getBackend().relaxDwarfCFA(DF, Layout, WasRelaxed)) |
1144 | return WasRelaxed; |
1145 | |
1146 | MCContext &Context = Layout.getAssembler().getContext(); |
1147 | int64_t Value; |
1148 | bool Abs = DF.getAddrDelta().evaluateAsAbsolute(Res&: Value, Layout); |
1149 | if (!Abs) { |
1150 | getContext().reportError(L: DF.getAddrDelta().getLoc(), |
1151 | Msg: "invalid CFI advance_loc expression" ); |
1152 | DF.setAddrDelta(MCConstantExpr::create(Value: 0, Ctx&: Context)); |
1153 | return false; |
1154 | } |
1155 | |
1156 | SmallVectorImpl<char> &Data = DF.getContents(); |
1157 | uint64_t OldSize = Data.size(); |
1158 | Data.clear(); |
1159 | DF.getFixups().clear(); |
1160 | |
1161 | MCDwarfFrameEmitter::encodeAdvanceLoc(Context, AddrDelta: Value, OS&: Data); |
1162 | return OldSize != Data.size(); |
1163 | } |
1164 | |
1165 | bool MCAssembler::relaxCVInlineLineTable(MCAsmLayout &Layout, |
1166 | MCCVInlineLineTableFragment &F) { |
1167 | unsigned OldSize = F.getContents().size(); |
1168 | getContext().getCVContext().encodeInlineLineTable(Layout, F); |
1169 | return OldSize != F.getContents().size(); |
1170 | } |
1171 | |
1172 | bool MCAssembler::relaxCVDefRange(MCAsmLayout &Layout, |
1173 | MCCVDefRangeFragment &F) { |
1174 | unsigned OldSize = F.getContents().size(); |
1175 | getContext().getCVContext().encodeDefRange(Layout, F); |
1176 | return OldSize != F.getContents().size(); |
1177 | } |
1178 | |
1179 | bool MCAssembler::relaxPseudoProbeAddr(MCAsmLayout &Layout, |
1180 | MCPseudoProbeAddrFragment &PF) { |
1181 | uint64_t OldSize = PF.getContents().size(); |
1182 | int64_t AddrDelta; |
1183 | bool Abs = PF.getAddrDelta().evaluateKnownAbsolute(Res&: AddrDelta, Layout); |
1184 | assert(Abs && "We created a pseudo probe with an invalid expression" ); |
1185 | (void)Abs; |
1186 | SmallVectorImpl<char> &Data = PF.getContents(); |
1187 | Data.clear(); |
1188 | raw_svector_ostream OSE(Data); |
1189 | PF.getFixups().clear(); |
1190 | |
1191 | // AddrDelta is a signed integer |
1192 | encodeSLEB128(Value: AddrDelta, OS&: OSE, PadTo: OldSize); |
1193 | return OldSize != Data.size(); |
1194 | } |
1195 | |
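/// Dispatch relaxation for a single fragment based on its kind. Returns true
/// if the fragment changed size and later offsets must be invalidated.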
1196 | bool MCAssembler::relaxFragment(MCAsmLayout &Layout, MCFragment &F) { |
1197 | switch(F.getKind()) { |
1198 | default: |
1199 | return false; |
1200 | case MCFragment::FT_Relaxable: |
1201 | assert(!getRelaxAll() && |
1202 | "Did not expect a MCRelaxableFragment in RelaxAll mode" ); |
1203 | return relaxInstruction(Layout, F&: cast<MCRelaxableFragment>(Val&: F)); |
1204 | case MCFragment::FT_Dwarf: |
1205 | return relaxDwarfLineAddr(Layout, DF&: cast<MCDwarfLineAddrFragment>(Val&: F)); |
1206 | case MCFragment::FT_DwarfFrame: |
1207 | return relaxDwarfCallFrameFragment(Layout, |
1208 | DF&: cast<MCDwarfCallFrameFragment>(Val&: F)); |
1209 | case MCFragment::FT_LEB: |
1210 | return relaxLEB(Layout, LF&: cast<MCLEBFragment>(Val&: F)); |
1211 | case MCFragment::FT_BoundaryAlign: |
1212 | return relaxBoundaryAlign(Layout, BF&: cast<MCBoundaryAlignFragment>(Val&: F)); |
1213 | case MCFragment::FT_CVInlineLines: |
1214 | return relaxCVInlineLineTable(Layout, F&: cast<MCCVInlineLineTableFragment>(Val&: F)); |
1215 | case MCFragment::FT_CVDefRange: |
1216 | return relaxCVDefRange(Layout, F&: cast<MCCVDefRangeFragment>(Val&: F)); |
1217 | case MCFragment::FT_PseudoProbe: |
1218 | return relaxPseudoProbeAddr(Layout, PF&: cast<MCPseudoProbeAddrFragment>(Val&: F)); |
1219 | } |
1220 | } |
1221 | |
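/// Attempt to relax every fragment in \p Sec once, invalidating the offsets
/// of everything after the first fragment that changed. Returns true if any
/// fragment was relaxed.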
1222 | bool MCAssembler::layoutSectionOnce(MCAsmLayout &Layout, MCSection &Sec) { |
1223 | // Holds the first fragment which needed relaxing during this layout. It will |
1224 | // remain NULL if none were relaxed. |
1225 | // When a fragment is relaxed, all the fragments following it should get |
1226 | // invalidated because their offset is going to change. |
1227 | MCFragment *FirstRelaxedFragment = nullptr; |
1228 | |
1229 | // Attempt to relax all the fragments in the section. |
1230 | for (MCFragment &Frag : Sec) { |
1231 | // Check if this is a fragment that needs relaxation. |
1232 | bool RelaxedFrag = relaxFragment(Layout, F&: Frag); |
1233 | if (RelaxedFrag && !FirstRelaxedFragment) |
1234 | FirstRelaxedFragment = &Frag; |
1235 | } |
1236 | if (FirstRelaxedFragment) { |
1237 | Layout.invalidateFragmentsFrom(F: FirstRelaxedFragment); |
1238 | return true; |
1239 | } |
1240 | return false; |
1241 | } |
1242 | |
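/// Perform one relaxation pass over every section. Returns true if any
/// fragment was relaxed, meaning another pass is required.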
1243 | bool MCAssembler::layoutOnce(MCAsmLayout &Layout) { |
1244 | ++stats::RelaxationSteps; |
1245 | |
1246 | bool WasRelaxed = false; |
1247 | for (MCSection &Sec : *this) { |
1248 | while (layoutSectionOnce(Layout, Sec)) |
1249 | WasRelaxed = true; |
1250 | } |
1251 | |
1252 | return WasRelaxed; |
1253 | } |
1254 | |
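/// Finish layout by forcing the offset and size of the last fragment in every
/// section to be computed, then let the backend perform any final lowering.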
1255 | void MCAssembler::finishLayout(MCAsmLayout &Layout) { |
1256 | assert(getBackendPtr() && "Expected assembler backend" ); |
1257 | // The layout is done. Mark every fragment as valid. |
1258 | for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) { |
1259 | MCSection &Section = *Layout.getSectionOrder()[i]; |
1260 | Layout.getFragmentOffset(F: &*Section.getFragmentList().rbegin()); |
1261 | computeFragmentSize(Layout, F: *Section.getFragmentList().rbegin()); |
1262 | } |
1263 | getBackend().finishLayout(Asm: *this, Layout); |
1264 | } |
1265 | |
1266 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) |
LLVM_DUMP_METHOD void MCAssembler::dump() const {
1268 | raw_ostream &OS = errs(); |
1269 | |
1270 | OS << "<MCAssembler\n" ; |
1271 | OS << " Sections:[\n " ; |
1272 | for (const_iterator it = begin(), ie = end(); it != ie; ++it) { |
1273 | if (it != begin()) OS << ",\n " ; |
1274 | it->dump(); |
1275 | } |
1276 | OS << "],\n" ; |
1277 | OS << " Symbols:[" ; |
1278 | |
1279 | for (const_symbol_iterator it = symbol_begin(), ie = symbol_end(); it != ie; ++it) { |
1280 | if (it != symbol_begin()) OS << ",\n " ; |
1281 | OS << "(" ; |
1282 | it->dump(); |
1283 | OS << ", Index:" << it->getIndex() << ", " ; |
1284 | OS << ")" ; |
1285 | } |
1286 | OS << "]>\n" ; |
1287 | } |
1288 | #endif |
1289 | |