//===- CodeGen/Analysis.h - CodeGen LLVM IR Analysis Utilities --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_ANALYSIS_H
#define LLVM_CODEGEN_ANALYSIS_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/IR/Instructions.h"

namespace llvm {
template <typename T> class SmallVectorImpl;
class GlobalValue;
class LLT;
class MachineBasicBlock;
class MachineFunction;
class TargetLoweringBase;
class TargetLowering;
class TargetMachine;
struct EVT;

/// Compute the linearized index of a member in a nested
/// aggregate/struct/array.
///
/// Given an LLVM IR aggregate type and a sequence of insertvalue or
/// extractvalue indices that identify a member, return the linearized index of
/// the start of the member, i.e. the number of non-aggregate elements that
/// precede it in memory. This is an element count, not a byte offset.
///
/// \param Ty is the type indexed by \p Indices.
/// \param Indices is an optional pointer into the indices list, pointing at
/// the current index.
/// \param IndicesEnd is the end of the indices list.
/// \param CurIndex is the current index in the recursion.
///
/// \returns \p CurIndex plus the linear index within \p Ty of the member
/// identified by the indices list.
unsigned ComputeLinearIndex(Type *Ty,
                            const unsigned *Indices,
                            const unsigned *IndicesEnd,
                            unsigned CurIndex = 0);

inline unsigned ComputeLinearIndex(Type *Ty,
                                   ArrayRef<unsigned> Indices,
                                   unsigned CurIndex = 0) {
  return ComputeLinearIndex(Ty, Indices.begin(), Indices.end(), CurIndex);
}
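
// Example (a minimal sketch; 'Ctx' is an assumed LLVMContext, not something
// provided by this header). For the aggregate type
// { i32, { float, double }, [2 x i16] }, the member selected by indices
// {1, 1} (the double) has linear index 2, because the flattened element order
// is i32, float, double, i16, i16:
//
//   StructType *Inner = StructType::get(Type::getFloatTy(Ctx),
//                                       Type::getDoubleTy(Ctx));
//   Type *Elts[] = {Type::getInt32Ty(Ctx), Inner,
//                   ArrayType::get(Type::getInt16Ty(Ctx), 2)};
//   StructType *Agg = StructType::get(Ctx, Elts);
//   unsigned Idx[] = {1, 1};
//   unsigned Linear = ComputeLinearIndex(Agg, Idx); // Linear == 2.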

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
                     SmallVectorImpl<EVT> &ValueVTs,
                     SmallVectorImpl<TypeSize> *Offsets,
                     TypeSize StartingOffset);
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
                     SmallVectorImpl<EVT> &ValueVTs,
                     SmallVectorImpl<TypeSize> *Offsets = nullptr,
                     uint64_t StartingOffset = 0);
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
                     SmallVectorImpl<EVT> &ValueVTs,
                     SmallVectorImpl<uint64_t> *FixedOffsets,
                     uint64_t StartingOffset);
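
// Example (a minimal sketch; 'TLI' and 'DL' are assumed to be the current
// TargetLowering and the module's DataLayout, and 'STy' the IR type
// { i32, [2 x float] }). With a typical layout the struct decomposes into
// three leaf values at byte offsets 0, 4 and 8:
//
//   SmallVector<EVT, 4> ValueVTs;
//   SmallVector<uint64_t, 4> Offsets;
//   ComputeValueVTs(TLI, DL, STy, ValueVTs, &Offsets, /*StartingOffset=*/0);
//   // ValueVTs == {MVT::i32, MVT::f32, MVT::f32}, Offsets == {0, 4, 8}.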

/// Variant of ComputeValueVTs that also produces the memory VTs.
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
                     SmallVectorImpl<EVT> &ValueVTs,
                     SmallVectorImpl<EVT> *MemVTs,
                     SmallVectorImpl<TypeSize> *Offsets,
                     TypeSize StartingOffset);
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
                     SmallVectorImpl<EVT> &ValueVTs,
                     SmallVectorImpl<EVT> *MemVTs,
                     SmallVectorImpl<TypeSize> *Offsets = nullptr,
                     uint64_t StartingOffset = 0);
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
                     SmallVectorImpl<EVT> &ValueVTs,
                     SmallVectorImpl<EVT> *MemVTs,
                     SmallVectorImpl<uint64_t> *FixedOffsets,
                     uint64_t StartingOffset);
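
// Sketch of the memory-VT variant (same assumed 'TLI' and 'DL' as above; 'Ty'
// is any IR type). MemVTs can differ from ValueVTs when a type's in-memory
// representation differs from the one used in registers:
//
//   SmallVector<EVT, 4> ValueVTs, MemVTs;
//   SmallVector<uint64_t, 4> Offsets;
//   ComputeValueVTs(TLI, DL, Ty, ValueVTs, &MemVTs, &Offsets,
//                   /*StartingOffset=*/0);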

/// computeValueLLTs - Given an LLVM IR type, compute a sequence of
/// LLTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void computeValueLLTs(const DataLayout &DL, Type &Ty,
                      SmallVectorImpl<LLT> &ValueTys,
                      SmallVectorImpl<uint64_t> *Offsets = nullptr,
                      uint64_t StartingOffset = 0);
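
// Example (a minimal sketch; 'DL' is an assumed DataLayout and 'STy' the
// struct type { i32, [2 x float] }). LLT has no notion of floating point, so
// every leaf comes back as a 32-bit scalar:
//
//   SmallVector<LLT, 4> ValueTys;
//   computeValueLLTs(DL, *STy, ValueTys);
//   // ValueTys == {LLT::scalar(32), LLT::scalar(32), LLT::scalar(32)}.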

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *ExtractTypeInfo(Value *V);

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred);

/// getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats,
/// return the equivalent code if we're allowed to assume that NaNs won't occur.
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC);

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred);

/// getICmpCondCode - Return the LLVM IR integer condition code
/// corresponding to the given ISD integer condition code.
ICmpInst::Predicate getICmpCondCode(ISD::CondCode Pred);
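
// Illustrative mappings for the predicate helpers above:
//
//   getFCmpCondCode(FCmpInst::FCMP_OLT)  == ISD::SETOLT
//   getFCmpCodeWithoutNaN(ISD::SETOLT)   == ISD::SETLT
//   getICmpCondCode(ICmpInst::ICMP_SLT)  == ISD::SETLT
//   getICmpCondCode(ISD::SETULT)         == ICmpInst::ICMP_ULT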

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM);
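
// A minimal usage sketch (assuming 'CI' is a CallInst being lowered and 'TM'
// is the current TargetMachine); a backend would typically require both the
// IR-level 'tail' marker and this positional check before emitting a tail
// call:
//
//   bool MayTailCall = CI.isTailCall() && isInTailCallPosition(CI, TM);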

/// Test whether, given that the input instruction is in the tail call
/// position, an attribute mismatch between the caller and the callee will
/// inhibit tail call optimizations.
/// \p AllowDifferingSizes is an output parameter which, if forming a tail call
/// is permitted, is set to true when the caller's and callee's return types
/// may differ in size and to false when they must match exactly.
bool attributesPermitTailCall(const Function *F, const Instruction *I,
                              const ReturnInst *Ret,
                              const TargetLoweringBase &TLI,
                              bool *AllowDifferingSizes = nullptr);
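
// A hedged sketch of the attribute check ('F', 'CI', 'Ret' and 'TLI' are
// assumed to be the caller, the call instruction, the following return and
// the target lowering, respectively):
//
//   bool AllowDifferingSizes;
//   if (attributesPermitTailCall(F, &CI, Ret, TLI, &AllowDifferingSizes)) {
//     // Attributes do not block the tail call. If AllowDifferingSizes is
//     // false, the caller's and callee's return types must also match in
//     // size for the tail call to be formed.
//   }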

/// Test whether, given that the input instruction is in the tail call
/// position, the return type or any attributes of the function will inhibit
/// tail call optimization.
bool returnTypeIsEligibleForTailCall(const Function *F, const Instruction *I,
                                     const ReturnInst *Ret,
                                     const TargetLoweringBase &TLI);
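
// Sketch (same hypothetical variables as above); this check subsumes the
// attribute check and additionally verifies that the returned value is
// compatible with a tail call:
//
//   bool Eligible = returnTypeIsEligibleForTailCall(F, &CI, Ret, TLI);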

/// Partition the basic blocks of \p MF into EH scopes, returning a map from
/// each machine basic block to the number of the EH scope (funclet) it is a
/// member of.
DenseMap<const MachineBasicBlock *, int>
getEHScopeMembership(const MachineFunction &MF);
} // End llvm namespace

#endif