//===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the IRBuilder class, which is used as a convenient way
// to create LLVM instructions with a consistent and simplified interface.
//
//===----------------------------------------------------------------------===//
13 | |
14 | #include "llvm/IR/IRBuilder.h" |
15 | #include "llvm/ADT/ArrayRef.h" |
16 | #include "llvm/IR/Constant.h" |
17 | #include "llvm/IR/Constants.h" |
18 | #include "llvm/IR/DebugInfoMetadata.h" |
19 | #include "llvm/IR/DerivedTypes.h" |
20 | #include "llvm/IR/Function.h" |
21 | #include "llvm/IR/GlobalValue.h" |
22 | #include "llvm/IR/GlobalVariable.h" |
23 | #include "llvm/IR/IntrinsicInst.h" |
24 | #include "llvm/IR/Intrinsics.h" |
25 | #include "llvm/IR/LLVMContext.h" |
26 | #include "llvm/IR/NoFolder.h" |
27 | #include "llvm/IR/Operator.h" |
28 | #include "llvm/IR/Statepoint.h" |
29 | #include "llvm/IR/Type.h" |
30 | #include "llvm/IR/Value.h" |
31 | #include "llvm/Support/Casting.h" |
32 | #include <cassert> |
33 | #include <cstdint> |
34 | #include <optional> |
35 | #include <vector> |
36 | |
37 | using namespace llvm; |
38 | |
39 | /// CreateGlobalString - Make a new global variable with an initializer that |
40 | /// has array of i8 type filled in with the nul terminated string value |
41 | /// specified. If Name is specified, it is the name of the global variable |
42 | /// created. |
43 | GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str, |
44 | const Twine &Name, |
45 | unsigned AddressSpace, |
46 | Module *M) { |
47 | Constant *StrConstant = ConstantDataArray::getString(Context, Initializer: Str); |
48 | if (!M) |
49 | M = BB->getParent()->getParent(); |
50 | auto *GV = new GlobalVariable( |
51 | *M, StrConstant->getType(), true, GlobalValue::PrivateLinkage, |
52 | StrConstant, Name, nullptr, GlobalVariable::NotThreadLocal, AddressSpace); |
53 | GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); |
54 | GV->setAlignment(Align(1)); |
55 | return GV; |
56 | } |
57 | |
58 | Type *IRBuilderBase::getCurrentFunctionReturnType() const { |
59 | assert(BB && BB->getParent() && "No current function!" ); |
60 | return BB->getParent()->getReturnType(); |
61 | } |
62 | |
63 | DebugLoc IRBuilderBase::getCurrentDebugLocation() const { |
64 | for (auto &KV : MetadataToCopy) |
65 | if (KV.first == LLVMContext::MD_dbg) |
66 | return {cast<DILocation>(Val: KV.second)}; |
67 | |
68 | return {}; |
69 | } |
70 | void IRBuilderBase::SetInstDebugLocation(Instruction *I) const { |
71 | for (const auto &KV : MetadataToCopy) |
72 | if (KV.first == LLVMContext::MD_dbg) { |
73 | I->setDebugLoc(DebugLoc(KV.second)); |
74 | return; |
75 | } |
76 | } |
77 | |
78 | CallInst * |
79 | IRBuilderBase::createCallHelper(Function *Callee, ArrayRef<Value *> Ops, |
80 | const Twine &Name, Instruction *FMFSource, |
81 | ArrayRef<OperandBundleDef> OpBundles) { |
82 | CallInst *CI = CreateCall(Callee, Args: Ops, OpBundles, Name); |
83 | if (FMFSource) |
84 | CI->copyFastMathFlags(I: FMFSource); |
85 | return CI; |
86 | } |
87 | |
88 | Value *IRBuilderBase::CreateVScale(Constant *Scaling, const Twine &Name) { |
89 | assert(isa<ConstantInt>(Scaling) && "Expected constant integer" ); |
90 | if (cast<ConstantInt>(Val: Scaling)->isZero()) |
91 | return Scaling; |
92 | Module *M = GetInsertBlock()->getParent()->getParent(); |
93 | Function *TheFn = |
94 | Intrinsic::getDeclaration(M, Intrinsic::id: vscale, Tys: {Scaling->getType()}); |
95 | CallInst *CI = CreateCall(Callee: TheFn, Args: {}, OpBundles: {}, Name); |
96 | return cast<ConstantInt>(Val: Scaling)->isOne() ? CI : CreateMul(LHS: CI, RHS: Scaling); |
97 | } |
98 | |
99 | Value *IRBuilderBase::CreateElementCount(Type *DstType, ElementCount EC) { |
100 | Constant *MinEC = ConstantInt::get(Ty: DstType, V: EC.getKnownMinValue()); |
101 | return EC.isScalable() ? CreateVScale(Scaling: MinEC) : MinEC; |
102 | } |
103 | |
104 | Value *IRBuilderBase::CreateTypeSize(Type *DstType, TypeSize Size) { |
105 | Constant *MinSize = ConstantInt::get(Ty: DstType, V: Size.getKnownMinValue()); |
106 | return Size.isScalable() ? CreateVScale(Scaling: MinSize) : MinSize; |
107 | } |
108 | |
109 | Value *IRBuilderBase::CreateStepVector(Type *DstType, const Twine &Name) { |
110 | Type *STy = DstType->getScalarType(); |
111 | if (isa<ScalableVectorType>(Val: DstType)) { |
112 | Type *StepVecType = DstType; |
113 | // TODO: We expect this special case (element type < 8 bits) to be |
114 | // temporary - once the intrinsic properly supports < 8 bits this code |
115 | // can be removed. |
116 | if (STy->getScalarSizeInBits() < 8) |
117 | StepVecType = |
118 | VectorType::get(ElementType: getInt8Ty(), Other: cast<ScalableVectorType>(Val: DstType)); |
119 | Value *Res = CreateIntrinsic(Intrinsic::experimental_stepvector, |
120 | {StepVecType}, {}, nullptr, Name); |
121 | if (StepVecType != DstType) |
122 | Res = CreateTrunc(V: Res, DestTy: DstType); |
123 | return Res; |
124 | } |
125 | |
126 | unsigned NumEls = cast<FixedVectorType>(Val: DstType)->getNumElements(); |
127 | |
128 | // Create a vector of consecutive numbers from zero to VF. |
129 | SmallVector<Constant *, 8> Indices; |
130 | for (unsigned i = 0; i < NumEls; ++i) |
131 | Indices.push_back(Elt: ConstantInt::get(Ty: STy, V: i)); |
132 | |
133 | // Add the consecutive indices to the vector value. |
134 | return ConstantVector::get(V: Indices); |
135 | } |
136 | |
137 | CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size, |
138 | MaybeAlign Align, bool isVolatile, |
139 | MDNode *TBAATag, MDNode *ScopeTag, |
140 | MDNode *NoAliasTag) { |
141 | Value *Ops[] = {Ptr, Val, Size, getInt1(V: isVolatile)}; |
142 | Type *Tys[] = { Ptr->getType(), Size->getType() }; |
143 | Module *M = BB->getParent()->getParent(); |
144 | Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::id: memset, Tys); |
145 | |
146 | CallInst *CI = CreateCall(Callee: TheFn, Args: Ops); |
147 | |
148 | if (Align) |
149 | cast<MemSetInst>(Val: CI)->setDestAlignment(*Align); |
150 | |
151 | // Set the TBAA info if present. |
152 | if (TBAATag) |
153 | CI->setMetadata(KindID: LLVMContext::MD_tbaa, Node: TBAATag); |
154 | |
155 | if (ScopeTag) |
156 | CI->setMetadata(KindID: LLVMContext::MD_alias_scope, Node: ScopeTag); |
157 | |
158 | if (NoAliasTag) |
159 | CI->setMetadata(KindID: LLVMContext::MD_noalias, Node: NoAliasTag); |
160 | |
161 | return CI; |
162 | } |
163 | |
164 | CallInst *IRBuilderBase::CreateMemSetInline(Value *Dst, MaybeAlign DstAlign, |
165 | Value *Val, Value *Size, |
166 | bool IsVolatile, MDNode *TBAATag, |
167 | MDNode *ScopeTag, |
168 | MDNode *NoAliasTag) { |
169 | Value *Ops[] = {Dst, Val, Size, getInt1(V: IsVolatile)}; |
170 | Type *Tys[] = {Dst->getType(), Size->getType()}; |
171 | Module *M = BB->getParent()->getParent(); |
172 | Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::id: memset_inline, Tys); |
173 | |
174 | CallInst *CI = CreateCall(Callee: TheFn, Args: Ops); |
175 | |
176 | if (DstAlign) |
177 | cast<MemSetInlineInst>(Val: CI)->setDestAlignment(*DstAlign); |
178 | |
179 | // Set the TBAA info if present. |
180 | if (TBAATag) |
181 | CI->setMetadata(KindID: LLVMContext::MD_tbaa, Node: TBAATag); |
182 | |
183 | if (ScopeTag) |
184 | CI->setMetadata(KindID: LLVMContext::MD_alias_scope, Node: ScopeTag); |
185 | |
186 | if (NoAliasTag) |
187 | CI->setMetadata(KindID: LLVMContext::MD_noalias, Node: NoAliasTag); |
188 | |
189 | return CI; |
190 | } |
191 | |
192 | CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet( |
193 | Value *Ptr, Value *Val, Value *Size, Align Alignment, uint32_t ElementSize, |
194 | MDNode *TBAATag, MDNode *ScopeTag, MDNode *NoAliasTag) { |
195 | |
196 | Value *Ops[] = {Ptr, Val, Size, getInt32(C: ElementSize)}; |
197 | Type *Tys[] = {Ptr->getType(), Size->getType()}; |
198 | Module *M = BB->getParent()->getParent(); |
199 | Function *TheFn = Intrinsic::getDeclaration( |
200 | M, Intrinsic::id: memset_element_unordered_atomic, Tys); |
201 | |
202 | CallInst *CI = CreateCall(Callee: TheFn, Args: Ops); |
203 | |
204 | cast<AtomicMemSetInst>(Val: CI)->setDestAlignment(Alignment); |
205 | |
206 | // Set the TBAA info if present. |
207 | if (TBAATag) |
208 | CI->setMetadata(KindID: LLVMContext::MD_tbaa, Node: TBAATag); |
209 | |
210 | if (ScopeTag) |
211 | CI->setMetadata(KindID: LLVMContext::MD_alias_scope, Node: ScopeTag); |
212 | |
213 | if (NoAliasTag) |
214 | CI->setMetadata(KindID: LLVMContext::MD_noalias, Node: NoAliasTag); |
215 | |
216 | return CI; |
217 | } |
218 | |
219 | CallInst *IRBuilderBase::CreateMemTransferInst( |
220 | Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src, |
221 | MaybeAlign SrcAlign, Value *Size, bool isVolatile, MDNode *TBAATag, |
222 | MDNode *TBAAStructTag, MDNode *ScopeTag, MDNode *NoAliasTag) { |
223 | assert((IntrID == Intrinsic::memcpy || IntrID == Intrinsic::memcpy_inline || |
224 | IntrID == Intrinsic::memmove) && |
225 | "Unexpected intrinsic ID" ); |
226 | Value *Ops[] = {Dst, Src, Size, getInt1(V: isVolatile)}; |
227 | Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() }; |
228 | Module *M = BB->getParent()->getParent(); |
229 | Function *TheFn = Intrinsic::getDeclaration(M, id: IntrID, Tys); |
230 | |
231 | CallInst *CI = CreateCall(Callee: TheFn, Args: Ops); |
232 | |
233 | auto* MCI = cast<MemTransferInst>(Val: CI); |
234 | if (DstAlign) |
235 | MCI->setDestAlignment(*DstAlign); |
236 | if (SrcAlign) |
237 | MCI->setSourceAlignment(*SrcAlign); |
238 | |
239 | // Set the TBAA info if present. |
240 | if (TBAATag) |
241 | CI->setMetadata(KindID: LLVMContext::MD_tbaa, Node: TBAATag); |
242 | |
243 | // Set the TBAA Struct info if present. |
244 | if (TBAAStructTag) |
245 | CI->setMetadata(KindID: LLVMContext::MD_tbaa_struct, Node: TBAAStructTag); |
246 | |
247 | if (ScopeTag) |
248 | CI->setMetadata(KindID: LLVMContext::MD_alias_scope, Node: ScopeTag); |
249 | |
250 | if (NoAliasTag) |
251 | CI->setMetadata(KindID: LLVMContext::MD_noalias, Node: NoAliasTag); |
252 | |
253 | return CI; |
254 | } |
255 | |
256 | CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy( |
257 | Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size, |
258 | uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag, |
259 | MDNode *ScopeTag, MDNode *NoAliasTag) { |
260 | assert(DstAlign >= ElementSize && |
261 | "Pointer alignment must be at least element size" ); |
262 | assert(SrcAlign >= ElementSize && |
263 | "Pointer alignment must be at least element size" ); |
264 | Value *Ops[] = {Dst, Src, Size, getInt32(C: ElementSize)}; |
265 | Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()}; |
266 | Module *M = BB->getParent()->getParent(); |
267 | Function *TheFn = Intrinsic::getDeclaration( |
268 | M, Intrinsic::id: memcpy_element_unordered_atomic, Tys); |
269 | |
270 | CallInst *CI = CreateCall(Callee: TheFn, Args: Ops); |
271 | |
272 | // Set the alignment of the pointer args. |
273 | auto *AMCI = cast<AtomicMemCpyInst>(Val: CI); |
274 | AMCI->setDestAlignment(DstAlign); |
275 | AMCI->setSourceAlignment(SrcAlign); |
276 | |
277 | // Set the TBAA info if present. |
278 | if (TBAATag) |
279 | CI->setMetadata(KindID: LLVMContext::MD_tbaa, Node: TBAATag); |
280 | |
281 | // Set the TBAA Struct info if present. |
282 | if (TBAAStructTag) |
283 | CI->setMetadata(KindID: LLVMContext::MD_tbaa_struct, Node: TBAAStructTag); |
284 | |
285 | if (ScopeTag) |
286 | CI->setMetadata(KindID: LLVMContext::MD_alias_scope, Node: ScopeTag); |
287 | |
288 | if (NoAliasTag) |
289 | CI->setMetadata(KindID: LLVMContext::MD_noalias, Node: NoAliasTag); |
290 | |
291 | return CI; |
292 | } |
293 | |
294 | /// isConstantOne - Return true only if val is constant int 1 |
295 | static bool isConstantOne(const Value *Val) { |
296 | assert(Val && "isConstantOne does not work with nullptr Val" ); |
297 | const ConstantInt *CVal = dyn_cast<ConstantInt>(Val); |
298 | return CVal && CVal->isOne(); |
299 | } |
300 | |
301 | CallInst *IRBuilderBase::CreateMalloc(Type *IntPtrTy, Type *AllocTy, |
302 | Value *AllocSize, Value *ArraySize, |
303 | ArrayRef<OperandBundleDef> OpB, |
304 | Function *MallocF, const Twine &Name) { |
305 | // malloc(type) becomes: |
306 | // i8* malloc(typeSize) |
307 | // malloc(type, arraySize) becomes: |
308 | // i8* malloc(typeSize*arraySize) |
309 | if (!ArraySize) |
310 | ArraySize = ConstantInt::get(Ty: IntPtrTy, V: 1); |
311 | else if (ArraySize->getType() != IntPtrTy) |
312 | ArraySize = CreateIntCast(V: ArraySize, DestTy: IntPtrTy, isSigned: false); |
313 | |
314 | if (!isConstantOne(Val: ArraySize)) { |
315 | if (isConstantOne(Val: AllocSize)) { |
316 | AllocSize = ArraySize; // Operand * 1 = Operand |
317 | } else { |
318 | // Multiply type size by the array size... |
319 | AllocSize = CreateMul(LHS: ArraySize, RHS: AllocSize, Name: "mallocsize" ); |
320 | } |
321 | } |
322 | |
323 | assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size" ); |
324 | // Create the call to Malloc. |
325 | Module *M = BB->getParent()->getParent(); |
326 | Type *BPTy = PointerType::getUnqual(C&: Context); |
327 | FunctionCallee MallocFunc = MallocF; |
328 | if (!MallocFunc) |
329 | // prototype malloc as "void *malloc(size_t)" |
330 | MallocFunc = M->getOrInsertFunction(Name: "malloc" , RetTy: BPTy, Args: IntPtrTy); |
331 | CallInst *MCall = CreateCall(Callee: MallocFunc, Args: AllocSize, OpBundles: OpB, Name); |
332 | |
333 | MCall->setTailCall(); |
334 | if (Function *F = dyn_cast<Function>(Val: MallocFunc.getCallee())) { |
335 | MCall->setCallingConv(F->getCallingConv()); |
336 | F->setReturnDoesNotAlias(); |
337 | } |
338 | |
339 | assert(!MCall->getType()->isVoidTy() && "Malloc has void return type" ); |
340 | |
341 | return MCall; |
342 | } |
343 | |
344 | CallInst *IRBuilderBase::CreateMalloc(Type *IntPtrTy, Type *AllocTy, |
345 | Value *AllocSize, Value *ArraySize, |
346 | Function *MallocF, const Twine &Name) { |
347 | |
348 | return CreateMalloc(IntPtrTy, AllocTy, AllocSize, ArraySize, OpB: std::nullopt, |
349 | MallocF, Name); |
350 | } |
351 | |
352 | /// CreateFree - Generate the IR for a call to the builtin free function. |
353 | CallInst *IRBuilderBase::CreateFree(Value *Source, |
354 | ArrayRef<OperandBundleDef> Bundles) { |
355 | assert(Source->getType()->isPointerTy() && |
356 | "Can not free something of nonpointer type!" ); |
357 | |
358 | Module *M = BB->getParent()->getParent(); |
359 | |
360 | Type *VoidTy = Type::getVoidTy(C&: M->getContext()); |
361 | Type *VoidPtrTy = PointerType::getUnqual(C&: M->getContext()); |
362 | // prototype free as "void free(void*)" |
363 | FunctionCallee FreeFunc = M->getOrInsertFunction(Name: "free" , RetTy: VoidTy, Args: VoidPtrTy); |
364 | CallInst *Result = CreateCall(Callee: FreeFunc, Args: Source, OpBundles: Bundles, Name: "" ); |
365 | Result->setTailCall(); |
366 | if (Function *F = dyn_cast<Function>(Val: FreeFunc.getCallee())) |
367 | Result->setCallingConv(F->getCallingConv()); |
368 | |
369 | return Result; |
370 | } |
371 | |
372 | CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove( |
373 | Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size, |
374 | uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag, |
375 | MDNode *ScopeTag, MDNode *NoAliasTag) { |
376 | assert(DstAlign >= ElementSize && |
377 | "Pointer alignment must be at least element size" ); |
378 | assert(SrcAlign >= ElementSize && |
379 | "Pointer alignment must be at least element size" ); |
380 | Value *Ops[] = {Dst, Src, Size, getInt32(C: ElementSize)}; |
381 | Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()}; |
382 | Module *M = BB->getParent()->getParent(); |
383 | Function *TheFn = Intrinsic::getDeclaration( |
384 | M, Intrinsic::id: memmove_element_unordered_atomic, Tys); |
385 | |
386 | CallInst *CI = CreateCall(Callee: TheFn, Args: Ops); |
387 | |
388 | // Set the alignment of the pointer args. |
389 | CI->addParamAttr(ArgNo: 0, Attr: Attribute::getWithAlignment(Context&: CI->getContext(), Alignment: DstAlign)); |
390 | CI->addParamAttr(ArgNo: 1, Attr: Attribute::getWithAlignment(Context&: CI->getContext(), Alignment: SrcAlign)); |
391 | |
392 | // Set the TBAA info if present. |
393 | if (TBAATag) |
394 | CI->setMetadata(KindID: LLVMContext::MD_tbaa, Node: TBAATag); |
395 | |
396 | // Set the TBAA Struct info if present. |
397 | if (TBAAStructTag) |
398 | CI->setMetadata(KindID: LLVMContext::MD_tbaa_struct, Node: TBAAStructTag); |
399 | |
400 | if (ScopeTag) |
401 | CI->setMetadata(KindID: LLVMContext::MD_alias_scope, Node: ScopeTag); |
402 | |
403 | if (NoAliasTag) |
404 | CI->setMetadata(KindID: LLVMContext::MD_noalias, Node: NoAliasTag); |
405 | |
406 | return CI; |
407 | } |
408 | |
409 | CallInst *IRBuilderBase::getReductionIntrinsic(Intrinsic::ID ID, Value *Src) { |
410 | Module *M = GetInsertBlock()->getParent()->getParent(); |
411 | Value *Ops[] = {Src}; |
412 | Type *Tys[] = { Src->getType() }; |
413 | auto Decl = Intrinsic::getDeclaration(M, id: ID, Tys); |
414 | return CreateCall(Callee: Decl, Args: Ops); |
415 | } |
416 | |
417 | CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) { |
418 | Module *M = GetInsertBlock()->getParent()->getParent(); |
419 | Value *Ops[] = {Acc, Src}; |
420 | auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fadd, |
421 | {Src->getType()}); |
422 | return CreateCall(Decl, Ops); |
423 | } |
424 | |
425 | CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) { |
426 | Module *M = GetInsertBlock()->getParent()->getParent(); |
427 | Value *Ops[] = {Acc, Src}; |
428 | auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fmul, |
429 | {Src->getType()}); |
430 | return CreateCall(Decl, Ops); |
431 | } |
432 | |
433 | CallInst *IRBuilderBase::CreateAddReduce(Value *Src) { |
434 | return getReductionIntrinsic(Intrinsic::ID: vector_reduce_add, Src); |
435 | } |
436 | |
437 | CallInst *IRBuilderBase::CreateMulReduce(Value *Src) { |
438 | return getReductionIntrinsic(Intrinsic::ID: vector_reduce_mul, Src); |
439 | } |
440 | |
441 | CallInst *IRBuilderBase::CreateAndReduce(Value *Src) { |
442 | return getReductionIntrinsic(Intrinsic::ID: vector_reduce_and, Src); |
443 | } |
444 | |
445 | CallInst *IRBuilderBase::CreateOrReduce(Value *Src) { |
446 | return getReductionIntrinsic(Intrinsic::ID: vector_reduce_or, Src); |
447 | } |
448 | |
449 | CallInst *IRBuilderBase::CreateXorReduce(Value *Src) { |
450 | return getReductionIntrinsic(Intrinsic::ID: vector_reduce_xor, Src); |
451 | } |
452 | |
453 | CallInst *IRBuilderBase::CreateIntMaxReduce(Value *Src, bool IsSigned) { |
454 | auto ID = |
455 | IsSigned ? Intrinsic::vector_reduce_smax : Intrinsic::vector_reduce_umax; |
456 | return getReductionIntrinsic(ID: ID, Src); |
457 | } |
458 | |
459 | CallInst *IRBuilderBase::CreateIntMinReduce(Value *Src, bool IsSigned) { |
460 | auto ID = |
461 | IsSigned ? Intrinsic::vector_reduce_smin : Intrinsic::vector_reduce_umin; |
462 | return getReductionIntrinsic(ID: ID, Src); |
463 | } |
464 | |
465 | CallInst *IRBuilderBase::CreateFPMaxReduce(Value *Src) { |
466 | return getReductionIntrinsic(Intrinsic::ID: vector_reduce_fmax, Src); |
467 | } |
468 | |
469 | CallInst *IRBuilderBase::CreateFPMinReduce(Value *Src) { |
470 | return getReductionIntrinsic(Intrinsic::ID: vector_reduce_fmin, Src); |
471 | } |
472 | |
473 | CallInst *IRBuilderBase::CreateFPMaximumReduce(Value *Src) { |
474 | return getReductionIntrinsic(Intrinsic::ID: vector_reduce_fmaximum, Src); |
475 | } |
476 | |
477 | CallInst *IRBuilderBase::CreateFPMinimumReduce(Value *Src) { |
478 | return getReductionIntrinsic(Intrinsic::ID: vector_reduce_fminimum, Src); |
479 | } |
480 | |
481 | CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) { |
482 | assert(isa<PointerType>(Ptr->getType()) && |
483 | "lifetime.start only applies to pointers." ); |
484 | if (!Size) |
485 | Size = getInt64(C: -1); |
486 | else |
487 | assert(Size->getType() == getInt64Ty() && |
488 | "lifetime.start requires the size to be an i64" ); |
489 | Value *Ops[] = { Size, Ptr }; |
490 | Module *M = BB->getParent()->getParent(); |
491 | Function *TheFn = |
492 | Intrinsic::getDeclaration(M, Intrinsic::id: lifetime_start, Tys: {Ptr->getType()}); |
493 | return CreateCall(Callee: TheFn, Args: Ops); |
494 | } |
495 | |
496 | CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) { |
497 | assert(isa<PointerType>(Ptr->getType()) && |
498 | "lifetime.end only applies to pointers." ); |
499 | if (!Size) |
500 | Size = getInt64(C: -1); |
501 | else |
502 | assert(Size->getType() == getInt64Ty() && |
503 | "lifetime.end requires the size to be an i64" ); |
504 | Value *Ops[] = { Size, Ptr }; |
505 | Module *M = BB->getParent()->getParent(); |
506 | Function *TheFn = |
507 | Intrinsic::getDeclaration(M, Intrinsic::id: lifetime_end, Tys: {Ptr->getType()}); |
508 | return CreateCall(Callee: TheFn, Args: Ops); |
509 | } |
510 | |
511 | CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) { |
512 | |
513 | assert(isa<PointerType>(Ptr->getType()) && |
514 | "invariant.start only applies to pointers." ); |
515 | if (!Size) |
516 | Size = getInt64(C: -1); |
517 | else |
518 | assert(Size->getType() == getInt64Ty() && |
519 | "invariant.start requires the size to be an i64" ); |
520 | |
521 | Value *Ops[] = {Size, Ptr}; |
522 | // Fill in the single overloaded type: memory object type. |
523 | Type *ObjectPtr[1] = {Ptr->getType()}; |
524 | Module *M = BB->getParent()->getParent(); |
525 | Function *TheFn = |
526 | Intrinsic::getDeclaration(M, Intrinsic::id: invariant_start, Tys: ObjectPtr); |
527 | return CreateCall(Callee: TheFn, Args: Ops); |
528 | } |
529 | |
530 | static MaybeAlign getAlign(Value *Ptr) { |
531 | if (auto *O = dyn_cast<GlobalObject>(Val: Ptr)) |
532 | return O->getAlign(); |
533 | if (auto *A = dyn_cast<GlobalAlias>(Val: Ptr)) |
534 | return A->getAliaseeObject()->getAlign(); |
535 | return {}; |
536 | } |
537 | |
538 | CallInst *IRBuilderBase::CreateThreadLocalAddress(Value *Ptr) { |
539 | assert(isa<GlobalValue>(Ptr) && cast<GlobalValue>(Ptr)->isThreadLocal() && |
540 | "threadlocal_address only applies to thread local variables." ); |
541 | CallInst *CI = CreateIntrinsic(llvm::Intrinsic::threadlocal_address, |
542 | {Ptr->getType()}, {Ptr}); |
543 | if (MaybeAlign A = getAlign(Ptr)) { |
544 | CI->addParamAttr(ArgNo: 0, Attr: Attribute::getWithAlignment(Context&: CI->getContext(), Alignment: *A)); |
545 | CI->addRetAttr(Attr: Attribute::getWithAlignment(Context&: CI->getContext(), Alignment: *A)); |
546 | } |
547 | return CI; |
548 | } |
549 | |
550 | CallInst * |
551 | IRBuilderBase::CreateAssumption(Value *Cond, |
552 | ArrayRef<OperandBundleDef> OpBundles) { |
553 | assert(Cond->getType() == getInt1Ty() && |
554 | "an assumption condition must be of type i1" ); |
555 | |
556 | Value *Ops[] = { Cond }; |
557 | Module *M = BB->getParent()->getParent(); |
558 | Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::id: assume); |
559 | return CreateCall(Callee: FnAssume, Args: Ops, OpBundles); |
560 | } |
561 | |
562 | Instruction *IRBuilderBase::CreateNoAliasScopeDeclaration(Value *Scope) { |
563 | Module *M = BB->getModule(); |
564 | auto *FnIntrinsic = Intrinsic::getDeclaration( |
565 | M, Intrinsic::experimental_noalias_scope_decl, {}); |
566 | return CreateCall(FnIntrinsic, {Scope}); |
567 | } |
568 | |
569 | /// Create a call to a Masked Load intrinsic. |
570 | /// \p Ty - vector type to load |
571 | /// \p Ptr - base pointer for the load |
572 | /// \p Alignment - alignment of the source location |
573 | /// \p Mask - vector of booleans which indicates what vector lanes should |
574 | /// be accessed in memory |
575 | /// \p PassThru - pass-through value that is used to fill the masked-off lanes |
576 | /// of the result |
577 | /// \p Name - name of the result variable |
578 | CallInst *IRBuilderBase::CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, |
579 | Value *Mask, Value *PassThru, |
580 | const Twine &Name) { |
581 | auto *PtrTy = cast<PointerType>(Val: Ptr->getType()); |
582 | assert(Ty->isVectorTy() && "Type should be vector" ); |
583 | assert(Mask && "Mask should not be all-ones (null)" ); |
584 | if (!PassThru) |
585 | PassThru = PoisonValue::get(T: Ty); |
586 | Type *OverloadedTypes[] = { Ty, PtrTy }; |
587 | Value *Ops[] = {Ptr, getInt32(C: Alignment.value()), Mask, PassThru}; |
588 | return CreateMaskedIntrinsic(Intrinsic::Id: masked_load, Ops, |
589 | OverloadedTypes, Name); |
590 | } |
591 | |
592 | /// Create a call to a Masked Store intrinsic. |
593 | /// \p Val - data to be stored, |
594 | /// \p Ptr - base pointer for the store |
595 | /// \p Alignment - alignment of the destination location |
596 | /// \p Mask - vector of booleans which indicates what vector lanes should |
597 | /// be accessed in memory |
598 | CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr, |
599 | Align Alignment, Value *Mask) { |
600 | auto *PtrTy = cast<PointerType>(Val: Ptr->getType()); |
601 | Type *DataTy = Val->getType(); |
602 | assert(DataTy->isVectorTy() && "Val should be a vector" ); |
603 | assert(Mask && "Mask should not be all-ones (null)" ); |
604 | Type *OverloadedTypes[] = { DataTy, PtrTy }; |
605 | Value *Ops[] = {Val, Ptr, getInt32(C: Alignment.value()), Mask}; |
606 | return CreateMaskedIntrinsic(Intrinsic::Id: masked_store, Ops, OverloadedTypes); |
607 | } |
608 | |
609 | /// Create a call to a Masked intrinsic, with given intrinsic Id, |
610 | /// an array of operands - Ops, and an array of overloaded types - |
611 | /// OverloadedTypes. |
612 | CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id, |
613 | ArrayRef<Value *> Ops, |
614 | ArrayRef<Type *> OverloadedTypes, |
615 | const Twine &Name) { |
616 | Module *M = BB->getParent()->getParent(); |
617 | Function *TheFn = Intrinsic::getDeclaration(M, id: Id, Tys: OverloadedTypes); |
618 | return CreateCall(Callee: TheFn, Args: Ops, OpBundles: {}, Name); |
619 | } |
620 | |
621 | /// Create a call to a Masked Gather intrinsic. |
622 | /// \p Ty - vector type to gather |
623 | /// \p Ptrs - vector of pointers for loading |
624 | /// \p Align - alignment for one element |
625 | /// \p Mask - vector of booleans which indicates what vector lanes should |
626 | /// be accessed in memory |
627 | /// \p PassThru - pass-through value that is used to fill the masked-off lanes |
628 | /// of the result |
629 | /// \p Name - name of the result variable |
630 | CallInst *IRBuilderBase::CreateMaskedGather(Type *Ty, Value *Ptrs, |
631 | Align Alignment, Value *Mask, |
632 | Value *PassThru, |
633 | const Twine &Name) { |
634 | auto *VecTy = cast<VectorType>(Val: Ty); |
635 | ElementCount NumElts = VecTy->getElementCount(); |
636 | auto *PtrsTy = cast<VectorType>(Val: Ptrs->getType()); |
637 | assert(NumElts == PtrsTy->getElementCount() && "Element count mismatch" ); |
638 | |
639 | if (!Mask) |
640 | Mask = getAllOnesMask(NumElts); |
641 | |
642 | if (!PassThru) |
643 | PassThru = PoisonValue::get(T: Ty); |
644 | |
645 | Type *OverloadedTypes[] = {Ty, PtrsTy}; |
646 | Value *Ops[] = {Ptrs, getInt32(C: Alignment.value()), Mask, PassThru}; |
647 | |
648 | // We specify only one type when we create this intrinsic. Types of other |
649 | // arguments are derived from this type. |
650 | return CreateMaskedIntrinsic(Intrinsic::Id: masked_gather, Ops, OverloadedTypes, |
651 | Name); |
652 | } |
653 | |
654 | /// Create a call to a Masked Scatter intrinsic. |
655 | /// \p Data - data to be stored, |
656 | /// \p Ptrs - the vector of pointers, where the \p Data elements should be |
657 | /// stored |
658 | /// \p Align - alignment for one element |
659 | /// \p Mask - vector of booleans which indicates what vector lanes should |
660 | /// be accessed in memory |
661 | CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs, |
662 | Align Alignment, Value *Mask) { |
663 | auto *PtrsTy = cast<VectorType>(Val: Ptrs->getType()); |
664 | auto *DataTy = cast<VectorType>(Val: Data->getType()); |
665 | ElementCount NumElts = PtrsTy->getElementCount(); |
666 | |
667 | if (!Mask) |
668 | Mask = getAllOnesMask(NumElts); |
669 | |
670 | Type *OverloadedTypes[] = {DataTy, PtrsTy}; |
671 | Value *Ops[] = {Data, Ptrs, getInt32(C: Alignment.value()), Mask}; |
672 | |
673 | // We specify only one type when we create this intrinsic. Types of other |
674 | // arguments are derived from this type. |
675 | return CreateMaskedIntrinsic(Intrinsic::Id: masked_scatter, Ops, OverloadedTypes); |
676 | } |
677 | |
678 | /// Create a call to Masked Expand Load intrinsic |
679 | /// \p Ty - vector type to load |
680 | /// \p Ptr - base pointer for the load |
681 | /// \p Mask - vector of booleans which indicates what vector lanes should |
682 | /// be accessed in memory |
683 | /// \p PassThru - pass-through value that is used to fill the masked-off lanes |
684 | /// of the result |
685 | /// \p Name - name of the result variable |
686 | CallInst *IRBuilderBase::CreateMaskedExpandLoad(Type *Ty, Value *Ptr, |
687 | Value *Mask, Value *PassThru, |
688 | const Twine &Name) { |
689 | assert(Ty->isVectorTy() && "Type should be vector" ); |
690 | assert(Mask && "Mask should not be all-ones (null)" ); |
691 | if (!PassThru) |
692 | PassThru = PoisonValue::get(T: Ty); |
693 | Type *OverloadedTypes[] = {Ty}; |
694 | Value *Ops[] = {Ptr, Mask, PassThru}; |
695 | return CreateMaskedIntrinsic(Intrinsic::Id: masked_expandload, Ops, |
696 | OverloadedTypes, Name); |
697 | } |
698 | |
699 | /// Create a call to Masked Compress Store intrinsic |
700 | /// \p Val - data to be stored, |
701 | /// \p Ptr - base pointer for the store |
702 | /// \p Mask - vector of booleans which indicates what vector lanes should |
703 | /// be accessed in memory |
704 | CallInst *IRBuilderBase::CreateMaskedCompressStore(Value *Val, Value *Ptr, |
705 | Value *Mask) { |
706 | Type *DataTy = Val->getType(); |
707 | assert(DataTy->isVectorTy() && "Val should be a vector" ); |
708 | assert(Mask && "Mask should not be all-ones (null)" ); |
709 | Type *OverloadedTypes[] = {DataTy}; |
710 | Value *Ops[] = {Val, Ptr, Mask}; |
711 | return CreateMaskedIntrinsic(Intrinsic::Id: masked_compressstore, Ops, |
712 | OverloadedTypes); |
713 | } |
714 | |
715 | template <typename T0> |
716 | static std::vector<Value *> |
717 | getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes, |
718 | Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs) { |
719 | std::vector<Value *> Args; |
720 | Args.push_back(x: B.getInt64(C: ID)); |
721 | Args.push_back(x: B.getInt32(C: NumPatchBytes)); |
722 | Args.push_back(x: ActualCallee); |
723 | Args.push_back(B.getInt32(C: CallArgs.size())); |
724 | Args.push_back(x: B.getInt32(C: Flags)); |
725 | llvm::append_range(Args, CallArgs); |
726 | // GC Transition and Deopt args are now always handled via operand bundle. |
727 | // They will be removed from the signature of gc.statepoint shortly. |
728 | Args.push_back(x: B.getInt32(C: 0)); |
729 | Args.push_back(x: B.getInt32(C: 0)); |
730 | // GC args are now encoded in the gc-live operand bundle |
731 | return Args; |
732 | } |
733 | |
/// Build the operand-bundle list for a gc.statepoint: a "deopt" bundle when
/// DeoptArgs is present, a "gc-transition" bundle when TransitionArgs is
/// present, and a "gc-live" bundle when GCArgs is non-empty.
template<typename T1, typename T2, typename T3>
static std::vector<OperandBundleDef>
getStatepointBundles(std::optional<ArrayRef<T1>> TransitionArgs,
                     std::optional<ArrayRef<T2>> DeoptArgs,
                     ArrayRef<T3> GCArgs) {
  std::vector<OperandBundleDef> Rval;
  if (DeoptArgs) {
    SmallVector<Value*, 16> DeoptValues;
    llvm::append_range(DeoptValues, *DeoptArgs);
    Rval.emplace_back(args: "deopt" , args&: DeoptValues);
  }
  if (TransitionArgs) {
    SmallVector<Value*, 16> TransitionValues;
    llvm::append_range(TransitionValues, *TransitionArgs);
    Rval.emplace_back(args: "gc-transition" , args&: TransitionValues);
  }
  // An empty gc-live bundle is omitted entirely rather than emitted empty.
  if (GCArgs.size()) {
    SmallVector<Value*, 16> LiveValues;
    llvm::append_range(LiveValues, GCArgs);
    Rval.emplace_back(args: "gc-live" , args&: LiveValues);
  }
  return Rval;
}
757 | |
/// Shared implementation behind all CreateGCStatepointCall overloads:
/// declares the gc.statepoint intrinsic (overloaded on the callee's type),
/// assembles the flat argument list plus operand bundles, and emits the
/// call.
template <typename T0, typename T1, typename T2, typename T3>
static CallInst *CreateGCStatepointCallCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    FunctionCallee ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
    std::optional<ArrayRef<T1>> TransitionArgs,
    std::optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Function *FnStatepoint =
      Intrinsic::getDeclaration(M, Intrinsic::id: experimental_gc_statepoint,
                                Tys: {ActualCallee.getCallee()->getType()});

  std::vector<Value *> Args = getStatepointArgs(
      *Builder, ID, NumPatchBytes, ActualCallee.getCallee(), Flags, CallArgs);

  CallInst *CI = Builder->CreateCall(
      FnStatepoint, Args,
      getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
  // Operand 2 is the wrapped callee; record its pointee function type via
  // an elementtype attribute so consumers can recover the real signature.
  CI->addParamAttr(2,
                   Attribute::get(Builder->getContext(), Attribute::ElementType,
                                  ActualCallee.getFunctionType()));
  return CI;
}
782 | |
783 | CallInst *IRBuilderBase::CreateGCStatepointCall( |
784 | uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee, |
785 | ArrayRef<Value *> CallArgs, std::optional<ArrayRef<Value *>> DeoptArgs, |
786 | ArrayRef<Value *> GCArgs, const Twine &Name) { |
787 | return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>( |
788 | Builder: this, ID, NumPatchBytes, ActualCallee, Flags: uint32_t(StatepointFlags::None), |
789 | CallArgs, TransitionArgs: std::nullopt /* No Transition Args */, DeoptArgs, GCArgs, Name); |
790 | } |
791 | |
/// Statepoint call overload taking explicit statepoint Flags plus
/// transition/deopt arguments expressed as uses of an existing call.
CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    uint32_t Flags, ArrayRef<Value *> CallArgs,
    std::optional<ArrayRef<Use>> TransitionArgs,
    std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Use, Use, Value *>(
      Builder: this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
      DeoptArgs, GCArgs, Name);
}
802 | |
803 | CallInst *IRBuilderBase::CreateGCStatepointCall( |
804 | uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee, |
805 | ArrayRef<Use> CallArgs, std::optional<ArrayRef<Value *>> DeoptArgs, |
806 | ArrayRef<Value *> GCArgs, const Twine &Name) { |
807 | return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>( |
808 | Builder: this, ID, NumPatchBytes, ActualCallee, Flags: uint32_t(StatepointFlags::None), |
809 | CallArgs, TransitionArgs: std::nullopt, DeoptArgs, GCArgs, Name); |
810 | } |
811 | |
/// Shared implementation behind all CreateGCStatepointInvoke overloads;
/// identical to CreateGCStatepointCallCommon except that it emits an
/// invoke with normal/unwind destinations instead of a call.
template <typename T0, typename T1, typename T2, typename T3>
static InvokeInst *CreateGCStatepointInvokeCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    FunctionCallee ActualInvokee, BasicBlock *NormalDest,
    BasicBlock *UnwindDest, uint32_t Flags, ArrayRef<T0> InvokeArgs,
    std::optional<ArrayRef<T1>> TransitionArgs,
    std::optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Function *FnStatepoint =
      Intrinsic::getDeclaration(M, Intrinsic::id: experimental_gc_statepoint,
                                Tys: {ActualInvokee.getCallee()->getType()});

  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee.getCallee(),
                        Flags, InvokeArgs);

  InvokeInst *II = Builder->CreateInvoke(
      FnStatepoint, NormalDest, UnwindDest, Args,
      getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
  // Operand 2 is the wrapped invokee; record its pointee function type via
  // an elementtype attribute so consumers can recover the real signature.
  II->addParamAttr(2,
                   Attribute::get(Builder->getContext(), Attribute::ElementType,
                                  ActualInvokee.getFunctionType()));
  return II;
}
838 | |
839 | InvokeInst *IRBuilderBase::CreateGCStatepointInvoke( |
840 | uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee, |
841 | BasicBlock *NormalDest, BasicBlock *UnwindDest, |
842 | ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Value *>> DeoptArgs, |
843 | ArrayRef<Value *> GCArgs, const Twine &Name) { |
844 | return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>( |
845 | Builder: this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, |
846 | Flags: uint32_t(StatepointFlags::None), InvokeArgs, |
847 | TransitionArgs: std::nullopt /* No Transition Args*/, DeoptArgs, GCArgs, Name); |
848 | } |
849 | |
/// Statepoint invoke overload taking explicit statepoint Flags plus
/// transition/deopt arguments expressed as uses of an existing call.
InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
    ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Use>> TransitionArgs,
    std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Use, Use, Value *>(
      Builder: this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
      InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
}
860 | |
861 | InvokeInst *IRBuilderBase::CreateGCStatepointInvoke( |
862 | uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee, |
863 | BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs, |
864 | std::optional<ArrayRef<Value *>> DeoptArgs, ArrayRef<Value *> GCArgs, |
865 | const Twine &Name) { |
866 | return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>( |
867 | Builder: this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, |
868 | Flags: uint32_t(StatepointFlags::None), InvokeArgs, TransitionArgs: std::nullopt, DeoptArgs, |
869 | GCArgs, Name); |
870 | } |
871 | |
872 | CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint, |
873 | Type *ResultType, const Twine &Name) { |
874 | Intrinsic::ID ID = Intrinsic::experimental_gc_result; |
875 | Module *M = BB->getParent()->getParent(); |
876 | Type *Types[] = {ResultType}; |
877 | Function *FnGCResult = Intrinsic::getDeclaration(M, id: ID, Tys: Types); |
878 | |
879 | Value *Args[] = {Statepoint}; |
880 | return CreateCall(Callee: FnGCResult, Args, OpBundles: {}, Name); |
881 | } |
882 | |
883 | CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint, |
884 | int BaseOffset, int DerivedOffset, |
885 | Type *ResultType, const Twine &Name) { |
886 | Module *M = BB->getParent()->getParent(); |
887 | Type *Types[] = {ResultType}; |
888 | Function *FnGCRelocate = |
889 | Intrinsic::getDeclaration(M, Intrinsic::id: experimental_gc_relocate, Tys: Types); |
890 | |
891 | Value *Args[] = {Statepoint, getInt32(C: BaseOffset), getInt32(C: DerivedOffset)}; |
892 | return CreateCall(Callee: FnGCRelocate, Args, OpBundles: {}, Name); |
893 | } |
894 | |
/// Create a call to gc.get.pointer.base on DerivedPtr; the intrinsic is
/// overloaded on both the result and the operand pointer types.
CallInst *IRBuilderBase::CreateGCGetPointerBase(Value *DerivedPtr,
                                                const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *PtrTy = DerivedPtr->getType();
  Function *FnGCFindBase = Intrinsic::getDeclaration(
      M, Intrinsic::id: experimental_gc_get_pointer_base, Tys: {PtrTy, PtrTy});
  return CreateCall(Callee: FnGCFindBase, Args: {DerivedPtr}, OpBundles: {}, Name);
}
903 | |
/// Create a call to gc.get.pointer.offset on DerivedPtr; the intrinsic is
/// overloaded on the operand pointer type only.
CallInst *IRBuilderBase::CreateGCGetPointerOffset(Value *DerivedPtr,
                                                  const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *PtrTy = DerivedPtr->getType();
  Function *FnGCGetOffset = Intrinsic::getDeclaration(
      M, Intrinsic::id: experimental_gc_get_pointer_offset, Tys: {PtrTy});
  return CreateCall(Callee: FnGCGetOffset, Args: {DerivedPtr}, OpBundles: {}, Name);
}
912 | |
913 | CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V, |
914 | Instruction *FMFSource, |
915 | const Twine &Name) { |
916 | Module *M = BB->getModule(); |
917 | Function *Fn = Intrinsic::getDeclaration(M, id: ID, Tys: {V->getType()}); |
918 | return createCallHelper(Callee: Fn, Ops: {V}, Name, FMFSource); |
919 | } |
920 | |
/// Create a call to a binary intrinsic overloaded on the LHS type, first
/// giving the folder a chance to fold the whole call to a constant.
Value *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
                                            Value *RHS, Instruction *FMFSource,
                                            const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, id: ID, Tys: { LHS->getType() });
  // Folding must be attempted before emitting the call so no instruction
  // is inserted when the result is a constant.
  if (Value *V = Folder.FoldBinaryIntrinsic(ID, LHS, RHS, Ty: Fn->getReturnType(),
                                            FMFSource))
    return V;
  return createCallHelper(Callee: Fn, Ops: {LHS, RHS}, Name, FMFSource);
}
931 | |
932 | CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID, |
933 | ArrayRef<Type *> Types, |
934 | ArrayRef<Value *> Args, |
935 | Instruction *FMFSource, |
936 | const Twine &Name) { |
937 | Module *M = BB->getModule(); |
938 | Function *Fn = Intrinsic::getDeclaration(M, id: ID, Tys: Types); |
939 | return createCallHelper(Callee: Fn, Ops: Args, Name, FMFSource); |
940 | } |
941 | |
/// Create a call to intrinsic ID, deducing the overload types by matching
/// the concrete signature (RetTy, argument types) against the intrinsic's
/// IIT descriptor table.
CallInst *IRBuilderBase::CreateIntrinsic(Type *RetTy, Intrinsic::ID ID,
                                         ArrayRef<Value *> Args,
                                         Instruction *FMFSource,
                                         const Twine &Name) {
  Module *M = BB->getModule();

  // Fetch the descriptor table that encodes the intrinsic's signature.
  SmallVector<Intrinsic::IITDescriptor> Table;
  Intrinsic::getIntrinsicInfoTableEntries(id: ID, T&: Table);
  ArrayRef<Intrinsic::IITDescriptor> TableRef(Table);

  // Build the concrete FunctionType from RetTy plus the argument types.
  SmallVector<Type *> ArgTys;
  ArgTys.reserve(N: Args.size());
  for (auto &I : Args)
    ArgTys.push_back(Elt: I->getType());
  FunctionType *FTy = FunctionType::get(Result: RetTy, Params: ArgTys, isVarArg: false);
  SmallVector<Type *> OverloadTys;
  Intrinsic::MatchIntrinsicTypesResult Res =
      matchIntrinsicSignature(FTy, Infos&: TableRef, ArgTys&: OverloadTys);
  (void)Res;
  assert(Res == Intrinsic::MatchIntrinsicTypes_Match && TableRef.empty() &&
         "Wrong types for intrinsic!" );
  // TODO: Handle varargs intrinsics.

  Function *Fn = Intrinsic::getDeclaration(M, id: ID, Tys: OverloadTys);
  return createCallHelper(Callee: Fn, Ops: Args, Name, FMFSource);
}
968 | |
/// Create a constrained-FP binary operation: the intrinsic receives the two
/// operands plus explicit rounding-mode and exception-behavior operands.
/// Fast-math flags come from FMFSource when given, else the builder's FMF.
CallInst *IRBuilderBase::CreateConstrainedFPBinOp(
    Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource,
    const Twine &Name, MDNode *FPMathTag,
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  Value *RoundingV = getConstrainedFPRounding(Rounding);
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C = CreateIntrinsic(ID, Types: {L->getType()},
                                Args: {L, R, RoundingV, ExceptV}, FMFSource: nullptr, Name);
  setConstrainedFPCallAttr(C);
  setFPAttrs(I: C, FPMD: FPMathTag, FMF: UseFMF);
  return C;
}
987 | |
/// Create a constrained-FP binary operation whose intrinsic carries no
/// rounding-mode operand — only the exception-behavior operand is appended.
CallInst *IRBuilderBase::CreateConstrainedFPUnroundedBinOp(
    Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource,
    const Twine &Name, MDNode *FPMathTag,
    std::optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C =
      CreateIntrinsic(ID, Types: {L->getType()}, Args: {L, R, ExceptV}, FMFSource: nullptr, Name);
  setConstrainedFPCallAttr(C);
  setFPAttrs(I: C, FPMD: FPMathTag, FMF: UseFMF);
  return C;
}
1004 | |
/// Create an instruction from a raw opcode, dispatching to CreateBinOp or
/// CreateUnOp; any other opcode is a programming error.
Value *IRBuilderBase::CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
                                   const Twine &Name, MDNode *FPMathTag) {
  if (Instruction::isBinaryOp(Opcode: Opc)) {
    assert(Ops.size() == 2 && "Invalid number of operands!" );
    return CreateBinOp(Opc: static_cast<Instruction::BinaryOps>(Opc),
                       LHS: Ops[0], RHS: Ops[1], Name, FPMathTag);
  }
  if (Instruction::isUnaryOp(Opcode: Opc)) {
    assert(Ops.size() == 1 && "Invalid number of operands!" );
    return CreateUnOp(Opc: static_cast<Instruction::UnaryOps>(Opc),
                      V: Ops[0], Name, FPMathTag);
  }
  llvm_unreachable("Unexpected opcode!" );
}
1019 | |
/// Create a constrained-FP cast from V to DestTy. Whether the intrinsic
/// takes a rounding-mode operand is looked up in the ConstrainedOps.def
/// table; the exception-behavior operand is always appended.
CallInst *IRBuilderBase::CreateConstrainedFPCast(
    Intrinsic::ID ID, Value *V, Type *DestTy,
    Instruction *FMFSource, const Twine &Name, MDNode *FPMathTag,
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C;
  bool HasRoundingMD = false;
  // Expand the ConstrainedOps.def X-macro table into a switch that records
  // whether this intrinsic carries a rounding-mode operand.
  switch (ID) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    HasRoundingMD = ROUND_MODE;                                                \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD) {
    Value *RoundingV = getConstrainedFPRounding(Rounding);
    C = CreateIntrinsic(ID, Types: {DestTy, V->getType()}, Args: {V, RoundingV, ExceptV},
                        FMFSource: nullptr, Name);
  } else
    C = CreateIntrinsic(ID, Types: {DestTy, V->getType()}, Args: {V, ExceptV}, FMFSource: nullptr,
                        Name);

  setConstrainedFPCallAttr(C);

  // Some casts (e.g. integer results) are not FP math operators and cannot
  // carry FP attributes.
  if (isa<FPMathOperator>(Val: C))
    setFPAttrs(I: C, FPMD: FPMathTag, FMF: UseFMF);
  return C;
}
1056 | |
/// Shared FCmp implementation: in constrained-FP mode emit the quiet or
/// signaling constrained-fcmp intrinsic; otherwise constant-fold when both
/// operands are constants, else insert an FCmpInst with FP attributes.
Value *IRBuilderBase::CreateFCmpHelper(
    CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name,
    MDNode *FPMathTag, bool IsSignaling) {
  if (IsFPConstrained) {
    auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps
                          : Intrinsic::experimental_constrained_fcmp;
    return CreateConstrainedFPCmp(ID: ID, P, L: LHS, R: RHS, Name);
  }

  if (auto *LC = dyn_cast<Constant>(Val: LHS))
    if (auto *RC = dyn_cast<Constant>(Val: RHS))
      return Insert(V: Folder.CreateFCmp(P, LHS: LC, RHS: RC), Name);
  return Insert(I: setFPAttrs(I: new FCmpInst(P, LHS, RHS), FPMD: FPMathTag, FMF), Name);
}
1071 | |
/// Create a constrained fcmp/fcmps intrinsic call; the predicate and the
/// exception behavior are passed as explicit trailing operands.
CallInst *IRBuilderBase::CreateConstrainedFPCmp(
    Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
    const Twine &Name, std::optional<fp::ExceptionBehavior> Except) {
  Value *PredicateV = getConstrainedFPPredicate(Predicate: P);
  Value *ExceptV = getConstrainedFPExcept(Except);

  CallInst *C = CreateIntrinsic(ID, Types: {L->getType()},
                                Args: {L, R, PredicateV, ExceptV}, FMFSource: nullptr, Name);
  setConstrainedFPCallAttr(C);
  return C;
}
1083 | |
/// Create a call to an already-declared constrained-FP intrinsic Callee,
/// appending the rounding-mode operand (only if the intrinsic has one, per
/// the ConstrainedOps.def table) and the exception-behavior operand.
CallInst *IRBuilderBase::CreateConstrainedFPCall(
    Function *Callee, ArrayRef<Value *> Args, const Twine &Name,
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  llvm::SmallVector<Value *, 6> UseArgs;

  append_range(C&: UseArgs, R&: Args);
  bool HasRoundingMD = false;
  // Expand the ConstrainedOps.def X-macro table into a switch that records
  // whether this intrinsic carries a rounding-mode operand.
  switch (Callee->getIntrinsicID()) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    HasRoundingMD = ROUND_MODE;                                                \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD)
    UseArgs.push_back(Elt: getConstrainedFPRounding(Rounding));
  UseArgs.push_back(Elt: getConstrainedFPExcept(Except));

  CallInst *C = CreateCall(Callee, Args: UseArgs, Name);
  setConstrainedFPCallAttr(C);
  return C;
}
1109 | |
/// Create a select, folding to a constant when possible. When MDFrom is
/// given, its branch-weight (prof) and unpredictable metadata are copied
/// onto the new select; FP selects also receive the builder's FMF.
Value *IRBuilderBase::CreateSelect(Value *C, Value *True, Value *False,
                                   const Twine &Name, Instruction *MDFrom) {
  if (auto *V = Folder.FoldSelect(C, True, False))
    return V;

  SelectInst *Sel = SelectInst::Create(C, S1: True, S2: False);
  if (MDFrom) {
    MDNode *Prof = MDFrom->getMetadata(KindID: LLVMContext::MD_prof);
    MDNode *Unpred = MDFrom->getMetadata(KindID: LLVMContext::MD_unpredictable);
    Sel = addBranchMetadata(I: Sel, Weights: Prof, Unpredictable: Unpred);
  }
  if (isa<FPMathOperator>(Val: Sel))
    setFPAttrs(I: Sel, FPMD: nullptr /* MDNode* */, FMF);
  return Insert(I: Sel, Name);
}
1125 | |
1126 | Value *IRBuilderBase::CreatePtrDiff(Type *ElemTy, Value *LHS, Value *RHS, |
1127 | const Twine &Name) { |
1128 | assert(LHS->getType() == RHS->getType() && |
1129 | "Pointer subtraction operand types must match!" ); |
1130 | Value *LHS_int = CreatePtrToInt(V: LHS, DestTy: Type::getInt64Ty(C&: Context)); |
1131 | Value *RHS_int = CreatePtrToInt(V: RHS, DestTy: Type::getInt64Ty(C&: Context)); |
1132 | Value *Difference = CreateSub(LHS: LHS_int, RHS: RHS_int); |
1133 | return CreateExactSDiv(LHS: Difference, RHS: ConstantExpr::getSizeOf(Ty: ElemTy), |
1134 | Name); |
1135 | } |
1136 | |
/// Create a call to llvm.launder.invariant.group on Ptr; the intrinsic
/// takes and returns the pointer type unchanged.
Value *IRBuilderBase::CreateLaunderInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "launder.invariant.group only applies to pointers." );
  auto *PtrType = Ptr->getType();
  Module *M = BB->getParent()->getParent();
  Function *FnLaunderInvariantGroup = Intrinsic::getDeclaration(
      M, Intrinsic::launder_invariant_group, {PtrType});

  assert(FnLaunderInvariantGroup->getReturnType() == PtrType &&
         FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
             PtrType &&
         "LaunderInvariantGroup should take and return the same type" );

  return CreateCall(Callee: FnLaunderInvariantGroup, Args: {Ptr});
}
1152 | |
/// Create a call to llvm.strip.invariant.group on Ptr; the intrinsic takes
/// and returns the pointer type unchanged.
Value *IRBuilderBase::CreateStripInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "strip.invariant.group only applies to pointers." );

  auto *PtrType = Ptr->getType();
  Module *M = BB->getParent()->getParent();
  Function *FnStripInvariantGroup = Intrinsic::getDeclaration(
      M, Intrinsic::strip_invariant_group, {PtrType});

  assert(FnStripInvariantGroup->getReturnType() == PtrType &&
         FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
             PtrType &&
         "StripInvariantGroup should take and return the same type" );

  return CreateCall(Callee: FnStripInvariantGroup, Args: {Ptr});
}
1169 | |
/// Reverse the lanes of vector V: scalable vectors use the
/// experimental_vector_reverse intrinsic (no static mask exists), fixed
/// vectors use a shufflevector with mask [N-1, N-2, ..., 0].
Value *IRBuilderBase::CreateVectorReverse(Value *V, const Twine &Name) {
  auto *Ty = cast<VectorType>(Val: V->getType());
  if (isa<ScalableVectorType>(Val: Ty)) {
    Module *M = BB->getParent()->getParent();
    Function *F = Intrinsic::getDeclaration(
        M, Intrinsic::experimental_vector_reverse, Ty);
    return Insert(I: CallInst::Create(Func: F, Args: V), Name);
  }
  // Keep the original behaviour for fixed vector
  SmallVector<int, 8> ShuffleMask;
  int NumElts = Ty->getElementCount().getKnownMinValue();
  for (int i = 0; i < NumElts; ++i)
    ShuffleMask.push_back(Elt: NumElts - i - 1);
  return CreateShuffleVector(V, Mask: ShuffleMask, Name);
}
1185 | |
/// Create a vector splice of V1 and V2 at signed offset Imm (per the
/// llvm.experimental.vector.splice semantics): intrinsic form for scalable
/// vectors, a two-operand shuffle for fixed vectors.
Value *IRBuilderBase::CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
                                         const Twine &Name) {
  assert(isa<VectorType>(V1->getType()) && "Unexpected type" );
  assert(V1->getType() == V2->getType() &&
         "Splice expects matching operand types!" );

  if (auto *VTy = dyn_cast<ScalableVectorType>(Val: V1->getType())) {
    Module *M = BB->getParent()->getParent();
    Function *F = Intrinsic::getDeclaration(
        M, Intrinsic::experimental_vector_splice, VTy);

    Value *Ops[] = {V1, V2, getInt32(C: Imm)};
    return Insert(I: CallInst::Create(Func: F, Args: Ops), Name);
  }

  unsigned NumElts = cast<FixedVectorType>(Val: V1->getType())->getNumElements();
  assert(((-Imm <= NumElts) || (Imm < NumElts)) &&
         "Invalid immediate for vector splice!" );

  // Keep the original behaviour for fixed vector
  // A negative Imm wraps around: the shuffle window starts NumElts+Imm
  // lanes into the concatenation of V1 and V2.
  unsigned Idx = (NumElts + Imm) % NumElts;
  SmallVector<int, 8> Mask;
  for (unsigned I = 0; I < NumElts; ++I)
    Mask.push_back(Elt: Idx + I);

  return CreateShuffleVector(V1, V2, Mask);
}
1213 | |
1214 | Value *IRBuilderBase::CreateVectorSplat(unsigned NumElts, Value *V, |
1215 | const Twine &Name) { |
1216 | auto EC = ElementCount::getFixed(MinVal: NumElts); |
1217 | return CreateVectorSplat(EC, V, Name); |
1218 | } |
1219 | |
/// Splat scalar V across EC lanes: insert V into lane 0 of a poison vector,
/// then broadcast it with an all-zeros shuffle mask.
Value *IRBuilderBase::CreateVectorSplat(ElementCount EC, Value *V,
                                        const Twine &Name) {
  assert(EC.isNonZero() && "Cannot splat to an empty vector!" );

  // First insert it into a poison vector so we can shuffle it.
  Value *Poison = PoisonValue::get(T: VectorType::get(ElementType: V->getType(), EC));
  V = CreateInsertElement(Vec: Poison, NewElt: V, Idx: getInt64(C: 0), Name: Name + ".splatinsert" );

  // Shuffle the value across the desired number of elements.
  SmallVector<int, 16> Zeros;
  Zeros.resize(N: EC.getKnownMinValue());
  return CreateShuffleVector(V, Mask: Zeros, Name: Name + ".splat" );
}
1233 | |
/// Create a call to preserve.array.access.index. The result type is the
/// type a GEP with Dimension leading zero indices plus LastIndex would
/// produce; ElTy is recorded as an elementtype attribute on the base
/// operand, and DbgInfo (if any) is attached as preserve.access.index
/// metadata.
Value *IRBuilderBase::CreatePreserveArrayAccessIndex(
    Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex,
    MDNode *DbgInfo) {
  auto *BaseType = Base->getType();
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.array.access.index." );

  Value *LastIndexV = getInt32(C: LastIndex);
  Constant *Zero = ConstantInt::get(Ty: Type::getInt32Ty(C&: Context), V: 0);
  // Index list mirrors the GEP the intrinsic stands in for: Dimension
  // zeros followed by the actual last index.
  SmallVector<Value *, 4> IdxList(Dimension, Zero);
  IdxList.push_back(Elt: LastIndexV);

  Type *ResultType = GetElementPtrInst::getGEPReturnType(Ptr: Base, IdxList);

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveArrayAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_array_access_index, {ResultType, BaseType});

  Value *DimV = getInt32(C: Dimension);
  CallInst *Fn =
      CreateCall(Callee: FnPreserveArrayAccessIndex, Args: {Base, DimV, LastIndexV});
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(KindID: LLVMContext::MD_preserve_access_index, Node: DbgInfo);

  return Fn;
}
1262 | |
/// Create a call to preserve.union.access.index; the result has the same
/// pointer type as Base, and DbgInfo (if any) is attached as
/// preserve.access.index metadata.
Value *IRBuilderBase::CreatePreserveUnionAccessIndex(
    Value *Base, unsigned FieldIndex, MDNode *DbgInfo) {
  assert(isa<PointerType>(Base->getType()) &&
         "Invalid Base ptr type for preserve.union.access.index." );
  auto *BaseType = Base->getType();

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveUnionAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_union_access_index, {BaseType, BaseType});

  Value *DIIndex = getInt32(C: FieldIndex);
  CallInst *Fn =
      CreateCall(Callee: FnPreserveUnionAccessIndex, Args: {Base, DIIndex});
  if (DbgInfo)
    Fn->setMetadata(KindID: LLVMContext::MD_preserve_access_index, Node: DbgInfo);

  return Fn;
}
1281 | |
/// Create a call to preserve.struct.access.index. Index is the GEP field
/// index used to compute the result type; FieldIndex is the debug-info
/// field number. ElTy is recorded as an elementtype attribute on the base
/// operand, and DbgInfo (if any) is attached as preserve.access.index
/// metadata.
Value *IRBuilderBase::CreatePreserveStructAccessIndex(
    Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex,
    MDNode *DbgInfo) {
  auto *BaseType = Base->getType();
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.struct.access.index." );

  Value *GEPIndex = getInt32(C: Index);
  Constant *Zero = ConstantInt::get(Ty: Type::getInt32Ty(C&: Context), V: 0);
  // Result type mirrors the GEP {0, Index} the intrinsic stands in for.
  Type *ResultType =
      GetElementPtrInst::getGEPReturnType(Ptr: Base, IdxList: {Zero, GEPIndex});

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveStructAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_struct_access_index, {ResultType, BaseType});

  Value *DIIndex = getInt32(C: FieldIndex);
  CallInst *Fn = CreateCall(Callee: FnPreserveStructAccessIndex,
                            Args: {Base, GEPIndex, DIIndex});
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(KindID: LLVMContext::MD_preserve_access_index, Node: DbgInfo);

  return Fn;
}
1308 | |
1309 | Value *IRBuilderBase::createIsFPClass(Value *FPNum, unsigned Test) { |
1310 | ConstantInt *TestV = getInt32(C: Test); |
1311 | Module *M = BB->getParent()->getParent(); |
1312 | Function *FnIsFPClass = |
1313 | Intrinsic::getDeclaration(M, Intrinsic::is_fpclass, {FPNum->getType()}); |
1314 | return CreateCall(Callee: FnIsFPClass, Args: {FPNum, TestV}); |
1315 | } |
1316 | |
/// Emit an llvm.assume(true) carrying an "align" operand bundle of
/// [PtrValue, AlignValue(, OffsetValue)]; the optional offset is appended
/// only when supplied.
CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,
                                                         Value *PtrValue,
                                                         Value *AlignValue,
                                                         Value *OffsetValue) {
  SmallVector<Value *, 4> Vals({PtrValue, AlignValue});
  if (OffsetValue)
    Vals.push_back(Elt: OffsetValue);
  OperandBundleDefT<Value *> AlignOpB("align" , Vals);
  return CreateAssumption(Cond: ConstantInt::getTrue(Context&: getContext()), OpBundles: {AlignOpB});
}
1327 | |
1328 | CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL, |
1329 | Value *PtrValue, |
1330 | unsigned Alignment, |
1331 | Value *OffsetValue) { |
1332 | assert(isa<PointerType>(PtrValue->getType()) && |
1333 | "trying to create an alignment assumption on a non-pointer?" ); |
1334 | assert(Alignment != 0 && "Invalid Alignment" ); |
1335 | auto *PtrTy = cast<PointerType>(Val: PtrValue->getType()); |
1336 | Type *IntPtrTy = getIntPtrTy(DL, AddrSpace: PtrTy->getAddressSpace()); |
1337 | Value *AlignValue = ConstantInt::get(Ty: IntPtrTy, V: Alignment); |
1338 | return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue); |
1339 | } |
1340 | |
1341 | CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL, |
1342 | Value *PtrValue, |
1343 | Value *Alignment, |
1344 | Value *OffsetValue) { |
1345 | assert(isa<PointerType>(PtrValue->getType()) && |
1346 | "trying to create an alignment assumption on a non-pointer?" ); |
1347 | return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue: Alignment, OffsetValue); |
1348 | } |
1349 | |
// Out-of-line virtual destructor and anchor definitions: these pin the
// vtables of the inserter/folder class hierarchy to this translation unit.
IRBuilderDefaultInserter::~IRBuilderDefaultInserter() = default;
IRBuilderCallbackInserter::~IRBuilderCallbackInserter() = default;
IRBuilderFolder::~IRBuilderFolder() = default;
void ConstantFolder::anchor() {}
void NoFolder::anchor() {}
1355 | |