//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/OSLog.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsBPF.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsS390.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/X86TargetParser.h"
#include <sstream>

using namespace clang;
using namespace CodeGen;
using namespace llvm;

static
int64_t clamp(int64_t Value, int64_t Low, int64_t High) {
  return std::min(High, std::max(Low, Value));
}

static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
                             Align AlignmentInBytes) {
  ConstantInt *Byte;
  switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
  case LangOptions::TrivialAutoVarInitKind::Uninitialized:
    // Nothing to initialize.
    return;
  case LangOptions::TrivialAutoVarInitKind::Zero:
    Byte = CGF.Builder.getInt8(0x00);
    break;
  case LangOptions::TrivialAutoVarInitKind::Pattern: {
    llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
    Byte = llvm::dyn_cast<llvm::ConstantInt>(
        initializationPatternFor(CGF.CGM, Int8));
    break;
  }
  }
  if (CGF.CGM.stopAutoInit())
    return;
  auto *I = CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
  I->addAnnotationMetadata("auto-init");
}

/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
                                                     unsigned BuiltinID) {
  assert(Context.BuiltinInfo.isLibFunction(BuiltinID));

  // Get the name, skip over the __builtin_ prefix (if necessary).
  StringRef Name;
  GlobalDecl D(FD);

  // If the builtin has been declared explicitly with an assembler label,
  // use the mangled name. This differs from the plain label on platforms
  // that prefix labels.
  if (FD->hasAttr<AsmLabelAttr>())
    Name = getMangledName(D);
  else
    Name = Context.BuiltinInfo.getName(BuiltinID) + 10;

  llvm::FunctionType *Ty =
      cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));

  return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}

/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                        QualType T, llvm::IntegerType *IntType) {
  V = CGF.EmitToMemory(V, T);

  if (V->getType()->isPointerTy())
    return CGF.Builder.CreatePtrToInt(V, IntType);

  assert(V->getType() == IntType);
  return V;
}

static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                          QualType T, llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}

/// Utility to insert an atomic instruction based on the AtomicRMWInst::BinOp
/// kind and the expression node.
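///
/// For example, a fetch-and-add style builtin lowered through this helper
/// (with Kind == AtomicRMWInst::Add) becomes an "atomicrmw add" on the
/// destination and yields the old value, converted back to the source type.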
static Value *MakeBinaryAtomicValue(
    CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);

  llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
      Kind, Args[0], Args[1], Ordering);
  return EmitFromInt(CGF, Result, T, ValueType);
}

static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Val = CGF.EmitScalarExpr(E->getArg(0));
  Value *Address = CGF.EmitScalarExpr(E->getArg(1));

  // Convert the type of the pointer to a pointer to the stored type.
  Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
  Value *BC = CGF.Builder.CreateBitCast(
      Address, llvm::PointerType::getUnqual(Val->getType()), "cast");
  LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType());
  LV.setNontemporal(true);
  CGF.EmitStoreOfScalar(Val, LV, false);
  return nullptr;
}

static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Address = CGF.EmitScalarExpr(E->getArg(0));

  LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType());
  LV.setNontemporal(true);
  return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
}

static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst::BinOp Kind,
                               const CallExpr *E) {
  return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
}

/// Utility to insert an atomic instruction based on the AtomicRMWInst::BinOp
/// kind and the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   llvm::AtomicRMWInst::BinOp Kind,
                                   const CallExpr *E,
                                   Instruction::BinaryOps Op,
                                   bool Invert = false) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);

  llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
      Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
  Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
  if (Invert)
    Result =
        CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
                                llvm::ConstantInt::getAllOnesValue(IntType));
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}

/// Utility to insert an atomic cmpxchg instruction.
///
/// @param CGF The current codegen function.
/// @param E   Builtin call expression to convert to cmpxchg.
///            arg0 - address to operate on
///            arg1 - value to compare with
///            arg2 - new value
/// @param ReturnBool Specifies whether to return success flag of
///                   cmpxchg result or the old value.
///
/// @returns result of cmpxchg, according to ReturnBool
///
/// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics,
/// invoke the function EmitAtomicCmpXchgForMSIntrin instead.
static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
                                     bool ReturnBool) {
  QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  Value *Args[3];
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);

  Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering::SequentiallyConsistent);
  if (ReturnBool)
    // Extract boolean success flag and zext it to int.
    return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
                                  CGF.ConvertType(E->getType()));
  else
    // Extract old value and emit it using the same type as compare value.
    return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
                       ValueType);
}

/// This function should be invoked to emit atomic cmpxchg for Microsoft's
/// _InterlockedCompareExchange* intrinsics which have the following signature:
/// T _InterlockedCompareExchange(T volatile *Destination,
///                               T Exchange,
///                               T Comparand);
///
/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
/// cmpxchg *Destination, Comparand, Exchange.
/// So we need to swap Comparand and Exchange when invoking
/// CreateAtomicCmpXchg. That is the reason we could not use the above utility
/// function MakeAtomicCmpXchgValue since it expects the arguments to be
/// already swapped.
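///
/// For example, "x = _InterlockedCompareExchange(&v, Exch, Comp)" is emitted
/// as a volatile "cmpxchg" on &v with Comp as the compare operand and Exch as
/// the new value; x receives the old value extracted from the result pair.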

static
Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      E->getType(), E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(1)->getType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(2)->getType()));

  auto *Destination = CGF.EmitScalarExpr(E->getArg(0));
  auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
  auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));

  // For Release ordering, the failure ordering should be Monotonic.
  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
                         AtomicOrdering::Monotonic :
                         SuccessOrdering;

  // The atomic instruction is marked volatile for consistency with MSVC. This
  // blocks the few atomics optimizations that LLVM has. If we want to optimize
  // _Interlocked* operations in the future, we will have to remove the volatile
  // marker.
  auto *Result = CGF.Builder.CreateAtomicCmpXchg(
                   Destination, Comparand, Exchange,
                   SuccessOrdering, FailureOrdering);
  Result->setVolatile(true);
  return CGF.Builder.CreateExtractValue(Result, 0);
}

// 64-bit Microsoft platforms support 128-bit cmpxchg operations. They are
// prototyped like this:
//
// unsigned char _InterlockedCompareExchange128...(
//     __int64 volatile * _Destination,
//     __int64 _ExchangeHigh,
//     __int64 _ExchangeLow,
//     __int64 * _ComparandResult);
static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
                                              const CallExpr *E,
                                              AtomicOrdering SuccessOrdering) {
  assert(E->getNumArgs() == 4);
  llvm::Value *Destination = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2));
  llvm::Value *ComparandPtr = CGF.EmitScalarExpr(E->getArg(3));

  assert(Destination->getType()->isPointerTy());
  assert(!ExchangeHigh->getType()->isPointerTy());
  assert(!ExchangeLow->getType()->isPointerTy());
  assert(ComparandPtr->getType()->isPointerTy());

  // For Release ordering, the failure ordering should be Monotonic.
  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
                             ? AtomicOrdering::Monotonic
                             : SuccessOrdering;

  // Convert to i128 pointers and values.
  llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128);
  llvm::Type *Int128PtrTy = Int128Ty->getPointerTo();
  Destination = CGF.Builder.CreateBitCast(Destination, Int128PtrTy);
  Address ComparandResult(CGF.Builder.CreateBitCast(ComparandPtr, Int128PtrTy),
                          CGF.getContext().toCharUnitsFromBits(128));

  // (((i128)hi) << 64) | ((i128)lo)
  ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty);
  ExchangeLow = CGF.Builder.CreateZExt(ExchangeLow, Int128Ty);
  ExchangeHigh =
      CGF.Builder.CreateShl(ExchangeHigh, llvm::ConstantInt::get(Int128Ty, 64));
  llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow);

  // Load the comparand for the instruction.
  llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandResult);

  auto *CXI = CGF.Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
                                              SuccessOrdering, FailureOrdering);

  // The atomic instruction is marked volatile for consistency with MSVC. This
  // blocks the few atomics optimizations that LLVM has. If we want to optimize
  // _Interlocked* operations in the future, we will have to remove the volatile
  // marker.
  CXI->setVolatile(true);

  // Store the result as an outparameter.
  CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0),
                          ComparandResult);

  // Get the success boolean and zero extend it to i8.
  Value *Success = CGF.Builder.CreateExtractValue(CXI, 1);
  return CGF.Builder.CreateZExt(Success, CGF.Int8Ty);
}

static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  auto *Result = CGF.Builder.CreateAtomicRMW(
                   AtomicRMWInst::Add,
                   CGF.EmitScalarExpr(E->getArg(0)),
                   ConstantInt::get(IntTy, 1),
                   Ordering);
  return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
}

static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  auto *Result = CGF.Builder.CreateAtomicRMW(
                   AtomicRMWInst::Sub,
                   CGF.EmitScalarExpr(E->getArg(0)),
                   ConstantInt::get(IntTy, 1),
                   Ordering);
  return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
}

// Build a plain volatile load.
static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::Type *ITy =
      llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
  Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
  llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize);
  Load->setVolatile(true);
  return Load;
}

// Build a plain volatile store.
static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  Value *Value = CGF.EmitScalarExpr(E->getArg(1));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::Type *ITy =
      llvm::IntegerType::get(CGF.getLLVMContext(), StoreSize.getQuantity() * 8);
  Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
  llvm::StoreInst *Store =
      CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
  Store->setVolatile(true);
  return Store;
}

// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type. Depending on mode, this may be a constrained
// floating-point intrinsic.
static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E, unsigned IntrinsicID,
                                unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, Src0);
  }
}

// Emit an intrinsic that has 2 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E, unsigned IntrinsicID,
                                unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, { Src0, Src1 });
  }
}

// Emit an intrinsic that has 3 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                 const CallExpr *E, unsigned IntrinsicID,
                                 unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
  }
}

// Emit an intrinsic where all operands are of the same type as the result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                                unsigned IntrinsicID,
                                                unsigned ConstrainedIntrinsicID,
                                                llvm::Type *Ty,
                                                ArrayRef<Value *> Args) {
  Function *F;
  if (CGF.Builder.getIsFPConstrained())
    F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Ty);
  else
    F = CGF.CGM.getIntrinsic(IntrinsicID, Ty);

  if (CGF.Builder.getIsFPConstrained())
    return CGF.Builder.CreateConstrainedFPCall(F, Args);
  else
    return CGF.Builder.CreateCall(F, Args);
}

// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type.
static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
                               const CallExpr *E,
                               unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, Src0);
}

// Emit an intrinsic that has 2 operands of the same type as its result.
static Value *emitBinaryBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E,
                                unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, { Src0, Src1 });
}

// Emit an intrinsic that has 3 operands of the same type as its result.
static Value *emitTernaryBuiltin(CodeGenFunction &CGF,
                                 const CallExpr *E,
                                 unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
}

// Emit an intrinsic that has 1 float or double operand, and 1 integer.
static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
                               const CallExpr *E,
                               unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, {Src0, Src1});
}

// Emit an intrinsic that has overloaded integer result and fp operand.
static Value *
emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                                        unsigned IntrinsicID,
                                        unsigned ConstrainedIntrinsicID) {
  llvm::Type *ResultType = CGF.ConvertType(E->getType());
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
                                       {ResultType, Src0->getType()});
    return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
  } else {
    Function *F =
        CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
    return CGF.Builder.CreateCall(F, Src0);
  }
}

/// EmitFAbs - Emit a call to @llvm.fabs().
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
  Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
  llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
  Call->setDoesNotAccessMemory();
  return Call;
}

/// Emit the computation of the sign bit for a floating point value. Returns
/// the i1 sign bit value.
static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
  LLVMContext &C = CGF.CGM.getLLVMContext();

  llvm::Type *Ty = V->getType();
  int Width = Ty->getPrimitiveSizeInBits();
  llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
  V = CGF.Builder.CreateBitCast(V, IntTy);
  if (Ty->isPPC_FP128Ty()) {
    // We want the sign bit of the higher-order double. The bitcast we just
    // did works as if the double-double was stored to memory and then
    // read as an i128. The "store" will put the higher-order double in the
    // lower address in both little- and big-Endian modes, but the "load"
    // will treat those bits as a different part of the i128: the low bits in
    // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
    // we need to shift the high bits down to the low before truncating.
    Width >>= 1;
    if (CGF.getTarget().isBigEndian()) {
      Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
      V = CGF.Builder.CreateLShr(V, ShiftCst);
    }
    // We are truncating value in order to extract the higher-order
    // double, which we will be using to extract the sign from.
    IntTy = llvm::IntegerType::get(C, Width);
    V = CGF.Builder.CreateTrunc(V, IntTy);
  }
  Value *Zero = llvm::Constant::getNullValue(IntTy);
  return CGF.Builder.CreateICmpSLT(V, Zero);
}

static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
                              const CallExpr *E, llvm::Constant *calleeValue) {
  CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
  return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
}

/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
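///
/// For example, with IntrinsicID == llvm::Intrinsic::sadd_with_overflow the
/// emitted call returns an {iN, i1} pair; the iN sum is returned from this
/// helper and the i1 overflow flag is stored into Carry.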
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
                                          const llvm::Intrinsic::ID IntrinsicID,
                                          llvm::Value *X, llvm::Value *Y,
                                          llvm::Value *&Carry) {
  // Make sure we have integers of the same width.
  assert(X->getType() == Y->getType() &&
         "Arguments must be the same type. (Did you forget to make sure both "
         "arguments have the same integer width?)");

  Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
  llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
  Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
  return CGF.Builder.CreateExtractValue(Tmp, 0);
}

static Value *emitRangedBuiltin(CodeGenFunction &CGF,
                                unsigned IntrinsicID,
                                int low, int high) {
  llvm::MDBuilder MDHelper(CGF.getLLVMContext());
  llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high));
  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {});
  llvm::Instruction *Call = CGF.Builder.CreateCall(F);
  Call->setMetadata(llvm::LLVMContext::MD_range, RNode);
  return Call;
}

namespace {
  struct WidthAndSignedness {
    unsigned Width;
    bool Signed;
  };
}

static WidthAndSignedness
getIntegerWidthAndSignedness(const clang::ASTContext &context,
                             const clang::QualType Type) {
  assert(Type->isIntegerType() && "Given type is not an integer.");
  unsigned Width = Type->isBooleanType() ? 1
                   : Type->isExtIntType() ? context.getIntWidth(Type)
                                          : context.getTypeInfo(Type).Width;
  bool Signed = Type->isSignedIntegerType();
  return {Width, Signed};
}

// Given one or more integer types, this function produces an integer type that
// encompasses them: any value in one of the given types could be expressed in
// the encompassing type.
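//
// For example, given an unsigned 32-bit type and a signed 32-bit type, the
// result is {Width = 33, Signed = true}: a signed type needs one extra bit to
// represent every unsigned 32-bit value.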
static struct WidthAndSignedness
EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
  assert(Types.size() > 0 && "Empty list of types.");

  // If any of the given types is signed, we must return a signed type.
  bool Signed = false;
  for (const auto &Type : Types) {
    Signed |= Type.Signed;
  }

  // The encompassing type must have a width greater than or equal to the width
  // of the specified types. Additionally, if the encompassing type is signed,
  // its width must be strictly greater than the width of any unsigned types
  // given.
  unsigned Width = 0;
  for (const auto &Type : Types) {
    unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
    if (Width < MinWidth) {
      Width = MinWidth;
    }
  }

  return {Width, Signed};
}

Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
  llvm::Type *DestType = Int8PtrTy;
  if (ArgValue->getType() != DestType)
    ArgValue =
        Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data());

  Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
  return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue);
}

/// Checks if using the result of __builtin_object_size(p, @p From) in place of
/// __builtin_object_size(p, @p To) is correct
static bool areBOSTypesCompatible(int From, int To) {
  // Note: Our __builtin_object_size implementation currently treats Type=0 and
  // Type=2 identically. Encoding this implementation detail here may make
  // improving __builtin_object_size difficult in the future, so it's omitted.
  return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
}

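// Note: for __builtin_object_size, the "unknown" result is (size_t)-1 for
// types 0 and 1 and 0 for types 2 and 3, which is what the "Type & 2" check
// below encodes (matching GCC's documented behavior).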
static llvm::Value *
getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
  return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
}

llvm::Value *
CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
                                                 llvm::IntegerType *ResType,
                                                 llvm::Value *EmittedE,
                                                 bool IsDynamic) {
  uint64_t ObjectSize;
  if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
    return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
  return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
}

/// Returns a Value corresponding to the size of the given expression.
/// This Value may be either of the following:
///   - A llvm::Argument (if E is a param with the pass_object_size attribute on
///     it)
///   - A call to the @llvm.objectsize intrinsic
///
/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
/// and we wouldn't otherwise try to reference a pass_object_size parameter,
/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
llvm::Value *
CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
                                       llvm::IntegerType *ResType,
                                       llvm::Value *EmittedE, bool IsDynamic) {
  // We need to reference an argument if the pointer is a parameter with the
  // pass_object_size attribute.
  if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
    auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
    auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
    if (Param != nullptr && PS != nullptr &&
        areBOSTypesCompatible(PS->getType(), Type)) {
      auto Iter = SizeArguments.find(Param);
      assert(Iter != SizeArguments.end());

      const ImplicitParamDecl *D = Iter->second;
      auto DIter = LocalDeclMap.find(D);
      assert(DIter != LocalDeclMap.end());

      return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
                              getContext().getSizeType(), E->getBeginLoc());
    }
  }

  // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
  // evaluate E for side-effects. In either case, we shouldn't lower to
  // @llvm.objectsize.
  if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
    return getDefaultBuiltinObjectSizeResult(Type, ResType);

  Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
  assert(Ptr->getType()->isPointerTy() &&
         "Non-pointer passed to __builtin_object_size?");

  Function *F =
      CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});

  // LLVM only supports types 0 and 2, so make sure we pass that along as a
  // boolean.
  Value *Min = Builder.getInt1((Type & 2) != 0);
  // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
  Value *NullIsUnknown = Builder.getTrue();
  Value *Dynamic = Builder.getInt1(IsDynamic);
  return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
}

namespace {
/// A struct to generically describe a bit test intrinsic.
struct BitTest {
  enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
  enum InterlockingKind : uint8_t {
    Unlocked,
    Sequential,
    Acquire,
    Release,
    NoFence
  };

  ActionKind Action;
  InterlockingKind Interlocking;
  bool Is64Bit;

  static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
};
} // namespace

BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
  switch (BuiltinID) {
    // Main portable variants.
  case Builtin::BI_bittest:
    return {TestOnly, Unlocked, false};
  case Builtin::BI_bittestandcomplement:
    return {Complement, Unlocked, false};
  case Builtin::BI_bittestandreset:
    return {Reset, Unlocked, false};
  case Builtin::BI_bittestandset:
    return {Set, Unlocked, false};
  case Builtin::BI_interlockedbittestandreset:
    return {Reset, Sequential, false};
  case Builtin::BI_interlockedbittestandset:
    return {Set, Sequential, false};

    // X86-specific 64-bit variants.
  case Builtin::BI_bittest64:
    return {TestOnly, Unlocked, true};
  case Builtin::BI_bittestandcomplement64:
    return {Complement, Unlocked, true};
  case Builtin::BI_bittestandreset64:
    return {Reset, Unlocked, true};
  case Builtin::BI_bittestandset64:
    return {Set, Unlocked, true};
  case Builtin::BI_interlockedbittestandreset64:
    return {Reset, Sequential, true};
  case Builtin::BI_interlockedbittestandset64:
    return {Set, Sequential, true};

    // ARM/AArch64-specific ordering variants.
  case Builtin::BI_interlockedbittestandset_acq:
    return {Set, Acquire, false};
  case Builtin::BI_interlockedbittestandset_rel:
    return {Set, Release, false};
  case Builtin::BI_interlockedbittestandset_nf:
    return {Set, NoFence, false};
  case Builtin::BI_interlockedbittestandreset_acq:
    return {Reset, Acquire, false};
  case Builtin::BI_interlockedbittestandreset_rel:
    return {Reset, Release, false};
  case Builtin::BI_interlockedbittestandreset_nf:
    return {Reset, NoFence, false};
  }
  llvm_unreachable("expected only bittest intrinsics");
}

static char bitActionToX86BTCode(BitTest::ActionKind A) {
  switch (A) {
  case BitTest::TestOnly:   return '\0';
  case BitTest::Complement: return 'c';
  case BitTest::Reset:      return 'r';
  case BitTest::Set:        return 's';
  }
  llvm_unreachable("invalid action");
}

static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
                                            BitTest BT,
                                            const CallExpr *E, Value *BitBase,
                                            Value *BitPos) {
  char Action = bitActionToX86BTCode(BT.Action);
  char SizeSuffix = BT.Is64Bit ? 'q' : 'l';

  // Build the assembly.
  SmallString<64> Asm;
  raw_svector_ostream AsmOS(Asm);
  if (BT.Interlocking != BitTest::Unlocked)
    AsmOS << "lock ";
  AsmOS << "bt";
  if (Action)
    AsmOS << Action;
  AsmOS << SizeSuffix << " $2, ($1)";

  // Build the constraints. FIXME: We should support immediates when possible.
  std::string Constraints = "={@ccc},r,r,~{cc},~{memory}";
  std::string MachineClobbers = CGF.getTarget().getClobbers();
  if (!MachineClobbers.empty()) {
    Constraints += ',';
    Constraints += MachineClobbers;
  }
  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(),
      CGF.getContext().getTypeSize(E->getArg(1)->getType()));
  llvm::Type *IntPtrType = IntType->getPointerTo();
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false);

  llvm::InlineAsm *IA =
      llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
  return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
}

static llvm::AtomicOrdering
getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
  switch (I) {
  case BitTest::Unlocked:   return llvm::AtomicOrdering::NotAtomic;
  case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
  case BitTest::Acquire:    return llvm::AtomicOrdering::Acquire;
  case BitTest::Release:    return llvm::AtomicOrdering::Release;
  case BitTest::NoFence:    return llvm::AtomicOrdering::Monotonic;
  }
  llvm_unreachable("invalid interlocking");
}

/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
/// bits and a bit position and read and optionally modify the bit at that
/// position. The position index can be arbitrarily large, i.e. it can be larger
/// than 31 or 63, so we need an indexed load in the general case.
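///
/// For example, on a non-x86 target "_bittest(p, 35)" loads the byte at offset
/// 35 >> 3 == 4 from the base pointer and returns bit 35 & 7 == 3 of that byte.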
static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
                                         unsigned BuiltinID,
                                         const CallExpr *E) {
  Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
  Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));

  BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);

  // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
  // indexing operation internally. Use them if possible.
  if (CGF.getTarget().getTriple().isX86())
    return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);

  // Otherwise, use generic code to load one byte and test the bit. Use all but
  // the bottom three bits as the array index, and the bottom three bits to form
  // a mask.
  // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
  Value *ByteIndex = CGF.Builder.CreateAShr(
      BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
  Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy);
  Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8,
                                                 ByteIndex, "bittest.byteaddr"),
                   CharUnits::One());
  Value *PosLow =
      CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
                            llvm::ConstantInt::get(CGF.Int8Ty, 0x7));

  // The updating instructions will need a mask.
  Value *Mask = nullptr;
  if (BT.Action != BitTest::TestOnly) {
    Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
                                 "bittest.mask");
  }

  // Check the action and ordering of the interlocked intrinsics.
  llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);

  Value *OldByte = nullptr;
  if (Ordering != llvm::AtomicOrdering::NotAtomic) {
    // Emit a combined atomicrmw load/store operation for the interlocked
    // intrinsics.
    llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
    if (BT.Action == BitTest::Reset) {
      Mask = CGF.Builder.CreateNot(Mask);
      RMWOp = llvm::AtomicRMWInst::And;
    }
    OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask,
                                          Ordering);
  } else {
    // Emit a plain load for the non-interlocked intrinsics.
    OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
    Value *NewByte = nullptr;
    switch (BT.Action) {
    case BitTest::TestOnly:
      // Don't store anything.
      break;
    case BitTest::Complement:
      NewByte = CGF.Builder.CreateXor(OldByte, Mask);
      break;
    case BitTest::Reset:
      NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
      break;
    case BitTest::Set:
      NewByte = CGF.Builder.CreateOr(OldByte, Mask);
      break;
    }
    if (NewByte)
      CGF.Builder.CreateStore(NewByte, ByteAddr);
  }

  // However we loaded the old byte, either by plain load or atomicrmw, shift
  // the bit into the low position and mask it to 0 or 1.
  Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
  return CGF.Builder.CreateAnd(
      ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
}

namespace {
enum class MSVCSetJmpKind {
  _setjmpex,
  _setjmp3,
  _setjmp
};
}

/// MSVC handles setjmp a bit differently on different platforms. On every
/// architecture except 32-bit x86, the frame address is passed. On x86, extra
/// parameters can be passed as variadic arguments, but we always pass none.
static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
                               const CallExpr *E) {
  llvm::Value *Arg1 = nullptr;
  llvm::Type *Arg1Ty = nullptr;
  StringRef Name;
  bool IsVarArg = false;
  if (SJKind == MSVCSetJmpKind::_setjmp3) {
    Name = "_setjmp3";
    Arg1Ty = CGF.Int32Ty;
    Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
    IsVarArg = true;
  } else {
    Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
    Arg1Ty = CGF.Int8PtrTy;
    if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
      Arg1 = CGF.Builder.CreateCall(
          CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
    } else
      Arg1 = CGF.Builder.CreateCall(
          CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
          llvm::ConstantInt::get(CGF.Int32Ty, 0));
  }

  // Mark the call site and declaration with ReturnsTwice.
  llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
  llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
      llvm::Attribute::ReturnsTwice);
  llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
      ReturnsTwiceAttr, /*Local=*/true);

  llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
      CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
  llvm::Value *Args[] = {Buf, Arg1};
  llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
  CB->setAttributes(ReturnsTwiceAttr);
  return RValue::get(CB);
}

// Many MSVC builtins are available on x64, ARM and AArch64; to avoid repeating
// code, we handle them here.
enum class CodeGenFunction::MSVCIntrin {
  _BitScanForward,
  _BitScanReverse,
  _InterlockedAnd,
  _InterlockedDecrement,
  _InterlockedExchange,
  _InterlockedExchangeAdd,
  _InterlockedExchangeSub,
  _InterlockedIncrement,
  _InterlockedOr,
  _InterlockedXor,
  _InterlockedExchangeAdd_acq,
  _InterlockedExchangeAdd_rel,
  _InterlockedExchangeAdd_nf,
  _InterlockedExchange_acq,
  _InterlockedExchange_rel,
  _InterlockedExchange_nf,
  _InterlockedCompareExchange_acq,
  _InterlockedCompareExchange_rel,
  _InterlockedCompareExchange_nf,
  _InterlockedCompareExchange128,
  _InterlockedCompareExchange128_acq,
  _InterlockedCompareExchange128_rel,
  _InterlockedCompareExchange128_nf,
  _InterlockedOr_acq,
  _InterlockedOr_rel,
  _InterlockedOr_nf,
  _InterlockedXor_acq,
  _InterlockedXor_rel,
  _InterlockedXor_nf,
  _InterlockedAnd_acq,
  _InterlockedAnd_rel,
  _InterlockedAnd_nf,
  _InterlockedIncrement_acq,
  _InterlockedIncrement_rel,
  _InterlockedIncrement_nf,
  _InterlockedDecrement_acq,
  _InterlockedDecrement_rel,
  _InterlockedDecrement_nf,
  __fastfail,
};

static Optional<CodeGenFunction::MSVCIntrin>
translateArmToMsvcIntrin(unsigned BuiltinID) {
  using MSVCIntrin = CodeGenFunction::MSVCIntrin;
  switch (BuiltinID) {
  default:
    return None;
  case ARM::BI_BitScanForward:
  case ARM::BI_BitScanForward64:
    return MSVCIntrin::_BitScanForward;
  case ARM::BI_BitScanReverse:
  case ARM::BI_BitScanReverse64:
    return MSVCIntrin::_BitScanReverse;
  case ARM::BI_InterlockedAnd64:
    return MSVCIntrin::_InterlockedAnd;
  case ARM::BI_InterlockedExchange64:
    return MSVCIntrin::_InterlockedExchange;
  case ARM::BI_InterlockedExchangeAdd64:
    return MSVCIntrin::_InterlockedExchangeAdd;
  case ARM::BI_InterlockedExchangeSub64:
    return MSVCIntrin::_InterlockedExchangeSub;
  case ARM::BI_InterlockedOr64:
    return MSVCIntrin::_InterlockedOr;
  case ARM::BI_InterlockedXor64:
    return MSVCIntrin::_InterlockedXor;
  case ARM::BI_InterlockedDecrement64:
    return MSVCIntrin::_InterlockedDecrement;
  case ARM::BI_InterlockedIncrement64:
    return MSVCIntrin::_InterlockedIncrement;
  case ARM::BI_InterlockedExchangeAdd8_acq:
  case ARM::BI_InterlockedExchangeAdd16_acq:
  case ARM::BI_InterlockedExchangeAdd_acq:
  case ARM::BI_InterlockedExchangeAdd64_acq:
    return MSVCIntrin::_InterlockedExchangeAdd_acq;
  case ARM::BI_InterlockedExchangeAdd8_rel:
  case ARM::BI_InterlockedExchangeAdd16_rel:
  case ARM::BI_InterlockedExchangeAdd_rel:
  case ARM::BI_InterlockedExchangeAdd64_rel:
    return MSVCIntrin::_InterlockedExchangeAdd_rel;
  case ARM::BI_InterlockedExchangeAdd8_nf:
  case ARM::BI_InterlockedExchangeAdd16_nf:
  case ARM::BI_InterlockedExchangeAdd_nf:
  case ARM::BI_InterlockedExchangeAdd64_nf:
    return MSVCIntrin::_InterlockedExchangeAdd_nf;
  case ARM::BI_InterlockedExchange8_acq:
  case ARM::BI_InterlockedExchange16_acq:
  case ARM::BI_InterlockedExchange_acq:
  case ARM::BI_InterlockedExchange64_acq:
    return MSVCIntrin::_InterlockedExchange_acq;
  case ARM::BI_InterlockedExchange8_rel:
  case ARM::BI_InterlockedExchange16_rel:
  case ARM::BI_InterlockedExchange_rel:
  case ARM::BI_InterlockedExchange64_rel:
    return MSVCIntrin::_InterlockedExchange_rel;
  case ARM::BI_InterlockedExchange8_nf:
  case ARM::BI_InterlockedExchange16_nf:
  case ARM::BI_InterlockedExchange_nf:
  case ARM::BI_InterlockedExchange64_nf:
    return MSVCIntrin::_InterlockedExchange_nf;
  case ARM::BI_InterlockedCompareExchange8_acq:
  case ARM::BI_InterlockedCompareExchange16_acq:
  case ARM::BI_InterlockedCompareExchange_acq:
  case ARM::BI_InterlockedCompareExchange64_acq:
    return MSVCIntrin::_InterlockedCompareExchange_acq;
  case ARM::BI_InterlockedCompareExchange8_rel:
  case ARM::BI_InterlockedCompareExchange16_rel:
  case ARM::BI_InterlockedCompareExchange_rel:
  case ARM::BI_InterlockedCompareExchange64_rel:
    return MSVCIntrin::_InterlockedCompareExchange_rel;
  case ARM::BI_InterlockedCompareExchange8_nf:
  case ARM::BI_InterlockedCompareExchange16_nf:
  case ARM::BI_InterlockedCompareExchange_nf:
  case ARM::BI_InterlockedCompareExchange64_nf:
    return MSVCIntrin::_InterlockedCompareExchange_nf;
  case ARM::BI_InterlockedOr8_acq:
  case ARM::BI_InterlockedOr16_acq:
  case ARM::BI_InterlockedOr_acq:
  case ARM::BI_InterlockedOr64_acq:
    return MSVCIntrin::_InterlockedOr_acq;
  case ARM::BI_InterlockedOr8_rel:
  case ARM::BI_InterlockedOr16_rel:
  case ARM::BI_InterlockedOr_rel:
  case ARM::BI_InterlockedOr64_rel:
    return MSVCIntrin::_InterlockedOr_rel;
  case ARM::BI_InterlockedOr8_nf:
  case ARM::BI_InterlockedOr16_nf:
  case ARM::BI_InterlockedOr_nf:
  case ARM::BI_InterlockedOr64_nf:
    return MSVCIntrin::_InterlockedOr_nf;
  case ARM::BI_InterlockedXor8_acq:
  case ARM::BI_InterlockedXor16_acq:
  case ARM::BI_InterlockedXor_acq:
  case ARM::BI_InterlockedXor64_acq:
    return MSVCIntrin::_InterlockedXor_acq;
  case ARM::BI_InterlockedXor8_rel:
  case ARM::BI_InterlockedXor16_rel:
  case ARM::BI_InterlockedXor_rel:
  case ARM::BI_InterlockedXor64_rel:
    return MSVCIntrin::_InterlockedXor_rel;
  case ARM::BI_InterlockedXor8_nf:
  case ARM::BI_InterlockedXor16_nf:
  case ARM::BI_InterlockedXor_nf:
  case ARM::BI_InterlockedXor64_nf:
    return MSVCIntrin::_InterlockedXor_nf;
  case ARM::BI_InterlockedAnd8_acq:
  case ARM::BI_InterlockedAnd16_acq:
  case ARM::BI_InterlockedAnd_acq:
  case ARM::BI_InterlockedAnd64_acq:
    return MSVCIntrin::_InterlockedAnd_acq;
  case ARM::BI_InterlockedAnd8_rel:
  case ARM::BI_InterlockedAnd16_rel:
  case ARM::BI_InterlockedAnd_rel:
  case ARM::BI_InterlockedAnd64_rel:
    return MSVCIntrin::_InterlockedAnd_rel;
  case ARM::BI_InterlockedAnd8_nf:
  case ARM::BI_InterlockedAnd16_nf:
  case ARM::BI_InterlockedAnd_nf:
  case ARM::BI_InterlockedAnd64_nf:
    return MSVCIntrin::_InterlockedAnd_nf;
  case ARM::BI_InterlockedIncrement16_acq:
  case ARM::BI_InterlockedIncrement_acq:
  case ARM::BI_InterlockedIncrement64_acq:
    return MSVCIntrin::_InterlockedIncrement_acq;
  case ARM::BI_InterlockedIncrement16_rel:
  case ARM::BI_InterlockedIncrement_rel:
  case ARM::BI_InterlockedIncrement64_rel:
    return MSVCIntrin::_InterlockedIncrement_rel;
  case ARM::BI_InterlockedIncrement16_nf:
  case ARM::BI_InterlockedIncrement_nf:
  case ARM::BI_InterlockedIncrement64_nf:
    return MSVCIntrin::_InterlockedIncrement_nf;
  case ARM::BI_InterlockedDecrement16_acq:
  case ARM::BI_InterlockedDecrement_acq:
  case ARM::BI_InterlockedDecrement64_acq:
    return MSVCIntrin::_InterlockedDecrement_acq;
  case ARM::BI_InterlockedDecrement16_rel:
  case ARM::BI_InterlockedDecrement_rel:
  case ARM::BI_InterlockedDecrement64_rel:
    return MSVCIntrin::_InterlockedDecrement_rel;
  case ARM::BI_InterlockedDecrement16_nf:
  case ARM::BI_InterlockedDecrement_nf:
  case ARM::BI_InterlockedDecrement64_nf:
    return MSVCIntrin::_InterlockedDecrement_nf;
  }
  llvm_unreachable("must return from switch");
}

static Optional<CodeGenFunction::MSVCIntrin>
translateAarch64ToMsvcIntrin(unsigned BuiltinID) {
  using MSVCIntrin = CodeGenFunction::MSVCIntrin;
  switch (BuiltinID) {
  default:
    return None;
  case AArch64::BI_BitScanForward:
  case AArch64::BI_BitScanForward64:
    return MSVCIntrin::_BitScanForward;
  case AArch64::BI_BitScanReverse:
  case AArch64::BI_BitScanReverse64:
    return MSVCIntrin::_BitScanReverse;
  case AArch64::BI_InterlockedAnd64:
    return MSVCIntrin::_InterlockedAnd;
  case AArch64::BI_InterlockedExchange64:
    return MSVCIntrin::_InterlockedExchange;
  case AArch64::BI_InterlockedExchangeAdd64:
    return MSVCIntrin::_InterlockedExchangeAdd;
  case AArch64::BI_InterlockedExchangeSub64:
    return MSVCIntrin::_InterlockedExchangeSub;
  case AArch64::BI_InterlockedOr64:
    return MSVCIntrin::_InterlockedOr;
  case AArch64::BI_InterlockedXor64:
    return MSVCIntrin::_InterlockedXor;
  case AArch64::BI_InterlockedDecrement64:
    return MSVCIntrin::_InterlockedDecrement;
  case AArch64::BI_InterlockedIncrement64:
    return MSVCIntrin::_InterlockedIncrement;
  case AArch64::BI_InterlockedExchangeAdd8_acq:
  case AArch64::BI_InterlockedExchangeAdd16_acq:
  case AArch64::BI_InterlockedExchangeAdd_acq:
  case AArch64::BI_InterlockedExchangeAdd64_acq:
    return MSVCIntrin::_InterlockedExchangeAdd_acq;
  case AArch64::BI_InterlockedExchangeAdd8_rel:
  case AArch64::BI_InterlockedExchangeAdd16_rel:
  case AArch64::BI_InterlockedExchangeAdd_rel:
  case AArch64::BI_InterlockedExchangeAdd64_rel:
    return MSVCIntrin::_InterlockedExchangeAdd_rel;
  case AArch64::BI_InterlockedExchangeAdd8_nf:
  case AArch64::BI_InterlockedExchangeAdd16_nf:
  case AArch64::BI_InterlockedExchangeAdd_nf:
  case AArch64::BI_InterlockedExchangeAdd64_nf:
    return MSVCIntrin::_InterlockedExchangeAdd_nf;
  case AArch64::BI_InterlockedExchange8_acq:
  case AArch64::BI_InterlockedExchange16_acq:
  case AArch64::BI_InterlockedExchange_acq:
  case AArch64::BI_InterlockedExchange64_acq:
    return MSVCIntrin::_InterlockedExchange_acq;
  case AArch64::BI_InterlockedExchange8_rel:
  case AArch64::BI_InterlockedExchange16_rel:
  case AArch64::BI_InterlockedExchange_rel:
  case AArch64::BI_InterlockedExchange64_rel:
    return MSVCIntrin::_InterlockedExchange_rel;
  case AArch64::BI_InterlockedExchange8_nf:
  case AArch64::BI_InterlockedExchange16_nf:
  case AArch64::BI_InterlockedExchange_nf:
  case AArch64::BI_InterlockedExchange64_nf:
    return MSVCIntrin::_InterlockedExchange_nf;
  case AArch64::BI_InterlockedCompareExchange8_acq:
  case AArch64::BI_InterlockedCompareExchange16_acq:
  case AArch64::BI_InterlockedCompareExchange_acq:
  case AArch64::BI_InterlockedCompareExchange64_acq:
    return MSVCIntrin::_InterlockedCompareExchange_acq;
  case AArch64::BI_InterlockedCompareExchange8_rel:
  case AArch64::BI_InterlockedCompareExchange16_rel:
  case AArch64::BI_InterlockedCompareExchange_rel:
  case AArch64::BI_InterlockedCompareExchange64_rel:
    return MSVCIntrin::_InterlockedCompareExchange_rel;
  case AArch64::BI_InterlockedCompareExchange8_nf:
  case AArch64::BI_InterlockedCompareExchange16_nf:
  case AArch64::BI_InterlockedCompareExchange_nf:
  case AArch64::BI_InterlockedCompareExchange64_nf:
    return MSVCIntrin::_InterlockedCompareExchange_nf;
  case AArch64::BI_InterlockedCompareExchange128:
    return MSVCIntrin::_InterlockedCompareExchange128;
  case AArch64::BI_InterlockedCompareExchange128_acq:
    return MSVCIntrin::_InterlockedCompareExchange128_acq;
  case AArch64::BI_InterlockedCompareExchange128_nf:
    return MSVCIntrin::_InterlockedCompareExchange128_nf;
  case AArch64::BI_InterlockedCompareExchange128_rel:
    return MSVCIntrin::_InterlockedCompareExchange128_rel;
  case AArch64::BI_InterlockedOr8_acq:
  case AArch64::BI_InterlockedOr16_acq:
  case AArch64::BI_InterlockedOr_acq:
  case AArch64::BI_InterlockedOr64_acq:
    return MSVCIntrin::_InterlockedOr_acq;
  case AArch64::BI_InterlockedOr8_rel:
  case AArch64::BI_InterlockedOr16_rel:
  case AArch64::BI_InterlockedOr_rel:
  case AArch64::BI_InterlockedOr64_rel:
    return MSVCIntrin::_InterlockedOr_rel;
  case AArch64::BI_InterlockedOr8_nf:
  case AArch64::BI_InterlockedOr16_nf:
  case AArch64::BI_InterlockedOr_nf:
  case AArch64::BI_InterlockedOr64_nf:
    return MSVCIntrin::_InterlockedOr_nf;
  case AArch64::BI_InterlockedXor8_acq:
  case AArch64::BI_InterlockedXor16_acq:
  case AArch64::BI_InterlockedXor_acq:
  case AArch64::BI_InterlockedXor64_acq:
    return MSVCIntrin::_InterlockedXor_acq;
  case AArch64::BI_InterlockedXor8_rel:
  case AArch64::BI_InterlockedXor16_rel:
  case AArch64::BI_InterlockedXor_rel:
  case AArch64::BI_InterlockedXor64_rel:
    return MSVCIntrin::_InterlockedXor_rel;
  case AArch64::BI_InterlockedXor8_nf:
  case AArch64::BI_InterlockedXor16_nf:
  case AArch64::BI_InterlockedXor_nf:
  case AArch64::BI_InterlockedXor64_nf:
    return MSVCIntrin::_InterlockedXor_nf;
  case AArch64::BI_InterlockedAnd8_acq:
  case AArch64::BI_InterlockedAnd16_acq:
  case AArch64::BI_InterlockedAnd_acq:
  case AArch64::BI_InterlockedAnd64_acq:
    return MSVCIntrin::_InterlockedAnd_acq;
  case AArch64::BI_InterlockedAnd8_rel:
  case AArch64::BI_InterlockedAnd16_rel:
  case AArch64::BI_InterlockedAnd_rel:
  case AArch64::BI_InterlockedAnd64_rel:
    return MSVCIntrin::_InterlockedAnd_rel;
  case AArch64::BI_InterlockedAnd8_nf:
  case AArch64::BI_InterlockedAnd16_nf:
  case AArch64::BI_InterlockedAnd_nf:
  case AArch64::BI_InterlockedAnd64_nf:
    return MSVCIntrin::_InterlockedAnd_nf;
  case AArch64::BI_InterlockedIncrement16_acq:
  case AArch64::BI_InterlockedIncrement_acq:
  case AArch64::BI_InterlockedIncrement64_acq:
    return MSVCIntrin::_InterlockedIncrement_acq;
  case AArch64::BI_InterlockedIncrement16_rel:
  case AArch64::BI_InterlockedIncrement_rel:
  case AArch64::BI_InterlockedIncrement64_rel:
    return MSVCIntrin::_InterlockedIncrement_rel;
  case AArch64::BI_InterlockedIncrement16_nf:
  case AArch64::BI_InterlockedIncrement_nf:
  case AArch64::BI_InterlockedIncrement64_nf:
    return MSVCIntrin::_InterlockedIncrement_nf;
  case AArch64::BI_InterlockedDecrement16_acq:
  case AArch64::BI_InterlockedDecrement_acq:
  case AArch64::BI_InterlockedDecrement64_acq:
    return MSVCIntrin::_InterlockedDecrement_acq;
  case AArch64::BI_InterlockedDecrement16_rel:
  case AArch64::BI_InterlockedDecrement_rel:
  case AArch64::BI_InterlockedDecrement64_rel:
    return MSVCIntrin::_InterlockedDecrement_rel;
  case AArch64::BI_InterlockedDecrement16_nf:
  case AArch64::BI_InterlockedDecrement_nf:
  case AArch64::BI_InterlockedDecrement64_nf:
    return MSVCIntrin::_InterlockedDecrement_nf;
  }
  llvm_unreachable("must return from switch");
}

static Optional<CodeGenFunction::MSVCIntrin>
translateX86ToMsvcIntrin(unsigned BuiltinID) {
  using MSVCIntrin = CodeGenFunction::MSVCIntrin;
  switch (BuiltinID) {
  default:
    return None;
  case clang::X86::BI_BitScanForward:
  case clang::X86::BI_BitScanForward64:
    return MSVCIntrin::_BitScanForward;
  case clang::X86::BI_BitScanReverse:
  case clang::X86::BI_BitScanReverse64:
    return MSVCIntrin::_BitScanReverse;
  case clang::X86::BI_InterlockedAnd64:
    return MSVCIntrin::_InterlockedAnd;
  case clang::X86::BI_InterlockedCompareExchange128:
    return MSVCIntrin::_InterlockedCompareExchange128;
  case clang::X86::BI_InterlockedExchange64:
    return MSVCIntrin::_InterlockedExchange;
  case clang::X86::BI_InterlockedExchangeAdd64:
    return MSVCIntrin::_InterlockedExchangeAdd;
  case clang::X86::BI_InterlockedExchangeSub64:
    return MSVCIntrin::_InterlockedExchangeSub;
  case clang::X86::BI_InterlockedOr64:
    return MSVCIntrin::_InterlockedOr;
  case clang::X86::BI_InterlockedXor64:
    return MSVCIntrin::_InterlockedXor;
  case clang::X86::BI_InterlockedDecrement64:
    return MSVCIntrin::_InterlockedDecrement;
  case clang::X86::BI_InterlockedIncrement64:
    return MSVCIntrin::_InterlockedIncrement;
  }
  llvm_unreachable("must return from switch");
}

1426// Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated.
1427Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
1428 const CallExpr *E) {
1429 switch (BuiltinID) {
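  // For _BitScanForward/_BitScanReverse the emitted code tests the operand
  // against zero first: if it is zero, control jumps straight to the end block
  // and the call evaluates to 0 without storing an index; otherwise the bit
  // index is computed and stored and the call evaluates to 1. A phi in the end
  // block selects between the two results.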
  case MSVCIntrin::_BitScanForward:
  case MSVCIntrin::_BitScanReverse: {
    Address IndexAddress(EmitPointerWithAlignment(E->getArg(0)));
    Value *ArgValue = EmitScalarExpr(E->getArg(1));

    llvm::Type *ArgType = ArgValue->getType();
    llvm::Type *IndexType =
        IndexAddress.getPointer()->getType()->getPointerElementType();
    llvm::Type *ResultType = ConvertType(E->getType());

    Value *ArgZero = llvm::Constant::getNullValue(ArgType);
    Value *ResZero = llvm::Constant::getNullValue(ResultType);
    Value *ResOne = llvm::ConstantInt::get(ResultType, 1);

    BasicBlock *Begin = Builder.GetInsertBlock();
    BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
    Builder.SetInsertPoint(End);
    PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");

    Builder.SetInsertPoint(Begin);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
    BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
    Builder.CreateCondBr(IsZero, End, NotZero);
    Result->addIncoming(ResZero, Begin);

    Builder.SetInsertPoint(NotZero);

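    // The zero case has already branched away, so it is safe to request the
    // "zero is undefined" form of cttz/ctlz here (the second argument is
    // true). For the reverse scan, the stored index is the position of the
    // highest set bit, i.e. (bit width - 1) - ctlz(ArgValue).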
    if (BuiltinID == MSVCIntrin::_BitScanForward) {
      Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
      Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
      ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
      Builder.CreateStore(ZeroCount, IndexAddress, false);
    } else {
      unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
      Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);

      Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
      Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
      ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
      Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
      Builder.CreateStore(Index, IndexAddress, false);
    }
    Builder.CreateBr(End);
    Result->addIncoming(ResOne, NotZero);

    Builder.SetInsertPoint(End);
    return Result;
  }
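  // The Interlocked* forms below lower to a single atomicrmw or cmpxchg. The
  // _acq/_rel/_nf suffixes select acquire, release, and monotonic ("no fence")
  // orderings respectively; the unsuffixed forms are sequentially consistent.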
  case MSVCIntrin::_InterlockedAnd:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
  case MSVCIntrin::_InterlockedExchange:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
  case MSVCIntrin::_InterlockedExchangeAdd:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
  case MSVCIntrin::_InterlockedExchangeSub:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
  case MSVCIntrin::_InterlockedOr:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
  case MSVCIntrin::_InterlockedXor:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
  case MSVCIntrin::_InterlockedExchangeAdd_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedExchangeAdd_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedExchangeAdd_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedExchange_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedExchange_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedExchange_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedCompareExchange_acq:
    return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedCompareExchange_rel:
    return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedCompareExchange_nf:
    return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedCompareExchange128:
    return EmitAtomicCmpXchg128ForMSIntrin(
        *this, E, AtomicOrdering::SequentiallyConsistent);
  case MSVCIntrin::_InterlockedCompareExchange128_acq:
    return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedCompareExchange128_rel:
    return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedCompareExchange128_nf:
    return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedOr_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedOr_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedOr_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedXor_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedXor_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedXor_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedAnd_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedAnd_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedAnd_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedIncrement_acq:
    return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedIncrement_rel:
    return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedIncrement_nf:
    return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedDecrement_acq:
    return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedDecrement_rel:
    return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedDecrement_nf:
    return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);

  case MSVCIntrin::_InterlockedDecrement:
    return EmitAtomicDecrementValue(*this, E);
  case MSVCIntrin::_InterlockedIncrement:
    return EmitAtomicIncrementValue(*this, E);

  case MSVCIntrin::__fastfail: {
    // Request immediate process termination from the kernel. The instruction
    // sequences to do this are documented on MSDN:
    // https://msdn.microsoft.com/en-us/library/dn774154.aspx
    llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
    StringRef Asm, Constraints;
    switch (ISA) {
    default:
      ErrorUnsupported(E, "__fastfail call for this architecture");
      break;
    case llvm::Triple::x86:
    case llvm::Triple::x86_64:
      Asm = "int $$0x29";
      Constraints = "{cx}";
      break;
    case llvm::Triple::thumb:
      Asm = "udf #251";
      Constraints = "{r0}";
      break;
    case llvm::Triple::aarch64:
      Asm = "brk #0xF003";
      Constraints = "{w0}";
    }
    llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
    llvm::InlineAsm *IA =
        llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
    llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
        getLLVMContext(), llvm::AttributeList::FunctionIndex,
        llvm::Attribute::NoReturn);
    llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
    CI->setAttributes(NoReturnAttr);
    return CI;
  }
  }
  llvm_unreachable("Incorrect MSVC intrinsic!");
}

namespace {
// ARC cleanup for __builtin_os_log_format
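// The cleanup calls EmitARCIntrinsicUse, which emits a clang.arc.use marker so
// the ARC optimizer keeps the logged object alive until the cleanup runs.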
struct CallObjCArcUse final : EHScopeStack::Cleanup {
  CallObjCArcUse(llvm::Value *object) : object(object) {}
  llvm::Value *object;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    CGF.EmitARCIntrinsicUse(object);
  }
};
}

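/// Emit the argument of a builtin such as __builtin_clz/__builtin_ctz and,
/// when -fsanitize=builtin is enabled on a target where a zero operand is
/// undefined, guard it with a runtime check that the value is nonzero
/// (catching, for example, __builtin_clz(0)).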
Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
                                                 BuiltinCheckKind Kind) {
  assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero)
         && "Unsupported builtin check kind");

  Value *ArgValue = EmitScalarExpr(E);
  if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef())
    return ArgValue;

  SanitizerScope SanScope(this);
  Value *Cond = Builder.CreateICmpNE(
      ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
  EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin),
            SanitizerHandler::InvalidBuiltin,
            {EmitCheckSourceLocation(E->getExprLoc()),
             llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
            None);
  return ArgValue;
}

/// Get the argument type for arguments to os_log_helper.
static CanQualType getOSLogArgType(ASTContext &C, int Size) {
  QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
  return C.getCanonicalType(UnsignedTy);
}

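/// Emit (or reuse) the out-of-line helper that serializes os_log arguments
/// into the caller-provided buffer. The helper's name encodes the buffer
/// alignment, the summary and argument-count bytes, and each item's size and
/// descriptor bytes, so calls with identical layouts share a single
/// linkonce_odr definition.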
llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
    const analyze_os_log::OSLogBufferLayout &Layout,
    CharUnits BufferAlignment) {
  ASTContext &Ctx = getContext();

  llvm::SmallString<64> Name;
  {
    raw_svector_ostream OS(Name);
    OS << "__os_log_helper";
    OS << "_" << BufferAlignment.getQuantity();
    OS << "_" << int(Layout.getSummaryByte());
    OS << "_" << int(Layout.getNumArgsByte());
    for (const auto &Item : Layout.Items)
      OS << "_" << int(Item.getSizeByte()) << "_"
         << int(Item.getDescriptorByte());
  }

  if (llvm::Function *F = CGM.getModule().getFunction(Name))
    return F;

  llvm::SmallVector<QualType, 4> ArgTys;
  FunctionArgList Args;
  Args.push_back(ImplicitParamDecl::Create(
      Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
      ImplicitParamDecl::Other));
  ArgTys.emplace_back(Ctx.VoidPtrTy);

  for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
    char Size = Layout.Items[I].getSizeByte();
    if (!Size)
      continue;

    QualType ArgTy = getOSLogArgType(Ctx, Size);
    Args.push_back(ImplicitParamDecl::Create(
        Ctx, nullptr, SourceLocation(),
        &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
        ImplicitParamDecl::Other));
    ArgTys.emplace_back(ArgTy);
  }

  QualType ReturnTy = Ctx.VoidTy;
  QualType FunctionTy = Ctx.getFunctionType(ReturnTy, ArgTys, {});

  // The helper function has linkonce_odr linkage to enable the linker to merge
  // identical functions. To ensure the merging always happens, 'noinline' is
  // attached to the function when compiling with -Oz.
  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
  llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *Fn = llvm::Function::Create(
      FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
  Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn);
  CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
  Fn->setDoesNotThrow();

  // Attach 'noinline' at -Oz.
  if (CGM.getCodeGenOpts().OptimizeSize == 2)
    Fn->addFnAttr(llvm::Attribute::NoInline);

  auto NL = ApplyDebugLocation::CreateEmpty(*this);
  IdentifierInfo *II = &Ctx.Idents.get(Name);
  FunctionDecl *FD = FunctionDecl::Create(
      Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
      FunctionTy, nullptr, SC_PrivateExtern, false, false);
  // Avoid generating debug location info for the function.
  FD->setImplicit();

  StartFunction(FD, ReturnTy, Fn, FI, Args);

  // Create a scope with an artificial location for the body of this function.
  auto AL = ApplyDebugLocation::CreateArtificial(*this);

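  // Serialize the buffer: a summary byte and an argument-count byte, followed
  // by a (descriptor byte, size byte, raw argument data) triple for each item.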
  CharUnits Offset;
  Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"),
                  BufferAlignment);
  Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
                      Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
  Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
                      Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));

  unsigned I = 1;
  for (const auto &Item : Layout.Items) {
    Builder.CreateStore(
        Builder.getInt8(Item.getDescriptorByte()),
        Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
    Builder.CreateStore(
        Builder.getInt8(Item.getSizeByte()),
        Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));

    CharUnits Size = Item.size();
    if (!Size.getQuantity())
      continue;

    Address Arg = GetAddrOfLocalVar(Args[I]);
    Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
    Addr = Builder.CreateBitCast(Addr, Arg.getPointer()->getType(),
                                 "argDataCast");
    Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
    Offset += Size;
    ++I;
  }

  FinishFunction();

  return Fn;
}

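// Lower a call to __builtin_os_log_format: compute the static buffer layout,
// evaluate each argument, and call the shared helper that writes the buffer.
// The call itself evaluates to the buffer pointer (argument 0).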
RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
  assert(E.getNumArgs() >= 2 &&
         "__builtin_os_log_format takes at least 2 arguments");
  ASTContext &Ctx = getContext();
  analyze_os_log::OSLogBufferLayout Layout;
  analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout);
  Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
  llvm::SmallVector<llvm::Value *, 4> RetainableOperands;

  // Ignore argument 1, the format string. It is not currently used.
  CallArgList Args;
  Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy);

  for (const auto &Item : Layout.Items) {
    int Size = Item.getSizeByte();
    if (!Size)
      continue;

    llvm::Value *ArgVal;

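    // Each item is one of three kinds: a mask string packed byte-by-byte
    // (least-significant byte first) into a 64-bit constant, an expression
    // whose scalar value is passed through, or a constant byte count.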
    if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
      uint64_t Val = 0;
      for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
        Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
      ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
    } else if (const Expr *TheExpr = Item.getExpr()) {
      ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);

      // If a temporary object that requires destruction after the full
      // expression is passed, push a lifetime-extended cleanup to extend its
      // lifetime to the end of the enclosing block scope.
      auto LifetimeExtendObject = [&](const Expr *E) {
        E = E->IgnoreParenCasts();
        // Extend lifetimes of objects returned by function calls and message
        // sends.

        // FIXME: We should do this in other cases in which temporaries are
        // created including arguments of non-ARC types (e.g., C++
        // temporaries).
        if (isa<CallExpr>(E) || isa<ObjCMessageExpr>(E))
          return true;
        return false;
      };

      if (TheExpr->getType()->isObjCRetainableType() &&
          getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
        assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
               "Only scalar can be a ObjC retainable type");
        if (!isa<Constant>(ArgVal)) {
          CleanupKind Cleanup = getARCCleanupKind();
          QualType Ty = TheExpr->getType();
          Address Alloca = Address::invalid();
          Address Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
          ArgVal = EmitARCRetain(Ty, ArgVal);
          Builder.CreateStore(ArgVal, Addr);
          pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
                                      CodeGenFunction::destroyARCStrongPrecise,
                                      Cleanup & EHCleanup);

          // Push a clang.arc.use call to ensure ARC optimizer knows that the
          // argument has to be alive.
          if (CGM.getCodeGenOpts().OptimizationLevel != 0)
            pushCleanupAfterFullExpr<CallObjCArcUse>(Cleanup, ArgVal);
        }
      }
    } else {
      ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
    }

    unsigned ArgValSize =
        CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
    llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
                                                     ArgValSize);
    ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
    CanQualType ArgTy = getOSLogArgType(Ctx, Size);
    // If ArgVal has type x86_fp80, zero-extend ArgVal.
    ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
    Args.add(RValue::get(ArgVal), ArgTy);
  }

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
  llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
      Layout, BufAddr.getAlignment());
  EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);
  return RValue::get(BufAddr.getPointer());
}

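// __builtin_mul_overflow with two unsigned operands and a signed result of the
// same width cannot simply reuse the signed multiply-with-overflow intrinsic:
// the multiplication itself must be performed as unsigned, and overflow also
// has to be reported when the unsigned product does not fit in the signed
// result range. The predicate below detects that combination so the caller can
// special-case it.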
static bool isSpecialUnsignedMultiplySignedResult(
    unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info,
    WidthAndSignedness ResultInfo) {
  return BuiltinID == Builtin::BI__builtin_mul_overflow &&
         Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width &&
         !Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed;
}

static RValue