//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FixedPointBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/TypeSize.h"
#include <cstdarg>
#include <optional>

using namespace clang;
using namespace CodeGen;
using llvm::Value;

//===----------------------------------------------------------------------===//
// Scalar Expression Emitter
//===----------------------------------------------------------------------===//

namespace {

/// Determine whether the given binary operation may overflow.
/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
/// the returned overflow check is precise. The returned value is 'true' for
/// all other opcodes, to be conservative.
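/// For example, for signed 8-bit operands 100 and 100 with BO_Add, the sum
/// 200 wraps to -56, so \p Result is set to -56 and this returns true.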
bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
                            BinaryOperator::Opcode Opcode, bool Signed,
                            llvm::APInt &Result) {
  // Assume overflow is possible, unless we can prove otherwise.
  bool Overflow = true;
  const auto &LHSAP = LHS->getValue();
  const auto &RHSAP = RHS->getValue();
  if (Opcode == BO_Add) {
    Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
                    : LHSAP.uadd_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Sub) {
    Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
                    : LHSAP.usub_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Mul) {
    Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
                    : LHSAP.umul_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
    if (Signed && !RHS->isZero())
      Result = LHSAP.sdiv_ov(RHSAP, Overflow);
    else
      return false;
  }
  return Overflow;
}

struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;  // Computation Type.
  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
  FPOptions FPFeatures;
  const Expr *E; // Entire expr, used when reporting unsupported ops. May not be a binop.

  /// Check if the binop can result in integer overflow.
  bool mayHaveIntegerOverflow() const {
    // Without constant input, we can't rule out overflow.
    auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
    auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
    if (!LHSCI || !RHSCI)
      return true;

    llvm::APInt Result;
    return ::mayHaveIntegerOverflow(
        LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
  }

  /// Check if the binop computes a division or a remainder.
  bool isDivremOp() const {
    return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
           Opcode == BO_RemAssign;
  }

  /// Check if the binop can result in an integer division by zero.
  bool mayHaveIntegerDivisionByZero() const {
    if (isDivremOp())
      if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
        return CI->isZero();
    return true;
  }

  /// Check if the binop can result in a float division by zero.
  bool mayHaveFloatDivisionByZero() const {
    if (isDivremOp())
      if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
        return CFP->isZero();
    return true;
  }

  /// Check if at least one operand is a fixed point type. In such cases,
  /// the operation did not follow the usual arithmetic conversions, and the
  /// two operands might not be of the same type.
  bool isFixedPointOp() const {
    // We cannot simply check the result type since comparison operations
    // return an int.
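    // For example, comparing two _Accum values with "==" yields an int, even
    // though both operands are fixed point.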
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType LHSType = BinOp->getLHS()->getType();
      QualType RHSType = BinOp->getRHS()->getType();
      return LHSType->isFixedPointType() || RHSType->isFixedPointType();
    }
    if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
      return UnOp->getSubExpr()->getType()->isFixedPointType();
    return false;
  }
};

static bool MustVisitNullValue(const Expr *E) {
  // If a null pointer expression's type is the C++11 nullptr_t, then
  // it's not necessarily a simple constant and it must be evaluated
  // for its potential side effects.
  return E->getType()->isNullPtrType();
}

/// If \p E is a widened promoted integer, get its base (unpromoted) type.
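/// For example, in "short s; s + s" each operand is promoted to int, and the
/// unpromoted base type of each operand is short.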
static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
                                                       const Expr *E) {
  const Expr *Base = E->IgnoreImpCasts();
  if (E == Base)
    return std::nullopt;

  QualType BaseTy = Base->getType();
  if (!Ctx.isPromotableIntegerType(BaseTy) ||
      Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
    return std::nullopt;

  return BaseTy;
}

/// Check if \p E is a widened promoted integer.
static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
  return getUnwidenedIntegerType(Ctx, E).has_value();
}

/// Check if we can skip the overflow check for \p Op.
static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
         "Expected a unary or binary operator");

  // If the binop has constant inputs and we can prove there is no overflow,
  // we can elide the overflow check.
  if (!Op.mayHaveIntegerOverflow())
    return true;

  // If a unary op has a widened operand, the op cannot overflow.
  if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
    return !UO->canOverflow();

  // We usually don't need overflow checks for binops with widened operands.
  // Multiplication with promoted unsigned operands is a special case.
  const auto *BO = cast<BinaryOperator>(Op.E);
  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
  if (!OptionalLHSTy)
    return false;

  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
  if (!OptionalRHSTy)
    return false;

  QualType LHSTy = *OptionalLHSTy;
  QualType RHSTy = *OptionalRHSTy;

  // This is the simple case: binops without unsigned multiplication, and with
  // widened operands. No overflow check is needed here.
  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
      !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
    return true;

  // For unsigned multiplication the overflow check can be elided if either
  // one of the unpromoted types is less than half the size of the promoted
  // type.
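  // For example, when promoting to a 32-bit int, two 8-bit unsigned operands
  // multiply to at most 255 * 255 = 65025, which always fits, while two
  // 16-bit unsigned operands can produce 65535 * 65535, which does not.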
  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
         (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
}

class ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira = false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  // Utilities
  //===--------------------------------------------------------------------===//

  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
                      const BinOpInfo &Info);

  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }

  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        if (const auto *TTy =
                VD->getType().getNonReferenceType()->getAs<TypedefType>())
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of
        // the function, so there is no need to repeat that here, unless the
        // alignment-assumption sanitizer is enabled; in that case we prefer
        // the assumption over the alignment attribute on the IR function
        // parameter.
        if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    if (!AVAttr)
      if (const auto *TTy = E->getType()->getAs<TypedefType>())
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
  }

  /// EmitLoadOfLValue - Given an expression with scalar type that represents
  /// an l-value, this method emits the address of the l-value, then loads
  /// and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
                                E->getExprLoc());

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value. This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a check that an [implicit] conversion of an integer does not change
  /// the sign of the value. It is not UB, so we use the value after conversion.
  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;
    bool EmitImplicitIntegerSignChangeChecks;

    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false),
          EmitImplicitIntegerSignChangeChecks(false) {}

    ScalarConversionOpts(clang::SanitizerSet SanOpts)
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(
              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          EmitImplicitIntegerSignChangeChecks(
              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
  };
  Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
                        llvm::Type *SrcTy, llvm::Type *DstTy,
                        ScalarConversionOpts Opts);
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());

  /// Convert between a fixed point type and another fixed point type, or
  /// between a fixed point type and an integer type.
  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);

  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
  Value *EmitFloatToBoolConversion(Value *V) {
    // Compare against 0.0 for fp scalars.
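    // The unordered 'une' predicate makes NaN convert to true, matching the
    // C semantics of "Val != 0" (NaN compares unequal to zero).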
    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
    return Builder.CreateFCmpUNE(V, Zero, "tobool");
  }

  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
    Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()),
                                         QT);

    return Builder.CreateICmpNE(V, Zero, "tobool");
  }

  Value *EmitIntToBoolConversion(Value *V) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again. Optimize this common case.
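    // For example, "int i = (a < b);" zero-extends the i1 comparison result
    // to i32; a following "if (i)" only needs the original i1 back.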
    if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
      if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
        Value *Result = ZI->getOperand(0);
        // If there aren't any more uses, zap the instruction to save space.
        // Note that there can be more uses, for example if this
        // is the result of an assignment.
        if (ZI->use_empty())
          ZI->eraseFromParent();
        return Result;
      }
    }

    return Builder.CreateIsNotNull(V, "tobool");
  }

  //===--------------------------------------------------------------------===//
  // Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    S->dump(llvm::errs(), CGF.getContext());
    llvm_unreachable("Stmt can't have complex result type!");
  }
  Value *VisitExpr(Expr *S);

  Value *VisitConstantExpr(ConstantExpr *E) {
    // A constant expression of type 'void' generates no code and produces no
    // value.
    if (E->getType()->isVoidType())
      return nullptr;

    if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      if (E->isGLValue())
        return CGF.Builder.CreateLoad(Address(
            Result, CGF.ConvertTypeForMem(E->getType()),
            CGF.getContext().getTypeAlignInChars(E->getType())));
      return Result;
    }
    return Visit(E->getSubExpr());
  }
  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    if (E->getType()->isVoidType())
      return nullptr;

    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);

  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
                              E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
  }

  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    if (E->getMethodDecl() &&
        E->getMethodDecl()->getReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
    return V;
  }

  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
    VersionTuple Version = E->getVersion();

    // If we're checking for a platform older than our minimum deployment
    // target, we can fold the check away.
    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);

    return CGF.EmitBuiltinAvailable(Version);
  }

  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
    // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
    // literals aren't l-values in C++. We do so simply because that's the
    // cleanest way to handle compound literals in C++.
    // See the discussion here: https://reviews.llvm.org/D64464
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    if (E->getCallReturnType(CGF.getContext())->isReferenceType())
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);


  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer(CGF);
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }

  Value *VisitUnaryPlus(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryMinus(const UnaryOperator *E,
                         QualType PromotionType = QualType());
  Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);

  Value *VisitUnaryNot (const UnaryOperator *E);
  Value *VisitUnaryLNot (const UnaryOperator *E);
  Value *VisitUnaryReal(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryImag(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
    auto &Ctx = CGF.getContext();
    APValue Evaluated =
        SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
    return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
                                             SLE->getType());
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitRequiresExpr(const RequiresExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }

  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
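      // -fwrapv selects SOB_Defined, the default is SOB_Undefined, and
      // -ftrapv selects SOB_Trapping.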
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
      auto *BO = cast<BinaryOperator>(Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getRHS()->getType().getCanonicalType());
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
                                       LHSMatTy->getNumColumns(),
                                       RHSMatTy->getNumColumns());
      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      // Preserve the old values
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero, bool isDiv);
  // Common helper for getting how wide the LHS of a shift is.
  static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS);

  // Used for shift constraints in OpenCL: mask the shift amount when the
  // bit width is a power of two, and use urem when it is not.
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  BinOpInfo EmitBinOps(const BinaryOperator *E,
                       QualType PromotionTy = QualType());

  Value *EmitPromotedValue(Value *result, QualType PromotionType);
  Value *EmitUnPromotedValue(Value *result, QualType ExprType);
  Value *EmitPromoted(const Expr *E, QualType PromotionType);

  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                                  Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));

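  /// Return the type to which \p Ty should be promoted when excess-precision
  /// arithmetic applies (for example, _Float16 promoted to float), or a null
  /// QualType if no promotion is needed.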
  QualType getPromotionType(QualType Ty) {
    const auto &Ctx = CGF.getContext();
    if (auto *CT = Ty->getAs<ComplexType>()) {
      QualType ElementType = CT->getElementType();
      if (ElementType.UseExcessPrecision(Ctx))
        return Ctx.getComplexType(Ctx.FloatTy);
    }

    if (Ty.UseExcessPrecision(Ctx)) {
      if (auto *VT = Ty->getAs<VectorType>()) {
        unsigned NumElements = VT->getNumElements();
        return Ctx.getVectorType(Ctx.FloatTy, NumElements, VT->getVectorKind());
      }
      return Ctx.FloatTy;
    }

    return QualType();
  }

  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP)                                                        \
  Value *VisitBin##OP(const BinaryOperator *E) {                               \
    QualType promotionTy = getPromotionType(E->getType());                     \
    auto result = Emit##OP(EmitBinOps(E, promotionTy));                        \
    if (result && !promotionTy.isNull())                                       \
      result = EmitUnPromotedValue(result, E->getType());                      \
    return result;                                                             \
  }                                                                            \
  Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) {               \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP);                \
  }
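  // For example, HANDLEBINOP(Mul) defines VisitBinMul and VisitBinMulAssign,
  // both of which dispatch to EmitMul.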
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP

  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
#define VISITCOMP(CODE, UI, SI, FP, SIG)                                       \
  Value *VisitBin##CODE(const BinaryOperator *E) {                             \
    return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI,              \
                       llvm::FCmpInst::FP, SIG); }
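  // For example, VISITCOMP(LT, ...) defines VisitBinLT, which selects the
  // unsigned, signed, or floating-point predicate based on the operand types;
  // the final argument marks the relational comparisons as signaling.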
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP

  Value *VisitBinAssign (const BinaryOperator *E);

  Value *VisitBinLAnd (const BinaryOperator *E);
  Value *VisitBinLOr (const BinaryOperator *E);
  Value *VisitBinComma (const BinaryOperator *E);

  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
  Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
    return Visit(E->getSelectedExpr());
  }
};
} // end anonymous namespace.

//===----------------------------------------------------------------------===//
// Utilities
//===----------------------------------------------------------------------===//

/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

  if (SrcType->isRealFloatingType())
    return EmitFloatToBoolConversion(Src);

  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  if (isa<llvm::IntegerType>(Src->getType()))
    return EmitIntToBoolConversion(Src);

  assert(isa<llvm::PointerType>(Src->getType()));
  return EmitPointerToBoolConversion(Src, SrcType);
}

void ScalarExprEmitter::EmitFloatConversionCheck(
    Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
    QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
  assert(SrcType->isFloatingType() && "not a conversion from floating point");
  if (!isa<llvm::IntegerType>(DstTy))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  using llvm::APFloat;
  using llvm::APSInt;

  llvm::Value *Check = nullptr;
  const llvm::fltSemantics &SrcSema =
      CGF.getContext().getFloatTypeSemantics(OrigSrcType);

  // Floating-point to integer. This has undefined behavior if the source is
  // +-Inf, NaN, or doesn't fit into the destination type (after truncation
  // to an integer).
  unsigned Width = CGF.getContext().getIntWidth(DstType);
  bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();

  APSInt Min = APSInt::getMinValue(Width, Unsigned);
  APFloat MinSrc(SrcSema, APFloat::uninitialized);
  if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for lower bound. Just check for
    // -Inf/NaN.
    MinSrc = APFloat::getInf(SrcSema, true);
  else
    // Find the largest value which is too small to represent (before
    // truncation toward zero).
    MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);

  APSInt Max = APSInt::getMaxValue(Width, Unsigned);
  APFloat MaxSrc(SrcSema, APFloat::uninitialized);
  if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for upper bound. Just check for
    // +Inf/NaN.
    MaxSrc = APFloat::getInf(SrcSema, false);
  else
    // Find the smallest value which is too large to represent (before
    // truncation toward zero).
    MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);

  // If we're converting from __half, convert the range to float to match
  // the type of src.
  if (OrigSrcType->isHalfType()) {
    const llvm::fltSemantics &Sema =
        CGF.getContext().getFloatTypeSemantics(SrcType);
    bool IsInexact;
    MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
    MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
  }

  llvm::Value *GE =
      Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
  llvm::Value *LE =
      Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
  Check = Builder.CreateAnd(GE, LE);
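  // Both comparisons above use ordered predicates, which are false when Src
  // is NaN, so NaN inputs fail the check as required. For example, for a
  // conversion to signed i8 this checks "Src > -129.0 && Src < 128.0".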

  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
                                  CGF.EmitCheckTypeDescriptor(OrigSrcType),
                                  CGF.EmitCheckTypeDescriptor(DstType)};
  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
                SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the truncation Src -> Dst was lossy.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();
  (void)DstTy; // Only used in assert()

  // This should be truncation of integral types.
  assert(Src != Dst);
  assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  // If both (src and dst) types are unsigned, then it's an unsigned truncation.
  // Else, it is a signed truncation.
  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
  SanitizerMask Mask;
  if (!SrcSigned && !DstSigned) {
    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
  } else {
    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
  }

  llvm::Value *Check = nullptr;
  // 1. Extend the truncated value back to the same width as the Src.
  Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
  // 2. Equality-compare with the original source value.
  Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
  // If the comparison result is 'i1 false', then the truncation was lossy.
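  // For an i32 -> i16 truncation, this emits roughly:
  //   %anyext = sext i16 %Dst to i32          ; zext if Dst is unsigned
  //   %truncheck = icmp eq i32 %anyext, %Src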
  return std::make_pair(Kind, std::make_pair(Check, Mask));
}

static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
    QualType SrcType, QualType DstType) {
  return SrcType->isIntegerType() && DstType->isIntegerType();
}

void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  unsigned SrcBits = Src->getType()->getScalarSizeInBits();
  unsigned DstBits = Dst->getType()->getScalarSizeInBits();
  // This must be truncation. Else we do not care.
  if (SrcBits <= DstBits)
    return;

  assert(!DstType->isBooleanType() && "we should not get here with booleans.");

  // If the integer sign change sanitizer is enabled,
  // and we are truncating from larger unsigned type to smaller signed type,
  // let that next sanitizer deal with it.
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
      (!SrcSigned && DstSigned))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check =
          EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
  // If the comparison result is 'i1 false', then the truncation was lossy.

  // Do we care about this type of truncation?
  if (!CGF.SanOpts.has(Check.second.second))
    return;

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
  CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  (void)SrcSigned; // Only used in assert()
  (void)DstSigned; // Only used in assert()
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();
  (void)SrcBits; // Only used in assert()
  (void)DstBits; // Only used in assert()

  assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
         "either the widths should be different, or the signednesses.");

  // NOTE: zero value is considered to be non-negative.
  auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
                                       const char *Name) -> Value * {
    // Is this value a signed type?
    bool VSigned = VType->isSignedIntegerOrEnumerationType();
    llvm::Type *VTy = V->getType();
    if (!VSigned) {
      // If the value is unsigned, then it is never negative.
      // FIXME: can we encounter non-scalar VTy here?
      return llvm::ConstantInt::getFalse(VTy->getContext());
    }
    // Get the zero of the same type with which we will be comparing.
    llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
    // %V.isnegative = icmp slt %V, 0
    // I.e. is %V *strictly* less than zero, does it have a negative value?
    return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
                              llvm::Twine(Name) + "." + V->getName() +
                                  ".negativitycheck");
  };

  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
  // 3. Now, was the 'negativity status' preserved during the conversion?
  // NOTE: conversion from negative to zero is considered to change the sign.
  // (We want to get 'false' when the conversion changed the sign)
  // So we should just equality-compare the negativity statuses.
  llvm::Value *Check = nullptr;
  Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
  // If the comparison result is 'false', then the conversion changed the sign.
  return std::make_pair(
      ScalarExprEmitter::ICCK_IntegerSignChange,
      std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
}

void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
    return;

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();

  // We do not need to emit the check in *all* cases: we can skip the obvious
  // cases where the optimizer (instcombine) would always drop the check
  // anyway.
  // If it's a cast between effectively the same type, no check.
  // NOTE: this is *not* equivalent to checking the canonical types.
  if (SrcSigned == DstSigned && SrcBits == DstBits)
    return;
  // At least one of the values needs to have signed type.
  // If both are unsigned, then obviously, neither of them can be negative.
  if (!SrcSigned && !DstSigned)
    return;
  // If the conversion is to *larger* *signed* type, then no check is needed.
  // Because either sign-extension happens (so the sign will remain),
  // or zero-extension will happen (the sign bit will be zero.)
  if ((DstBits > SrcBits) && DstSigned)
    return;
  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && SrcSigned) {
    // If the signed integer truncation sanitizer is enabled,
    // and this is a truncation from signed type, then no check is needed.
    // Because here sign change check is interchangeable with truncation check.
    return;
  }
  // That's it. We can't rule out any more cases with the data we have.

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check;

  // Each of these checks needs to return 'false' when an issue was detected.
  ImplicitConversionCheckKind CheckKind;
  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
  // So we can 'and' all the checks together, and still get 'false',
  // if at least one of the checks detected an issue.

  Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  CheckKind = Check.first;
  Checks.emplace_back(Check.second);

  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && !SrcSigned && DstSigned) {
    // If the signed integer truncation sanitizer was enabled,
    // and we are truncating from larger unsigned type to smaller signed type,
    // let's handle the case we skipped in that check.
    Check =
        EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
    CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
    Checks.emplace_back(Check.second);
    // If the comparison result is 'i1 false', then the truncation was lossy.
  }

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
  // EmitCheck() will 'and' all the checks together.
  CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
                                         QualType DstType, llvm::Type *SrcTy,
                                         llvm::Type *DstTy,
                                         ScalarConversionOpts Opts) {
  // The Element types determine the type of cast to perform.
  llvm::Type *SrcElementTy;
  llvm::Type *DstElementTy;
  QualType SrcElementType;
  QualType DstElementType;
  if (SrcType->isMatrixType() && DstType->isMatrixType()) {
    SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
    DstElementType = DstType->castAs<MatrixType>()->getElementType();
  } else {
    assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
           "cannot cast between matrix and non-matrix types");
    SrcElementTy = SrcTy;
    DstElementTy = DstTy;
    SrcElementType = SrcType;
    DstElementType = DstType;
  }

  if (isa<llvm::IntegerType>(SrcElementTy)) {
    bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
    if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
      InputSigned = true;
    }

    if (isa<llvm::IntegerType>(DstElementTy))
      return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    if (InputSigned)
      return Builder.CreateSIToFP(Src, DstTy, "conv");
    return Builder.CreateUIToFP(Src, DstTy, "conv");
  }

  if (isa<llvm::IntegerType>(DstElementTy)) {
    assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
    bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();

    // If we can't recognize overflow as undefined behavior, assume that
    // overflow saturates. This protects against normal optimizations if we are
    // compiling with non-standard FP semantics.
    if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
      llvm::Intrinsic::ID IID =
          IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
      return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
    }

    if (IsSigned)
      return Builder.CreateFPToSI(Src, DstTy, "conv");
    return Builder.CreateFPToUI(Src, DstTy, "conv");
  }

  if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
    return Builder.CreateFPTrunc(Src, DstTy, "conv");
  return Builder.CreateFPExt(Src, DstTy, "conv");
}

/// Emit a conversion from the specified type to the specified destination
/// type, both of which are LLVM scalar types.
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
                                               QualType DstType,
                                               SourceLocation Loc,
                                               ScalarConversionOpts Opts) {
  // All conversions involving fixed point types should be handled by the
  // EmitFixedPoint family functions. This is done to prevent bloating up this
  // function more, and although fixed point numbers are represented by
  // integers, we do not want to follow any logic that assumes they should be
  // treated as integers.
  // TODO(leonardchan): When necessary, add another if statement checking for
  // conversions to fixed point types from other types.
  if (SrcType->isFixedPointType()) {
    if (DstType->isBooleanType())
      // It is important that we check this before checking if the dest type is
      // an integer because booleans are technically integer types.
      // We do not need to check the padding bit on unsigned types if unsigned
      // padding is enabled because overflow into this bit is undefined
      // behavior.
      return Builder.CreateIsNotNull(Src, "tobool");
    if (DstType->isFixedPointType() || DstType->isIntegerType() ||
        DstType->isRealFloatingType())
      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);

    llvm_unreachable(
        "Unhandled scalar conversion from a fixed point type to another type.");
  } else if (DstType->isFixedPointType()) {
    if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
      // This also includes converting booleans and enums to fixed point types.
      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);

    llvm_unreachable(
        "Unhandled scalar conversion to a fixed point type from another type.");
  }

  QualType NoncanonicalSrcType = SrcType;
  QualType NoncanonicalDstType = DstType;

  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  if (DstType->isVoidType()) return nullptr;

  llvm::Value *OrigSrc = Src;
  QualType OrigSrcType = SrcType;
  llvm::Type *SrcTy = Src->getType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstType->isBooleanType())
    return EmitConversionToBool(Src, SrcType);

  llvm::Type *DstTy = ConvertType(DstType);

  // Cast from half through float if half isn't a native type.
  if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Cast to FP using the intrinsic if the half type itself isn't supported.
    if (DstTy->isFloatingPointTy()) {
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
            Src);
    } else {
      // Cast to other types through float, using either the intrinsic or FPExt,
      // depending on whether the half type itself is supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
        Src = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
                                 CGF.CGM.FloatTy),
            Src);
      } else {
        Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
      }
      SrcType = CGF.getContext().FloatTy;
      SrcTy = CGF.FloatTy;
    }
  }

  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy) {
    if (Opts.EmitImplicitIntegerSignChangeChecks)
      EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
                                 NoncanonicalDstType, Loc);

    return Src;
  }

  // Handle pointer conversions next: pointers can only be converted to/from
  // other pointers and integers. Check for pointer types in terms of LLVM, as
  // some native types (like Obj-C id) may map to a pointer type.
  if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
    // The source value may be an integer, or a pointer.
    if (isa<llvm::PointerType>(SrcTy))
      return Builder.CreateBitCast(Src, DstTy, "conv");

    assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
    // First, convert to the correct width so that we control the kind of
    // extension.
    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
    bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
    llvm::Value *IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
    // Then, cast to pointer.
    return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
  }

  if (isa<llvm::PointerType>(SrcTy)) {
1410 // Must be a ptr-to-int cast.
1411 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1412 return Builder.CreatePtrToInt(V: Src, DestTy: DstTy, Name: "conv");
1413 }
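// For illustration, on a 64-bit target `(char *)i` for a signed 32-bit `i`
// is emitted roughly as:
//   %conv  = sext i32 %i to i64        ; widen to the pointer-sized integer
//   %conv1 = inttoptr i64 %conv to ptr
// and the reverse `(long)p` is a single `ptrtoint ptr %p to i64`.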
1414
1415 // A scalar can be splatted to an extended vector of the same element type
1416 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1417 // Sema should add casts to make sure that the source expression's type is
1418 // the same as the vector's element type (sans qualifiers)
1419 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1420 SrcType.getTypePtr() &&
1421 "Splatted expr doesn't match with vector element type?");
1422
1423 // Splat the element across to all elements
1424 unsigned NumElements = cast<llvm::FixedVectorType>(Val: DstTy)->getNumElements();
1425 return Builder.CreateVectorSplat(NumElts: NumElements, V: Src, Name: "splat");
1426 }
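// E.g. splatting a float `f` to a 4-element ext_vector_type is emitted
// roughly as:
//   %splat.splatinsert = insertelement <4 x float> poison, float %f, i64 0
//   %splat = shufflevector <4 x float> %splat.splatinsert,
//                          <4 x float> poison, <4 x i32> zeroinitializer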
1427
1428 if (SrcType->isMatrixType() && DstType->isMatrixType())
1429 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1430
1431 if (isa<llvm::VectorType>(Val: SrcTy) || isa<llvm::VectorType>(Val: DstTy)) {
1432 // Allow bitcast from vector to integer/fp of the same size.
1433 llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
1434 llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
1435 if (SrcSize == DstSize)
1436 return Builder.CreateBitCast(V: Src, DestTy: DstTy, Name: "conv");
1437
1438 // Conversions between vectors of different sizes are not allowed except
1439 // when vectors of half are involved. Operations on storage-only half
1440 // vectors require promoting half vector operands to float vectors and
1441 // truncating the result, which is either an int or float vector, to a
1442 // short or half vector.
1443
1444 // Source and destination are both expected to be vectors.
1445 llvm::Type *SrcElementTy = cast<llvm::VectorType>(Val: SrcTy)->getElementType();
1446 llvm::Type *DstElementTy = cast<llvm::VectorType>(Val: DstTy)->getElementType();
1447 (void)DstElementTy;
1448
1449 assert(((SrcElementTy->isIntegerTy() &&
1450 DstElementTy->isIntegerTy()) ||
1451 (SrcElementTy->isFloatingPointTy() &&
1452 DstElementTy->isFloatingPointTy())) &&
1453 "unexpected conversion between a floating-point vector and an "
1454 "integer vector");
1455
1456 // Truncate an i32 vector to an i16 vector.
1457 if (SrcElementTy->isIntegerTy())
1458 return Builder.CreateIntCast(V: Src, DestTy: DstTy, isSigned: false, Name: "conv");
1459
1460 // Truncate a float vector to a half vector.
1461 if (SrcSize > DstSize)
1462 return Builder.CreateFPTrunc(V: Src, DestTy: DstTy, Name: "conv");
1463
1464 // Promote a half vector to a float vector.
1465 return Builder.CreateFPExt(V: Src, DestTy: DstTy, Name: "conv");
1466 }
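// As a concrete example of the storage-only half path: for a __fp16 vector
// without native half support, `h4 + h4` promotes both operands with
// `fpext <4 x half> to <4 x float>`, adds in float, and the result comes
// back through this function as `fptrunc <4 x float> to <4 x half>`.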
1467
1468 // Finally, we have the arithmetic types: real int/float.
1469 Value *Res = nullptr;
1470 llvm::Type *ResTy = DstTy;
1471
1472 // An overflowing conversion has undefined behavior if either the source type
1473 // or the destination type is a floating-point type. However, we consider the
1474 // range of representable values for all floating-point types to be
1475 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1476 // floating-point type.
1477 if (CGF.SanOpts.has(K: SanitizerKind::FloatCastOverflow) &&
1478 OrigSrcType->isFloatingType())
1479 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1480 Loc);
1481
1482 // Cast to half through float if half isn't a native type.
1483 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1484 // Make sure we cast in a single step if from another FP type.
1485 if (SrcTy->isFloatingPointTy()) {
1486 // Use the intrinsic if the half type itself isn't supported
1487 // (as opposed to operations on half, available with NativeHalfType).
1488 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1489 return Builder.CreateCall(
1490 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1491 // If the half type is supported, just use an fptrunc.
1492 return Builder.CreateFPTrunc(V: Src, DestTy: DstTy);
1493 }
1494 DstTy = CGF.FloatTy;
1495 }
1496
1497 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1498
1499 if (DstTy != ResTy) {
1500 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1501 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1502 Res = Builder.CreateCall(
1503 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1504 Res);
1505 } else {
1506 Res = Builder.CreateFPTrunc(V: Res, DestTy: ResTy, Name: "conv");
1507 }
1508 }
1509
1510 if (Opts.EmitImplicitIntegerTruncationChecks)
1511 EmitIntegerTruncationCheck(Src, SrcType: NoncanonicalSrcType, Dst: Res,
1512 DstType: NoncanonicalDstType, Loc);
1513
1514 if (Opts.EmitImplicitIntegerSignChangeChecks)
1515 EmitIntegerSignChangeCheck(Src, SrcType: NoncanonicalSrcType, Dst: Res,
1516 DstType: NoncanonicalDstType, Loc);
1517
1518 return Res;
1519}
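// A worked example of the tail of this function: for an implicit
// `int -> signed char` conversion under
// -fsanitize=implicit-signed-integer-truncation, EmitScalarCast produces a
// trunc, and the truncation check then widens the result back and compares
// (a sketch of the emitted IR):
//   %conv = trunc i32 %x to i8
//   %ext  = sext i8 %conv to i32
//   %ok   = icmp eq i32 %ext, %x
//   br i1 %ok, label %cont, label %ubsan.handler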
1520
1521Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1522 QualType DstTy,
1523 SourceLocation Loc) {
1524 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1525 llvm::Value *Result;
1526 if (SrcTy->isRealFloatingType())
1527 Result = FPBuilder.CreateFloatingToFixed(Src,
1528 DstSema: CGF.getContext().getFixedPointSemantics(Ty: DstTy));
1529 else if (DstTy->isRealFloatingType())
1530 Result = FPBuilder.CreateFixedToFloating(Src,
1531 SrcSema: CGF.getContext().getFixedPointSemantics(Ty: SrcTy),
1532 DstTy: ConvertType(T: DstTy));
1533 else {
1534 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(Ty: SrcTy);
1535 auto DstFPSema = CGF.getContext().getFixedPointSemantics(Ty: DstTy);
1536
1537 if (DstTy->isIntegerType())
1538 Result = FPBuilder.CreateFixedToInteger(Src, SrcSema: SrcFPSema,
1539 DstWidth: DstFPSema.getWidth(),
1540 DstIsSigned: DstFPSema.isSigned());
1541 else if (SrcTy->isIntegerType())
1542 Result = FPBuilder.CreateIntegerToFixed(Src, SrcIsSigned: SrcFPSema.isSigned(),
1543 DstSema: DstFPSema);
1544 else
1545 Result = FPBuilder.CreateFixedToFixed(Src, SrcSema: SrcFPSema, DstSema: DstFPSema);
1546 }
1547 return Result;
1548}
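// For illustration: `short _Accum -> _Accum` (with, e.g., 7 and 15
// fractional bits on a typical target; the exact widths and scales come
// from the target's fixed-point semantics, so these numbers are only an
// assumption) goes through CreateFixedToFixed, which widens the value and
// shifts left by the difference in scale; saturating destinations clamp.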
1549
1550/// Emit a conversion from the specified complex type to the specified
1551/// destination type, where the destination type is an LLVM scalar type.
1552Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1553 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1554 SourceLocation Loc) {
1555 // Get the source element type.
1556 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1557
1558 // Handle conversions to bool first; they are special: comparisons against 0.
1559 if (DstTy->isBooleanType()) {
1560 // Complex != 0 -> (Real != 0) | (Imag != 0)
1561 Src.first = EmitScalarConversion(Src: Src.first, SrcType: SrcTy, DstType: DstTy, Loc);
1562 Src.second = EmitScalarConversion(Src: Src.second, SrcType: SrcTy, DstType: DstTy, Loc);
1563 return Builder.CreateOr(LHS: Src.first, RHS: Src.second, Name: "tobool");
1564 }
1565
1566 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1567 // the imaginary part of the complex value is discarded and the value of the
1568 // real part is converted according to the conversion rules for the
1569 // corresponding real type."
1570 return EmitScalarConversion(Src: Src.first, SrcType: SrcTy, DstType: DstTy, Loc);
1571}
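// E.g. `(bool)z` for a `_Complex double z` is emitted roughly as:
//   %r      = fcmp une double %z.real, 0.0
//   %i      = fcmp une double %z.imag, 0.0
//   %tobool = or i1 %r, %i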
1572
1573Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1574 return CGF.EmitFromMemory(Value: CGF.CGM.EmitNullConstant(T: Ty), Ty);
1575}
1576
1577/// Emit a sanitization check for the given "binary" operation (which
1578/// might actually be a unary increment which has been lowered to a binary
1579/// operation). The check passes if all values in \p Checks (which are \c i1),
1580/// are \c true.
1581void ScalarExprEmitter::EmitBinOpCheck(
1582 ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
1583 assert(CGF.IsSanitizerScope);
1584 SanitizerHandler Check;
1585 SmallVector<llvm::Constant *, 4> StaticData;
1586 SmallVector<llvm::Value *, 2> DynamicData;
1587
1588 BinaryOperatorKind Opcode = Info.Opcode;
1589 if (BinaryOperator::isCompoundAssignmentOp(Opc: Opcode))
1590 Opcode = BinaryOperator::getOpForCompoundAssignment(Opc: Opcode);
1591
1592 StaticData.push_back(Elt: CGF.EmitCheckSourceLocation(Loc: Info.E->getExprLoc()));
1593 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Val: Info.E);
1594 if (UO && UO->getOpcode() == UO_Minus) {
1595 Check = SanitizerHandler::NegateOverflow;
1596 StaticData.push_back(Elt: CGF.EmitCheckTypeDescriptor(T: UO->getType()));
1597 DynamicData.push_back(Elt: Info.RHS);
1598 } else {
1599 if (BinaryOperator::isShiftOp(Opc: Opcode)) {
1600 // Shift LHS negative or too large, or RHS out of bounds.
1601 Check = SanitizerHandler::ShiftOutOfBounds;
1602 const BinaryOperator *BO = cast<BinaryOperator>(Val: Info.E);
1603 StaticData.push_back(
1604 Elt: CGF.EmitCheckTypeDescriptor(T: BO->getLHS()->getType()));
1605 StaticData.push_back(
1606 Elt: CGF.EmitCheckTypeDescriptor(T: BO->getRHS()->getType()));
1607 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1608 // Divide or modulo by zero, or signed overflow (e.g. INT_MIN / -1).
1609 Check = SanitizerHandler::DivremOverflow;
1610 StaticData.push_back(Elt: CGF.EmitCheckTypeDescriptor(T: Info.Ty));
1611 } else {
1612 // Arithmetic overflow (+, -, *).
1613 switch (Opcode) {
1614 case BO_Add: Check = SanitizerHandler::AddOverflow; break;
1615 case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
1616 case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
1617 default: llvm_unreachable("unexpected opcode for bin op check");
1618 }
1619 StaticData.push_back(Elt: CGF.EmitCheckTypeDescriptor(T: Info.Ty));
1620 }
1621 DynamicData.push_back(Elt: Info.LHS);
1622 DynamicData.push_back(Elt: Info.RHS);
1623 }
1624
1625 CGF.EmitCheck(Checked: Checks, Check, StaticArgs: StaticData, DynamicArgs: DynamicData);
1626}
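// For a checked `a + b` on int, for example, the resulting handler call is
// roughly (a sketch; EmitCheck owns the exact ABI):
//   call void @__ubsan_handle_add_overflow(ptr @static_data, i64 %a, i64 %b)
// where @static_data bundles the source location and the 'int' type
// descriptor collected in StaticData above.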
1627
1628//===----------------------------------------------------------------------===//
1629// Visitor Methods
1630//===----------------------------------------------------------------------===//
1631
1632Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1633 CGF.ErrorUnsupported(E, "scalar expression");
1634 if (E->getType()->isVoidType())
1635 return nullptr;
1636 return llvm::UndefValue::get(T: CGF.ConvertType(T: E->getType()));
1637}
1638
1639Value *
1640ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
1641 ASTContext &Context = CGF.getContext();
1642 unsigned AddrSpace =
1643 Context.getTargetAddressSpace(AS: CGF.CGM.GetGlobalConstantAddressSpace());
1644 llvm::Constant *GlobalConstStr = Builder.CreateGlobalStringPtr(
1645 Str: E->ComputeName(Context), Name: "__usn_str", AddressSpace: AddrSpace);
1646
1647 llvm::Type *ExprTy = ConvertType(T: E->getType());
1648 return Builder.CreatePointerBitCastOrAddrSpaceCast(V: GlobalConstStr, DestTy: ExprTy,
1649 Name: "usn_addr_cast");
1650}
1651
1652Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1653 // Vector Mask Case
1654 if (E->getNumSubExprs() == 2) {
1655 Value *LHS = CGF.EmitScalarExpr(E: E->getExpr(Index: 0));
1656 Value *RHS = CGF.EmitScalarExpr(E: E->getExpr(Index: 1));
1657 Value *Mask;
1658
1659 auto *LTy = cast<llvm::FixedVectorType>(Val: LHS->getType());
1660 unsigned LHSElts = LTy->getNumElements();
1661
1662 Mask = RHS;
1663
1664 auto *MTy = cast<llvm::FixedVectorType>(Val: Mask->getType());
1665
1666 // Mask off the high bits of each shuffle index.
1667 Value *MaskBits =
1668 llvm::ConstantInt::get(Ty: MTy, V: llvm::NextPowerOf2(A: LHSElts - 1) - 1);
1669 Mask = Builder.CreateAnd(LHS: Mask, RHS: MaskBits, Name: "mask");
1670
1671 // newv = poison
1672 // mask = mask & maskbits
1673 // for each elt
1674 // n = extract mask i
1675 // x = extract val n
1676 // newv = insert newv, x, i
1677 auto *RTy = llvm::FixedVectorType::get(ElementType: LTy->getElementType(),
1678 NumElts: MTy->getNumElements());
1679 Value* NewV = llvm::PoisonValue::get(T: RTy);
1680 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1681 Value *IIndx = llvm::ConstantInt::get(Ty: CGF.SizeTy, V: i);
1682 Value *Indx = Builder.CreateExtractElement(Vec: Mask, Idx: IIndx, Name: "shuf_idx");
1683
1684 Value *VExt = Builder.CreateExtractElement(Vec: LHS, Idx: Indx, Name: "shuf_elt");
1685 NewV = Builder.CreateInsertElement(Vec: NewV, NewElt: VExt, Idx: IIndx, Name: "shuf_ins");
1686 }
1687 return NewV;
1688 }
1689
1690 Value* V1 = CGF.EmitScalarExpr(E: E->getExpr(Index: 0));
1691 Value* V2 = CGF.EmitScalarExpr(E: E->getExpr(Index: 1));
1692
1693 SmallVector<int, 32> Indices;
1694 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1695 llvm::APSInt Idx = E->getShuffleMaskIdx(Ctx: CGF.getContext(), N: i-2);
1696 // Check for -1 and output it as undef in the IR.
1697 if (Idx.isSigned() && Idx.isAllOnes())
1698 Indices.push_back(Elt: -1);
1699 else
1700 Indices.push_back(Elt: Idx.getZExtValue());
1701 }
1702
1703 return Builder.CreateShuffleVector(V1, V2, Mask: Indices, Name: "shuffle");
1704}
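// Two illustrative forms of __builtin_shufflevector (assuming 4-element
// vectors v, a, b and an integer mask vector m):
//   __builtin_shufflevector(v, m)             -> the variable-mask loop
//     above, i.e. extract/insert pairs driven by `m & 3` per element;
//   __builtin_shufflevector(a, b, 0, 4, 1, 5) -> a single
//     `shufflevector <4 x T> %a, <4 x T> %b,
//                    <4 x i32> <i32 0, i32 4, i32 1, i32 5>`.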
1705
1706Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1707 QualType SrcType = E->getSrcExpr()->getType(),
1708 DstType = E->getType();
1709
1710 Value *Src = CGF.EmitScalarExpr(E: E->getSrcExpr());
1711
1712 SrcType = CGF.getContext().getCanonicalType(T: SrcType);
1713 DstType = CGF.getContext().getCanonicalType(T: DstType);
1714 if (SrcType == DstType) return Src;
1715
1716 assert(SrcType->isVectorType() &&
1717 "ConvertVector source type must be a vector");
1718 assert(DstType->isVectorType() &&
1719 "ConvertVector destination type must be a vector");
1720
1721 llvm::Type *SrcTy = Src->getType();
1722 llvm::Type *DstTy = ConvertType(T: DstType);
1723
1724 // Ignore conversions like int -> uint.
1725 if (SrcTy == DstTy)
1726 return Src;
1727
1728 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
1729 DstEltType = DstType->castAs<VectorType>()->getElementType();
1730
1731 assert(SrcTy->isVectorTy() &&
1732 "ConvertVector source IR type must be a vector");
1733 assert(DstTy->isVectorTy() &&
1734 "ConvertVector destination IR type must be a vector");
1735
1736 llvm::Type *SrcEltTy = cast<llvm::VectorType>(Val: SrcTy)->getElementType(),
1737 *DstEltTy = cast<llvm::VectorType>(Val: DstTy)->getElementType();
1738
1739 if (DstEltType->isBooleanType()) {
1740 assert((SrcEltTy->isFloatingPointTy() ||
1741 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
1742
1743 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: SrcTy);
1744 if (SrcEltTy->isFloatingPointTy()) {
1745 return Builder.CreateFCmpUNE(LHS: Src, RHS: Zero, Name: "tobool");
1746 } else {
1747 return Builder.CreateICmpNE(LHS: Src, RHS: Zero, Name: "tobool");
1748 }
1749 }
1750
1751 // We have the arithmetic types: real int/float.
1752 Value *Res = nullptr;
1753
1754 if (isa<llvm::IntegerType>(Val: SrcEltTy)) {
1755 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1756 if (isa<llvm::IntegerType>(Val: DstEltTy))
1757 Res = Builder.CreateIntCast(V: Src, DestTy: DstTy, isSigned: InputSigned, Name: "conv");
1758 else if (InputSigned)
1759 Res = Builder.CreateSIToFP(V: Src, DestTy: DstTy, Name: "conv");
1760 else
1761 Res = Builder.CreateUIToFP(V: Src, DestTy: DstTy, Name: "conv");
1762 } else if (isa<llvm::IntegerType>(Val: DstEltTy)) {
1763 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
1764 if (DstEltType->isSignedIntegerOrEnumerationType())
1765 Res = Builder.CreateFPToSI(V: Src, DestTy: DstTy, Name: "conv");
1766 else
1767 Res = Builder.CreateFPToUI(V: Src, DestTy: DstTy, Name: "conv");
1768 } else {
1769 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
1770 "Unknown real conversion");
1771 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
1772 Res = Builder.CreateFPTrunc(V: Src, DestTy: DstTy, Name: "conv");
1773 else
1774 Res = Builder.CreateFPExt(V: Src, DestTy: DstTy, Name: "conv");
1775 }
1776
1777 return Res;
1778}
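// E.g. `__builtin_convertvector(v, int4)` from a <4 x float> `v` lowers to
// `fptosi <4 x float> %v to <4 x i32>`; a float -> double vector pair of the
// same element count takes the fpext branch instead. (Illustrative only;
// `int4` stands for an ext_vector_type typedef assumed here.)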
1779
1780Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
1781 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(ME: E)) {
1782 CGF.EmitIgnoredExpr(E: E->getBase());
1783 return CGF.emitScalarConstant(Constant, E);
1784 } else {
1785 Expr::EvalResult Result;
1786 if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
1787 llvm::APSInt Value = Result.Val.getInt();
1788 CGF.EmitIgnoredExpr(E: E->getBase());
1789 return Builder.getInt(AI: Value);
1790 }
1791 }
1792
1793 return EmitLoadOfLValue(E);
1794}
1795
1796Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
1797 TestAndClearIgnoreResultAssign();
1798
1799 // Emit subscript expressions in rvalue contexts. For most cases, this just
1800 // loads the lvalue formed by the subscript expr. However, we have to be
1801 // careful, because the base of a vector subscript is occasionally an rvalue,
1802 // so we can't get it as an lvalue.
1803 if (!E->getBase()->getType()->isVectorType() &&
1804 !E->getBase()->getType()->isSveVLSBuiltinType())
1805 return EmitLoadOfLValue(E);
1806
1807 // Handle the vector case. The base must be a vector, the index must be an
1808 // integer value.
1809 Value *Base = Visit(E: E->getBase());
1810 Value *Idx = Visit(E: E->getIdx());
1811 QualType IdxTy = E->getIdx()->getType();
1812
1813 if (CGF.SanOpts.has(K: SanitizerKind::ArrayBounds))
1814 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
1815
1816 return Builder.CreateExtractElement(Vec: Base, Idx, Name: "vecext");
1817}
1818
1819Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
1820 TestAndClearIgnoreResultAssign();
1821
1822 // Handle the vector case. The base must be a vector, the index must be an
1823 // integer value.
1824 Value *RowIdx = Visit(E: E->getRowIdx());
1825 Value *ColumnIdx = Visit(E: E->getColumnIdx());
1826
1827 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
1828 unsigned NumRows = MatrixTy->getNumRows();
1829 llvm::MatrixBuilder MB(Builder);
1830 Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows);
1831 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
1832 MB.CreateIndexAssumption(Idx, NumElements: MatrixTy->getNumElementsFlattened());
1833
1834 Value *Matrix = Visit(E: E->getBase());
1835
1836 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
1837 return Builder.CreateExtractElement(Vec: Matrix, Idx, Name: "matrixext");
1838}
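// The flattened index from CreateIndex is column-major: for a matrix with R
// rows, element (row, col) lives at `col * R + row`. For example, in a
// matrix with 4 rows, m[3][1] maps to 1 * 4 + 3 = 7.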
1839
1840static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
1841 unsigned Off) {
1842 int MV = SVI->getMaskValue(Elt: Idx);
1843 if (MV == -1)
1844 return -1;
1845 return Off + MV;
1846}
1847
1848static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
1849 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
1850 "Index operand too large for shufflevector mask!");
1851 return C->getZExtValue();
1852}
1853
1854Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
1855 bool Ignore = TestAndClearIgnoreResultAssign();
1856 (void)Ignore;
1857 assert(Ignore == false && "init list ignored");
1858 unsigned NumInitElements = E->getNumInits();
1859
1860 if (E->hadArrayRangeDesignator())
1861 CGF.ErrorUnsupported(E, "GNU array range designator extension");
1862
1863 llvm::VectorType *VType =
1864 dyn_cast<llvm::VectorType>(ConvertType(T: E->getType()));
1865
1866 if (!VType) {
1867 if (NumInitElements == 0) {
1868 // C++11 value-initialization for the scalar.
1869 return EmitNullValue(Ty: E->getType());
1870 }
1871 // We have a scalar in braces. Just use the first element.
1872 return Visit(E: E->getInit(Init: 0));
1873 }
1874
1875 if (isa<llvm::ScalableVectorType>(Val: VType)) {
1876 if (NumInitElements == 0) {
1877 // C++11 value-initialization for the vector.
1878 return EmitNullValue(Ty: E->getType());
1879 }
1880
1881 if (NumInitElements == 1) {
1882 Expr *InitVector = E->getInit(Init: 0);
1883
1884 // Initialize from another scalable vector of the same type.
1885 if (InitVector->getType() == E->getType())
1886 return Visit(E: InitVector);
1887 }
1888
1889 llvm_unreachable("Unexpected initialization of a scalable vector!");
1890 }
1891
1892 unsigned ResElts = cast<llvm::FixedVectorType>(Val: VType)->getNumElements();
1893
1894 // Loop over initializers collecting the Value for each, and remembering
1895 // whether the source was swizzle (ExtVectorElementExpr). This will allow
1896 // us to fold the shuffle for the swizzle into the shuffle for the vector
1897 // initializer, since LLVM optimizers generally do not want to touch
1898 // shuffles.
1899 unsigned CurIdx = 0;
1900 bool VIsPoisonShuffle = false;
1901 llvm::Value *V = llvm::PoisonValue::get(T: VType);
1902 for (unsigned i = 0; i != NumInitElements; ++i) {
1903 Expr *IE = E->getInit(Init: i);
1904 Value *Init = Visit(E: IE);
1905 SmallVector<int, 16> Args;
1906
1907 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Val: Init->getType());
1908
1909 // Handle scalar elements. If the scalar initializer is actually one
1910 // element of a different vector of the same width, use shuffle instead of
1911 // extract+insert.
1912 if (!VVT) {
1913 if (isa<ExtVectorElementExpr>(Val: IE)) {
1914 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Val: Init);
1915
1916 if (cast<llvm::FixedVectorType>(Val: EI->getVectorOperandType())
1917 ->getNumElements() == ResElts) {
1918 llvm::ConstantInt *C = cast<llvm::ConstantInt>(Val: EI->getIndexOperand());
1919 Value *LHS = nullptr, *RHS = nullptr;
1920 if (CurIdx == 0) {
1921 // insert into poison -> shuffle (src, poison)
1922 // shufflemask must use an i32
1923 Args.push_back(Elt: getAsInt32(C, I32Ty: CGF.Int32Ty));
1924 Args.resize(N: ResElts, NV: -1);
1925
1926 LHS = EI->getVectorOperand();
1927 RHS = V;
1928 VIsPoisonShuffle = true;
1929 } else if (VIsPoisonShuffle) {
1930 // insert into poison shuffle && size match -> shuffle (v, src)
1931 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(Val: V);
1932 for (unsigned j = 0; j != CurIdx; ++j)
1933 Args.push_back(Elt: getMaskElt(SVI: SVV, Idx: j, Off: 0));
1934 Args.push_back(Elt: ResElts + C->getZExtValue());
1935 Args.resize(N: ResElts, NV: -1);
1936
1937 LHS = cast<llvm::ShuffleVectorInst>(Val: V)->getOperand(i_nocapture: 0);
1938 RHS = EI->getVectorOperand();
1939 VIsPoisonShuffle = false;
1940 }
1941 if (!Args.empty()) {
1942 V = Builder.CreateShuffleVector(V1: LHS, V2: RHS, Mask: Args);
1943 ++CurIdx;
1944 continue;
1945 }
1946 }
1947 }
1948 V = Builder.CreateInsertElement(Vec: V, NewElt: Init, Idx: Builder.getInt32(C: CurIdx),
1949 Name: "vecinit");
1950 VIsPoisonShuffle = false;
1951 ++CurIdx;
1952 continue;
1953 }
1954
1955 unsigned InitElts = cast<llvm::FixedVectorType>(Val: VVT)->getNumElements();
1956
1957 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
1958 // input is the same width as the vector being constructed, generate an
1959 // optimized shuffle of the swizzle input into the result.
1960 unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
1961 if (isa<ExtVectorElementExpr>(Val: IE)) {
1962 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Val: Init);
1963 Value *SVOp = SVI->getOperand(i_nocapture: 0);
1964 auto *OpTy = cast<llvm::FixedVectorType>(Val: SVOp->getType());
1965
1966 if (OpTy->getNumElements() == ResElts) {
1967 for (unsigned j = 0; j != CurIdx; ++j) {
1968 // If the current vector initializer is a shuffle with poison, merge
1969 // this shuffle directly into it.
1970 if (VIsPoisonShuffle) {
1971 Args.push_back(Elt: getMaskElt(SVI: cast<llvm::ShuffleVectorInst>(Val: V), Idx: j, Off: 0));
1972 } else {
1973 Args.push_back(Elt: j);
1974 }
1975 }
1976 for (unsigned j = 0, je = InitElts; j != je; ++j)
1977 Args.push_back(Elt: getMaskElt(SVI, Idx: j, Off: Offset));
1978 Args.resize(N: ResElts, NV: -1);
1979
1980 if (VIsPoisonShuffle)
1981 V = cast<llvm::ShuffleVectorInst>(Val: V)->getOperand(i_nocapture: 0);
1982
1983 Init = SVOp;
1984 }
1985 }
1986
1987 // Extend init to result vector length, and then shuffle its contribution
1988 // to the vector initializer into V.
1989 if (Args.empty()) {
1990 for (unsigned j = 0; j != InitElts; ++j)
1991 Args.push_back(Elt: j);
1992 Args.resize(N: ResElts, NV: -1);
1993 Init = Builder.CreateShuffleVector(V: Init, Mask: Args, Name: "vext");
1994
1995 Args.clear();
1996 for (unsigned j = 0; j != CurIdx; ++j)
1997 Args.push_back(Elt: j);
1998 for (unsigned j = 0; j != InitElts; ++j)
1999 Args.push_back(Elt: j + Offset);
2000 Args.resize(N: ResElts, NV: -1);
2001 }
2002
2003 // If V is poison, make sure it ends up on the RHS of the shuffle to aid
2004 // merging subsequent shuffles into this one.
2005 if (CurIdx == 0)
2006 std::swap(a&: V, b&: Init);
2007 V = Builder.CreateShuffleVector(V1: V, V2: Init, Mask: Args, Name: "vecinit");
2008 VIsPoisonShuffle = isa<llvm::PoisonValue>(Val: Init);
2009 CurIdx += InitElts;
2010 }
2011
2012 // FIXME: evaluate codegen vs. shuffling against constant null vector.
2013 //        Emit remaining default initializers.
2014 llvm::Type *EltTy = VType->getElementType();
2015
2016 // Emit remaining default initializers.
2017 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
2018 Value *Idx = Builder.getInt32(C: CurIdx);
2019 llvm::Value *Init = llvm::Constant::getNullValue(Ty: EltTy);
2020 V = Builder.CreateInsertElement(Vec: V, NewElt: Init, Idx, Name: "vecinit");
2021 }
2022 return V;
2023}
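// A small example of the swizzle folding above (OpenCL-style, assuming a
// `float4 w` in scope):
//   float4 v = (float4)(w.xy, 1.0f, 2.0f);
// Here `w.xy` is itself a shufflevector of `w`; since w's width matches the
// result width, its mask is folded straight into the initializer shuffle
// rather than lowered to extract/insert pairs, and the trailing scalars are
// then inserted individually.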
2024
2025bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
2026 const Expr *E = CE->getSubExpr();
2027
2028 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
2029 return false;
2030
2031 if (isa<CXXThisExpr>(Val: E->IgnoreParens())) {
2032 // We always assume that 'this' is never null.
2033 return false;
2034 }
2035
2036 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Val: CE)) {
2037 // And that glvalue casts are never null.
2038 if (ICE->isGLValue())
2039 return false;
2040 }
2041
2042 return true;
2043}
2044
2045// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
2046 // have to handle a broader range of conversions than explicit casts, as they
2047// handle things like function to ptr-to-function decay etc.
2048Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
2049 Expr *E = CE->getSubExpr();
2050 QualType DestTy = CE->getType();
2051 CastKind Kind = CE->getCastKind();
2052 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);
2053
2054 // These cases are generally not written to ignore the result of
2055 // evaluating their sub-expressions, so we clear this now.
2056 bool Ignored = TestAndClearIgnoreResultAssign();
2057
2058 // Since almost all cast kinds apply to scalars, this switch doesn't have
2059 // a default case, so the compiler will warn on a missing case. The cases
2060 // are in the same order as in the CastKind enum.
2061 switch (Kind) {
2062 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
2063 case CK_BuiltinFnToFnPtr:
2064 llvm_unreachable("builtin functions are handled elsewhere");
2065
2066 case CK_LValueBitCast:
2067 case CK_ObjCObjectLValueCast: {
2068 Address Addr = EmitLValue(E).getAddress(CGF);
2069 Addr = Addr.withElementType(ElemTy: CGF.ConvertTypeForMem(T: DestTy));
2070 LValue LV = CGF.MakeAddrLValue(Addr, T: DestTy);
2071 return EmitLoadOfLValue(LV, CE->getExprLoc());
2072 }
2073
2074 case CK_LValueToRValueBitCast: {
2075 LValue SourceLVal = CGF.EmitLValue(E);
2076 Address Addr = SourceLVal.getAddress(CGF).withElementType(
2077 ElemTy: CGF.ConvertTypeForMem(T: DestTy));
2078 LValue DestLV = CGF.MakeAddrLValue(Addr, T: DestTy);
2079 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2080 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2081 }
2082
2083 case CK_CPointerToObjCPointerCast:
2084 case CK_BlockPointerToObjCPointerCast:
2085 case CK_AnyPointerToBlockPointerCast:
2086 case CK_BitCast: {
2087 Value *Src = Visit(E: const_cast<Expr*>(E));
2088 llvm::Type *SrcTy = Src->getType();
2089 llvm::Type *DstTy = ConvertType(T: DestTy);
2090 assert(
2091 (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
2092 SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
2093 "Address-space cast must be used to convert address spaces");
2094
2095 if (CGF.SanOpts.has(K: SanitizerKind::CFIUnrelatedCast)) {
2096 if (auto *PT = DestTy->getAs<PointerType>()) {
2097 CGF.EmitVTablePtrCheckForCast(
2098 T: PT->getPointeeType(),
2099 Derived: Address(Src,
2100 CGF.ConvertTypeForMem(
2101 T: E->getType()->castAs<PointerType>()->getPointeeType()),
2102 CGF.getPointerAlign()),
2103 /*MayBeNull=*/true, TCK: CodeGenFunction::CFITCK_UnrelatedCast,
2104 Loc: CE->getBeginLoc());
2105 }
2106 }
2107
2108 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2109 const QualType SrcType = E->getType();
2110
2111 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2112 // Casting to pointer that could carry dynamic information (provided by
2113 // invariant.group) requires launder.
2114 Src = Builder.CreateLaunderInvariantGroup(Ptr: Src);
2115 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2116 // Casting to pointer that does not carry dynamic information (provided
2117 // by invariant.group) requires stripping it. Note that we don't do it
2118 // if the source could not be a dynamic type but the destination could be
2119 // dynamic, because dynamic information is already laundered. This is
2120 // because launder(strip(src)) == launder(src), so there is no need to
2121 // add an extra strip before launder.
2122 Src = Builder.CreateStripInvariantGroup(Ptr: Src);
2123 }
2124 }
2125
2126 // Update heapallocsite metadata when there is an explicit pointer cast.
2127 if (auto *CI = dyn_cast<llvm::CallBase>(Val: Src)) {
2128 if (CI->getMetadata(Kind: "heapallocsite") && isa<ExplicitCastExpr>(Val: CE) &&
2129 !isa<CastExpr>(Val: E)) {
2130 QualType PointeeType = DestTy->getPointeeType();
2131 if (!PointeeType.isNull())
2132 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CallSite: CI, AllocatedTy: PointeeType,
2133 Loc: CE->getExprLoc());
2134 }
2135 }
2136
2137 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2138 // same element type, use the llvm.vector.insert intrinsic to perform the
2139 // bitcast.
2140 if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(Val: SrcTy)) {
2141 if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
2142 // If we are casting a fixed i8 vector to a scalable i1 predicate
2143 // vector, use a vector insert and bitcast the result.
2144 if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
2145 ScalableDstTy->getElementCount().isKnownMultipleOf(8) &&
2146 FixedSrcTy->getElementType()->isIntegerTy(Bitwidth: 8)) {
2147 ScalableDstTy = llvm::ScalableVectorType::get(
2148 FixedSrcTy->getElementType(),
2149 ScalableDstTy->getElementCount().getKnownMinValue() / 8);
2150 }
2151 if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
2152 llvm::Value *UndefVec = llvm::UndefValue::get(T: ScalableDstTy);
2153 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: CGF.CGM.Int64Ty);
2154 llvm::Value *Result = Builder.CreateInsertVector(
2155 DstType: ScalableDstTy, SrcVec: UndefVec, SubVec: Src, Idx: Zero, Name: "cast.scalable");
2156 if (Result->getType() != DstTy)
2157 Result = Builder.CreateBitCast(V: Result, DestTy: DstTy);
2158 return Result;
2159 }
2160 }
2161 }
2162
2163 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2164 // same element type, use the llvm.vector.extract intrinsic to perform the
2165 // bitcast.
2166 if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(Val: SrcTy)) {
2167 if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(DstTy)) {
2168 // If we are casting a scalable i1 predicate vector to a fixed i8
2169 // vector, bitcast the source and use a vector extract.
2170 if (ScalableSrcTy->getElementType()->isIntegerTy(Bitwidth: 1) &&
2171 ScalableSrcTy->getElementCount().isKnownMultipleOf(RHS: 8) &&
2172 FixedDstTy->getElementType()->isIntegerTy(8)) {
2173 ScalableSrcTy = llvm::ScalableVectorType::get(
2174 FixedDstTy->getElementType(),
2175 ScalableSrcTy->getElementCount().getKnownMinValue() / 8);
2176 Src = Builder.CreateBitCast(V: Src, DestTy: ScalableSrcTy);
2177 }
2178 if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType()) {
2179 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: CGF.CGM.Int64Ty);
2180 return Builder.CreateExtractVector(DstType: DstTy, SrcVec: Src, Idx: Zero, Name: "cast.fixed");
2181 }
2182 }
2183 }
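// For illustration: casting a fixed <4 x i32> value (e.g. a VLST under
// -msve-vector-bits=128) to a scalable <vscale x 4 x i32> becomes roughly
//   call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(
//            <vscale x 4 x i32> undef, <4 x i32> %src, i64 0)
// and the opposite direction becomes the matching llvm.vector.extract call.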
2184
2185 // Perform VLAT <-> VLST bitcast through memory.
2186 // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
2187 // require the element types of the vectors to be the same, we
2188 // need to keep this around for bitcasts between VLAT <-> VLST where
2189 // the element types of the vectors are not the same, until we figure
2190 // out a better way of doing these casts.
2191 if ((isa<llvm::FixedVectorType>(Val: SrcTy) &&
2192 isa<llvm::ScalableVectorType>(Val: DstTy)) ||
2193 (isa<llvm::ScalableVectorType>(Val: SrcTy) &&
2194 isa<llvm::FixedVectorType>(Val: DstTy))) {
2195 Address Addr = CGF.CreateDefaultAlignTempAlloca(Ty: SrcTy, Name: "saved-value");
2196 LValue LV = CGF.MakeAddrLValue(Addr, T: E->getType());
2197 CGF.EmitStoreOfScalar(value: Src, lvalue: LV);
2198 Addr = Addr.withElementType(ElemTy: CGF.ConvertTypeForMem(T: DestTy));
2199 LValue DestLV = CGF.MakeAddrLValue(Addr, T: DestTy);
2200 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2201 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2202 }
2203 return Builder.CreateBitCast(V: Src, DestTy: DstTy);
2204 }
2205 case CK_AddressSpaceConversion: {
2206 Expr::EvalResult Result;
2207 if (E->EvaluateAsRValue(Result, Ctx: CGF.getContext()) &&
2208 Result.Val.isNullPointer()) {
2209 // If E has side effects, it is emitted even if its final result is a
2210 // null pointer. In that case, a DCE pass should be able to
2211 // eliminate the useless instructions emitted while translating E.
2212 if (Result.HasSideEffects)
2213 Visit(E);
2214 return CGF.CGM.getNullPointer(T: cast<llvm::PointerType>(
2215 Val: ConvertType(T: DestTy)), QT: DestTy);
2216 }
2217 // Since the target may map different address spaces in the AST to the same
2218 // address space, an address space conversion may end up as a bitcast.
2219 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
2220 CGF, V: Visit(E), SrcAddr: E->getType()->getPointeeType().getAddressSpace(),
2221 DestAddr: DestTy->getPointeeType().getAddressSpace(), DestTy: ConvertType(T: DestTy));
2222 }
2223 case CK_AtomicToNonAtomic:
2224 case CK_NonAtomicToAtomic:
2225 case CK_UserDefinedConversion:
2226 return Visit(E: const_cast<Expr*>(E));
2227
2228 case CK_NoOp: {
2229 return CE->changesVolatileQualification() ? EmitLoadOfLValue(CE)
2230 : Visit(E: const_cast<Expr *>(E));
2231 }
2232
2233 case CK_BaseToDerived: {
2234 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2235 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2236
2237 Address Base = CGF.EmitPointerWithAlignment(Addr: E);
2238 Address Derived =
2239 CGF.GetAddressOfDerivedClass(Value: Base, Derived: DerivedClassDecl,
2240 PathBegin: CE->path_begin(), PathEnd: CE->path_end(),
2241 NullCheckValue: CGF.ShouldNullCheckClassCastValue(CE));
2242
2243 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2244 // performed and the object is not of the derived type.
2245 if (CGF.sanitizePerformTypeCheck())
2246 CGF.EmitTypeCheck(TCK: CodeGenFunction::TCK_DowncastPointer, Loc: CE->getExprLoc(),
2247 V: Derived.getPointer(), Type: DestTy->getPointeeType());
2248
2249 if (CGF.SanOpts.has(K: SanitizerKind::CFIDerivedCast))
2250 CGF.EmitVTablePtrCheckForCast(T: DestTy->getPointeeType(), Derived,
2251 /*MayBeNull=*/true,
2252 TCK: CodeGenFunction::CFITCK_DerivedCast,
2253 Loc: CE->getBeginLoc());
2254
2255 return Derived.getPointer();
2256 }
2257 case CK_UncheckedDerivedToBase:
2258 case CK_DerivedToBase: {
2259 // The EmitPointerWithAlignment path does this fine; just discard
2260 // the alignment.
2261 return CGF.EmitPointerWithAlignment(CE).getPointer();
2262 }
2263
2264 case CK_Dynamic: {
2265 Address V = CGF.EmitPointerWithAlignment(Addr: E);
2266 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(Val: CE);
2267 return CGF.EmitDynamicCast(V, DCE);
2268 }
2269
2270 case CK_ArrayToPointerDecay:
2271 return CGF.EmitArrayToPointerDecay(Array: E).getPointer();
2272 case CK_FunctionToPointerDecay:
2273 return EmitLValue(E).getPointer(CGF);
2274
2275 case CK_NullToPointer:
2276 if (MustVisitNullValue(E))
2277 CGF.EmitIgnoredExpr(E);
2278
2279 return CGF.CGM.getNullPointer(T: cast<llvm::PointerType>(Val: ConvertType(T: DestTy)),
2280 QT: DestTy);
2281
2282 case CK_NullToMemberPointer: {
2283 if (MustVisitNullValue(E))
2284 CGF.EmitIgnoredExpr(E);
2285
2286 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2287 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2288 }
2289
2290 case CK_ReinterpretMemberPointer:
2291 case CK_BaseToDerivedMemberPointer:
2292 case CK_DerivedToBaseMemberPointer: {
2293 Value *Src = Visit(E);
2294
2295 // Note that the AST doesn't distinguish between checked and
2296 // unchecked member pointer conversions, so we always have to
2297 // implement checked conversions here. This is inefficient when
2298 // actual control flow may be required in order to perform the
2299 // check, which it is for data member pointers (but not member
2300 // function pointers on Itanium and ARM).
2301 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, E: CE, Src);
2302 }
2303
2304 case CK_ARCProduceObject:
2305 return CGF.EmitARCRetainScalarExpr(expr: E);
2306 case CK_ARCConsumeObject:
2307 return CGF.EmitObjCConsumeObject(T: E->getType(), Ptr: Visit(E));
2308 case CK_ARCReclaimReturnedObject:
2309 return CGF.EmitARCReclaimReturnedObject(e: E, /*allowUnsafe*/ allowUnsafeClaim: Ignored);
2310 case CK_ARCExtendBlockObject:
2311 return CGF.EmitARCExtendBlockObject(expr: E);
2312
2313 case CK_CopyAndAutoreleaseBlockObject:
2314 return CGF.EmitBlockCopyAndAutorelease(Block: Visit(E), Ty: E->getType());
2315
2316 case CK_FloatingRealToComplex:
2317 case CK_FloatingComplexCast:
2318 case CK_IntegralRealToComplex:
2319 case CK_IntegralComplexCast:
2320 case CK_IntegralComplexToFloatingComplex:
2321 case CK_FloatingComplexToIntegralComplex:
2322 case CK_ConstructorConversion:
2323 case CK_ToUnion:
2324 llvm_unreachable("scalar cast to non-scalar value");
2325
2326 case CK_LValueToRValue:
2327 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2328 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2329 return Visit(E: const_cast<Expr*>(E));
2330
2331 case CK_IntegralToPointer: {
2332 Value *Src = Visit(E: const_cast<Expr*>(E));
2333
2334 // First, convert to the correct width so that we control the kind of
2335 // extension.
2336 auto DestLLVMTy = ConvertType(T: DestTy);
2337 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2338 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2339 llvm::Value* IntResult =
2340 Builder.CreateIntCast(V: Src, DestTy: MiddleTy, isSigned: InputSigned, Name: "conv");
2341
2342 auto *IntToPtr = Builder.CreateIntToPtr(V: IntResult, DestTy: DestLLVMTy);
2343
2344 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2345 // Going from integer to pointer that could be dynamic requires reloading
2346 // dynamic information from invariant.group.
2347 if (DestTy.mayBeDynamicClass())
2348 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2349 }
2350 return IntToPtr;
2351 }
2352 case CK_PointerToIntegral: {
2353 assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2354 auto *PtrExpr = Visit(E);
2355
2356 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2357 const QualType SrcType = E->getType();
2358
2359 // Casting to integer requires stripping dynamic information as it does
2360 // not carry it.
2361 if (SrcType.mayBeDynamicClass())
2362 PtrExpr = Builder.CreateStripInvariantGroup(Ptr: PtrExpr);
2363 }
2364
2365 return Builder.CreatePtrToInt(V: PtrExpr, DestTy: ConvertType(T: DestTy));
2366 }
2367 case CK_ToVoid: {
2368 CGF.EmitIgnoredExpr(E);
2369 return nullptr;
2370 }
2371 case CK_MatrixCast: {
2372 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2373 Loc: CE->getExprLoc());
2374 }
2375 case CK_VectorSplat: {
2376 llvm::Type *DstTy = ConvertType(T: DestTy);
2377 Value *Elt = Visit(E: const_cast<Expr *>(E));
2378 // Splat the element across to all elements
2379 llvm::ElementCount NumElements =
2380 cast<llvm::VectorType>(Val: DstTy)->getElementCount();
2381 return Builder.CreateVectorSplat(EC: NumElements, V: Elt, Name: "splat");
2382 }
2383
2384 case CK_FixedPointCast:
2385 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2386 Loc: CE->getExprLoc());
2387
2388 case CK_FixedPointToBoolean:
2389 assert(E->getType()->isFixedPointType() &&
2390 "Expected src type to be fixed point type");
2391 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
2392 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2393 Loc: CE->getExprLoc());
2394
2395 case CK_FixedPointToIntegral:
2396 assert(E->getType()->isFixedPointType() &&
2397 "Expected src type to be fixed point type");
2398 assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
2399 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2400 Loc: CE->getExprLoc());
2401
2402 case CK_IntegralToFixedPoint:
2403 assert(E->getType()->isIntegerType() &&
2404 "Expected src type to be an integer");
2405 assert(DestTy->isFixedPointType() &&
2406 "Expected dest type to be fixed point type");
2407 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2408 Loc: CE->getExprLoc());
2409
2410 case CK_IntegralCast: {
2411 ScalarConversionOpts Opts;
2412 if (auto *ICE = dyn_cast<ImplicitCastExpr>(Val: CE)) {
2413 if (!ICE->isPartOfExplicitCast())
2414 Opts = ScalarConversionOpts(CGF.SanOpts);
2415 }
2416 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2417 Loc: CE->getExprLoc(), Opts);
2418 }
2419 case CK_IntegralToFloating:
2420 case CK_FloatingToIntegral:
2421 case CK_FloatingCast:
2422 case CK_FixedPointToFloating:
2423 case CK_FloatingToFixedPoint: {
2424 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2425 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2426 Loc: CE->getExprLoc());
2427 }
2428 case CK_BooleanToSignedIntegral: {
2429 ScalarConversionOpts Opts;
2430 Opts.TreatBooleanAsSigned = true;
2431 return EmitScalarConversion(Src: Visit(E), SrcType: E->getType(), DstType: DestTy,
2432 Loc: CE->getExprLoc(), Opts);
2433 }
2434 case CK_IntegralToBoolean:
2435 return EmitIntToBoolConversion(V: Visit(E));
2436 case CK_PointerToBoolean:
2437 return EmitPointerToBoolConversion(V: Visit(E), QT: E->getType());
2438 case CK_FloatingToBoolean: {
2439 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2440 return EmitFloatToBoolConversion(V: Visit(E));
2441 }
2442 case CK_MemberPointerToBoolean: {
2443 llvm::Value *MemPtr = Visit(E);
2444 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2445 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2446 }
2447
2448 case CK_FloatingComplexToReal:
2449 case CK_IntegralComplexToReal:
2450 return CGF.EmitComplexExpr(E, IgnoreReal: false, IgnoreImag: true).first;
2451
2452 case CK_FloatingComplexToBoolean:
2453 case CK_IntegralComplexToBoolean: {
2454 CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
2455
2456 // TODO: kill this function off, inline appropriate case here
2457 return EmitComplexToScalarConversion(Src: V, SrcTy: E->getType(), DstTy: DestTy,
2458 Loc: CE->getExprLoc());
2459 }
2460
2461 case CK_ZeroToOCLOpaqueType: {
2462 assert((DestTy->isEventT() || DestTy->isQueueT() ||
2463 DestTy->isOCLIntelSubgroupAVCType()) &&
2464 "CK_ZeroToOCLOpaqueType cast on non-OpenCL opaque type");
2465 return llvm::Constant::getNullValue(Ty: ConvertType(T: DestTy));
2466 }
2467
2468 case CK_IntToOCLSampler:
2469 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2470
2471 } // end of switch
2472
2473 llvm_unreachable("unknown scalar cast");
2474}
2475
2476Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2477 CodeGenFunction::StmtExprEvaluation eval(CGF);
2478 Address RetAlloca = CGF.EmitCompoundStmt(S: *E->getSubStmt(),
2479 GetLast: !E->getType()->isVoidType());
2480 if (!RetAlloca.isValid())
2481 return nullptr;
2482 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
2483 E->getExprLoc());
2484}
2485
2486Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2487 CodeGenFunction::RunCleanupsScope Scope(CGF);
2488 Value *V = Visit(E: E->getSubExpr());
2489 // Defend against dominance problems caused by jumps out of expression
2490 // evaluation through the shared cleanup block.
2491 Scope.ForceCleanup(ValuesToReload: {&V});
2492 return V;
2493}
2494
2495//===----------------------------------------------------------------------===//
2496// Unary Operators
2497//===----------------------------------------------------------------------===//
2498
2499static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
2500 llvm::Value *InVal, bool IsInc,
2501 FPOptions FPFeatures) {
2502 BinOpInfo BinOp;
2503 BinOp.LHS = InVal;
2504 BinOp.RHS = llvm::ConstantInt::get(Ty: InVal->getType(), V: 1, IsSigned: false);
2505 BinOp.Ty = E->getType();
2506 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2507 BinOp.FPFeatures = FPFeatures;
2508 BinOp.E = E;
2509 return BinOp;
2510}
2511
2512llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
2513 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
2514 llvm::Value *Amount =
2515 llvm::ConstantInt::get(Ty: InVal->getType(), V: IsInc ? 1 : -1, IsSigned: true);
2516 StringRef Name = IsInc ? "inc" : "dec";
2517 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
2518 case LangOptions::SOB_Defined:
2519 return Builder.CreateAdd(LHS: InVal, RHS: Amount, Name);
2520 case LangOptions::SOB_Undefined:
2521 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
2522 return Builder.CreateNSWAdd(LHS: InVal, RHS: Amount, Name);
2523 [[fallthrough]];
2524 case LangOptions::SOB_Trapping:
2525 if (!E->canOverflow())
2526 return Builder.CreateNSWAdd(LHS: InVal, RHS: Amount, Name);
2527 return EmitOverflowCheckedBinOp(Ops: createBinOpInfoFromIncDec(
2528 E, InVal, IsInc, FPFeatures: E->getFPFeaturesInEffect(LO: CGF.getLangOpts())));
2529 }
2530 llvm_unreachable("Unknown SignedOverflowBehaviorTy");
2531}
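// Roughly how the language modes map onto the switch above (a sketch; see
// LangOptions::getSignedOverflowBehavior):
//   -fwrapv   -> SOB_Defined:   plain `add i32 %x, 1`
//   (default) -> SOB_Undefined: `add nsw i32 %x, 1`, or the checked path
//                when -fsanitize=signed-integer-overflow is enabled
//   -ftrapv   -> SOB_Trapping:  an overflow-checked add (e.g. via
//                llvm.sadd.with.overflow) unless the inc/dec cannot overflow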
2532
2533namespace {
2534/// Handles check and update for lastprivate conditional variables.
2535class OMPLastprivateConditionalUpdateRAII {
2536private:
2537 CodeGenFunction &CGF;
2538 const UnaryOperator *E;
2539
2540public:
2541 OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
2542 const UnaryOperator *E)
2543 : CGF(CGF), E(E) {}
2544 ~OMPLastprivateConditionalUpdateRAII() {
2545 if (CGF.getLangOpts().OpenMP)
2546 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(
2547 CGF, LHS: E->getSubExpr());
2548 }
2549};
2550} // namespace
2551
2552llvm::Value *
2553ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2554 bool isInc, bool isPre) {
2555 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
2556 QualType type = E->getSubExpr()->getType();
2557 llvm::PHINode *atomicPHI = nullptr;
2558 llvm::Value *value;
2559 llvm::Value *input;
2560
2561 int amount = (isInc ? 1 : -1);
2562 bool isSubtraction = !isInc;
2563
2564 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
2565 type = atomicTy->getValueType();
2566 if (isInc && type->isBooleanType()) {
2567 llvm::Value *True = CGF.EmitToMemory(Value: Builder.getTrue(), Ty: type);
2568 if (isPre) {
2569 Builder.CreateStore(Val: True, Addr: LV.getAddress(CGF), IsVolatile: LV.isVolatileQualified())
2570 ->setAtomic(Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
2571 return Builder.getTrue();
2572 }
2573 // For atomic bool increment, preincrement just stores true and returns it
2574 // (handled above); postincrement does an atomic swap with true.
2575 return Builder.CreateAtomicRMW(
2576 Op: llvm::AtomicRMWInst::Xchg, Addr: LV.getAddress(CGF), Val: True,
2577 Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
2578 }
2579 // Special case for atomic increment/decrement on integers: emit
2580 // atomicrmw instructions. We skip this when overflow checking is
2581 // requested and fall into the slow path with the atomic cmpxchg loop.
2582 if (!type->isBooleanType() && type->isIntegerType() &&
2583 !(type->isUnsignedIntegerType() &&
2584 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow)) &&
2585 CGF.getLangOpts().getSignedOverflowBehavior() !=
2586 LangOptions::SOB_Trapping) {
2587 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
2588 llvm::AtomicRMWInst::Sub;
2589 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
2590 llvm::Instruction::Sub;
2591 llvm::Value *amt = CGF.EmitToMemory(
2592 Value: llvm::ConstantInt::get(Ty: ConvertType(T: type), V: 1, IsSigned: true), Ty: type);
2593 llvm::Value *old =
2594 Builder.CreateAtomicRMW(Op: aop, Addr: LV.getAddress(CGF), Val: amt,
2595 Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
2596 return isPre ? Builder.CreateBinOp(Opc: op, LHS: old, RHS: amt) : old;
2597 }
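// E.g. `x++` on an `_Atomic int x` takes this fast path and emits roughly:
//   %old = atomicrmw add ptr %x, i32 1 seq_cst
// with %old returned for postincrement, and %old + 1 recomputed locally for
// preincrement.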
2598 value = EmitLoadOfLValue(LV, Loc: E->getExprLoc());
2599 input = value;
2600 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
2601 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2602 llvm::BasicBlock *opBB = CGF.createBasicBlock(name: "atomic_op", parent: CGF.CurFn);
2603 value = CGF.EmitToMemory(Value: value, Ty: type);
2604 Builder.CreateBr(Dest: opBB);
2605 Builder.SetInsertPoint(opBB);
2606 atomicPHI = Builder.CreatePHI(Ty: value->getType(), NumReservedValues: 2);
2607 atomicPHI->addIncoming(V: value, BB: startBB);
2608 value = atomicPHI;
2609 } else {
2610 value = EmitLoadOfLValue(LV, Loc: E->getExprLoc());
2611 input = value;
2612 }
2613
2614 // Special case of integer increment that we have to check first: bool++.
2615 // Due to promotion rules, we get:
2616 // bool++ -> bool = bool + 1
2617 // -> bool = (int)bool + 1
2618 // -> bool = ((int)bool + 1 != 0)
2619 // An interesting aspect of this is that increment is always true.
2620 // Decrement does not have this property.
2621 if (isInc && type->isBooleanType()) {
2622 value = Builder.getTrue();
2623
2624 // Most common case by far: integer increment.
2625 } else if (type->isIntegerType()) {
2626 QualType promotedType;
2627 bool canPerformLossyDemotionCheck = false;
2628 if (CGF.getContext().isPromotableIntegerType(T: type)) {
2629 promotedType = CGF.getContext().getPromotedIntegerType(PromotableType: type);
2630 assert(promotedType != type && "Shouldn't promote to the same type.");
2631 canPerformLossyDemotionCheck = true;
2632 canPerformLossyDemotionCheck &=
2633 CGF.getContext().getCanonicalType(T: type) !=
2634 CGF.getContext().getCanonicalType(T: promotedType);
2635 canPerformLossyDemotionCheck &=
2636 PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
2637 SrcType: type, DstType: promotedType);
2638 assert((!canPerformLossyDemotionCheck ||
2639 type->isSignedIntegerOrEnumerationType() ||
2640 promotedType->isSignedIntegerOrEnumerationType() ||
2641 ConvertType(type)->getScalarSizeInBits() ==
2642 ConvertType(promotedType)->getScalarSizeInBits()) &&
2643 "The following check expects that if we do promotion to different "
2644 "underlying canonical type, at least one of the types (either "
2645 "base or promoted) will be signed, or the bitwidths will match.");
2646 }
2647 if (CGF.SanOpts.hasOneOf(
2648 K: SanitizerKind::ImplicitIntegerArithmeticValueChange) &&
2649 canPerformLossyDemotionCheck) {
2650 // While `x += 1` (for `x` with width less than int) is modeled as
2651 // promotion+arithmetic+demotion, and we can catch lossy demotion with
2652 // ease, inc/dec with width less than int can't overflow because of
2653 // promotion rules, so we omit promotion+demotion, which means that we
2654 // cannot catch lossy "demotion". Because we still want to catch these
2655 // cases when the sanitizer is enabled, we perform the promotion, then
2656 // perform the increment/decrement in the wider type, and finally
2657 // perform the demotion. This will catch lossy demotions.
2658
2659 value = EmitScalarConversion(Src: value, SrcType: type, DstType: promotedType, Loc: E->getExprLoc());
2660 Value *amt = llvm::ConstantInt::get(Ty: value->getType(), V: amount, IsSigned: true);
2661 value = Builder.CreateAdd(LHS: value, RHS: amt, Name: isInc ? "inc" : "dec");
2662 // Do pass non-default ScalarConversionOpts so that sanitizer check is
2663 // emitted.
2664 value = EmitScalarConversion(Src: value, SrcType: promotedType, DstType: type, Loc: E->getExprLoc(),
2665 Opts: ScalarConversionOpts(CGF.SanOpts));
2666
2667 // Note that signed integer inc/dec with width less than int can't
2668 // overflow because of promotion rules; we're just eliding a few steps
2669 // here.
2670 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
2671 value = EmitIncDecConsiderOverflowBehavior(E, InVal: value, IsInc: isInc);
2672 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
2673 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow)) {
2674 value = EmitOverflowCheckedBinOp(Ops: createBinOpInfoFromIncDec(
2675 E, InVal: value, IsInc: isInc, FPFeatures: E->getFPFeaturesInEffect(LO: CGF.getLangOpts())));
2676 } else {
2677 llvm::Value *amt = llvm::ConstantInt::get(Ty: value->getType(), V: amount, IsSigned: true);
2678 value = Builder.CreateAdd(LHS: value, RHS: amt, Name: isInc ? "inc" : "dec");
2679 }
2680
2681 // Next most common: pointer increment.
2682 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
2683 QualType type = ptr->getPointeeType();
2684
2685 // VLA types don't have constant size.
2686 if (const VariableArrayType *vla
2687 = CGF.getContext().getAsVariableArrayType(T: type)) {
2688 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
2689 if (!isInc) numElts = Builder.CreateNSWNeg(V: numElts, Name: "vla.negsize");
2690 llvm::Type *elemTy = CGF.ConvertTypeForMem(T: vla->getElementType());
2691 if (CGF.getLangOpts().isSignedOverflowDefined())
2692 value = Builder.CreateGEP(Ty: elemTy, Ptr: value, IdxList: numElts, Name: "vla.inc");
2693 else
2694 value = CGF.EmitCheckedInBoundsGEP(
2695 ElemTy: elemTy, Ptr: value, IdxList: numElts, /*SignedIndices=*/false, IsSubtraction: isSubtraction,
2696 Loc: E->getExprLoc(), Name: "vla.inc");
2697
2698 // Arithmetic on function pointers (!) is just +-1.
2699 } else if (type->isFunctionType()) {
2700 llvm::Value *amt = Builder.getInt32(C: amount);
2701
2702 if (CGF.getLangOpts().isSignedOverflowDefined())
2703 value = Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: value, IdxList: amt, Name: "incdec.funcptr");
2704 else
2705 value =
2706 CGF.EmitCheckedInBoundsGEP(ElemTy: CGF.Int8Ty, Ptr: value, IdxList: amt,
2707 /*SignedIndices=*/false, IsSubtraction: isSubtraction,
2708 Loc: E->getExprLoc(), Name: "incdec.funcptr");
2709
2710 // For everything else, we can just do a simple increment.
2711 } else {
2712 llvm::Value *amt = Builder.getInt32(C: amount);
2713 llvm::Type *elemTy = CGF.ConvertTypeForMem(T: type);
2714 if (CGF.getLangOpts().isSignedOverflowDefined())
2715 value = Builder.CreateGEP(Ty: elemTy, Ptr: value, IdxList: amt, Name: "incdec.ptr");
2716 else
2717 value = CGF.EmitCheckedInBoundsGEP(
2718 ElemTy: elemTy, Ptr: value, IdxList: amt, /*SignedIndices=*/false, IsSubtraction: isSubtraction,
2719 Loc: E->getExprLoc(), Name: "incdec.ptr");
2720 }
2721
2722 // Vector increment/decrement.
2723 } else if (type->isVectorType()) {
2724 if (type->hasIntegerRepresentation()) {
2725 llvm::Value *amt = llvm::ConstantInt::get(Ty: value->getType(), V: amount);
2726
2727 value = Builder.CreateAdd(LHS: value, RHS: amt, Name: isInc ? "inc" : "dec");
2728 } else {
2729 value = Builder.CreateFAdd(
2730 L: value,
2731 R: llvm::ConstantFP::get(Ty: value->getType(), V: amount),
2732 Name: isInc ? "inc" : "dec");
2733 }
2734
2735 // Floating point.
2736 } else if (type->isRealFloatingType()) {
2737 // Add the inc/dec to the real part.
2738 llvm::Value *amt;
2739 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
2740
2741 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2742 // Another special case: half FP increment should be done via float
2743 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2744 value = Builder.CreateCall(
2745 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
2746 CGF.CGM.FloatTy),
2747 input, "incdec.conv");
2748 } else {
2749 value = Builder.CreateFPExt(V: input, DestTy: CGF.CGM.FloatTy, Name: "incdec.conv");
2750 }
2751 }
2752
2753 if (value->getType()->isFloatTy())
2754 amt = llvm::ConstantFP::get(Context&: VMContext,
2755 V: llvm::APFloat(static_cast<float>(amount)));
2756 else if (value->getType()->isDoubleTy())
2757 amt = llvm::ConstantFP::get(Context&: VMContext,
2758 V: llvm::APFloat(static_cast<double>(amount)));
2759 else {
2760 // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
2761 // Convert from float.
2762 llvm::APFloat F(static_cast<float>(amount));
2763 bool ignored;
2764 const llvm::fltSemantics *FS;
2765 // Don't use getFloatTypeSemantics because Half isn't
2766 // necessarily represented using the "half" LLVM type.
2767 if (value->getType()->isFP128Ty())
2768 FS = &CGF.getTarget().getFloat128Format();
2769 else if (value->getType()->isHalfTy())
2770 FS = &CGF.getTarget().getHalfFormat();
2771 else if (value->getType()->isBFloatTy())
2772 FS = &CGF.getTarget().getBFloat16Format();
2773 else if (value->getType()->isPPC_FP128Ty())
2774 FS = &CGF.getTarget().getIbm128Format();
2775 else
2776 FS = &CGF.getTarget().getLongDoubleFormat();
2777 F.convert(ToSemantics: *FS, RM: llvm::APFloat::rmTowardZero, losesInfo: &ignored);
2778 amt = llvm::ConstantFP::get(Context&: VMContext, V: F);
2779 }
2780 value = Builder.CreateFAdd(L: value, R: amt, Name: isInc ? "inc" : "dec");
2781
2782 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2783 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2784 value = Builder.CreateCall(
2785 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
2786 CGF.CGM.FloatTy),
2787 value, "incdec.conv");
2788 } else {
2789 value = Builder.CreateFPTrunc(V: value, DestTy: input->getType(), Name: "incdec.conv");
2790 }
2791 }
2792
2793 // Fixed-point types.
2794 } else if (type->isFixedPointType()) {
2795 // Fixed-point types are tricky. In some cases, it isn't possible to
2796 // represent a 1 or a -1 in the type at all. Piggyback off of
2797 // EmitFixedPointBinOp to avoid having to reimplement saturation.
2798 BinOpInfo Info;
2799 Info.E = E;
2800 Info.Ty = E->getType();
2801 Info.Opcode = isInc ? BO_Add : BO_Sub;
2802 Info.LHS = value;
2803 Info.RHS = llvm::ConstantInt::get(Ty: value->getType(), V: 1, IsSigned: false);
2804 // If the type is signed, it's better to represent this as +(-1) or -(-1),
2805 // since -1 is guaranteed to be representable.
2806 if (type->isSignedFixedPointType()) {
2807 Info.Opcode = isInc ? BO_Sub : BO_Add;
2808 Info.RHS = Builder.CreateNeg(V: Info.RHS);
2809 }
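    // For example, for a signed _Fract f, +1 is not representable but -1 is,
    // so '++f' is emitted as f - (-1); EmitFixedPointBinOp handles any
    // saturation.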
2810 // Now, convert from our invented integer literal to the type of the unary
2811 // op. This will upscale and saturate if necessary. This value can become
2812 // undef in some cases.
2813 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
2814 auto DstSema = CGF.getContext().getFixedPointSemantics(Ty: Info.Ty);
2815 Info.RHS = FPBuilder.CreateIntegerToFixed(Src: Info.RHS, SrcIsSigned: true, DstSema: DstSema);
2816 value = EmitFixedPointBinOp(Ops: Info);
2817
2818 // Objective-C pointer types.
2819 } else {
2820 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
2821
2822 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
2823 if (!isInc) size = -size;
2824 llvm::Value *sizeValue =
2825 llvm::ConstantInt::get(Ty: CGF.SizeTy, V: size.getQuantity());
2826
2827 if (CGF.getLangOpts().isSignedOverflowDefined())
2828 value = Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: value, IdxList: sizeValue, Name: "incdec.objptr");
2829 else
2830 value = CGF.EmitCheckedInBoundsGEP(
2831 ElemTy: CGF.Int8Ty, Ptr: value, IdxList: sizeValue, /*SignedIndices=*/false, IsSubtraction: isSubtraction,
2832 Loc: E->getExprLoc(), Name: "incdec.objptr");
2833 value = Builder.CreateBitCast(V: value, DestTy: input->getType());
2834 }
2835
2836 if (atomicPHI) {
2837 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
2838 llvm::BasicBlock *contBB = CGF.createBasicBlock(name: "atomic_cont", parent: CGF.CurFn);
2839 auto Pair = CGF.EmitAtomicCompareExchange(
2840 Obj: LV, Expected: RValue::get(V: atomicPHI), Desired: RValue::get(V: value), Loc: E->getExprLoc());
2841 llvm::Value *old = CGF.EmitToMemory(Value: Pair.first.getScalarVal(), Ty: type);
2842 llvm::Value *success = Pair.second;
2843 atomicPHI->addIncoming(V: old, BB: curBlock);
2844 Builder.CreateCondBr(Cond: success, True: contBB, False: atomicPHI->getParent());
2845 Builder.SetInsertPoint(contBB);
2846 return isPre ? value : input;
2847 }
2848
2849 // Store the updated result through the lvalue.
2850 if (LV.isBitField())
2851 CGF.EmitStoreThroughBitfieldLValue(Src: RValue::get(V: value), Dst: LV, Result: &value);
2852 else
2853 CGF.EmitStoreThroughLValue(Src: RValue::get(V: value), Dst: LV);
2854
2855 // If this is a postinc, return the value read from memory, otherwise use the
2856 // updated value.
2857 return isPre ? value : input;
2858}
2859
2860
2861Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
2862 QualType PromotionType) {
2863 QualType promotionTy = PromotionType.isNull()
2864 ? getPromotionType(Ty: E->getSubExpr()->getType())
2865 : PromotionType;
2866 Value *result = VisitPlus(E, PromotionType: promotionTy);
2867 if (result && !promotionTy.isNull())
2868 result = EmitUnPromotedValue(result, ExprType: E->getType());
2869 return result;
2870}
2871
2872Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,
2873 QualType PromotionType) {
  // The handling here differs from gcc's, most likely due to a bug in gcc.
2875 TestAndClearIgnoreResultAssign();
2876 if (!PromotionType.isNull())
2877 return CGF.EmitPromotedScalarExpr(E: E->getSubExpr(), PromotionType);
2878 return Visit(E: E->getSubExpr());
2879}
2880
2881Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
2882 QualType PromotionType) {
2883 QualType promotionTy = PromotionType.isNull()
2884 ? getPromotionType(Ty: E->getSubExpr()->getType())
2885 : PromotionType;
2886 Value *result = VisitMinus(E, PromotionType: promotionTy);
2887 if (result && !promotionTy.isNull())
2888 result = EmitUnPromotedValue(result, ExprType: E->getType());
2889 return result;
2890}
2891
2892Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,
2893 QualType PromotionType) {
2894 TestAndClearIgnoreResultAssign();
2895 Value *Op;
2896 if (!PromotionType.isNull())
2897 Op = CGF.EmitPromotedScalarExpr(E: E->getSubExpr(), PromotionType);
2898 else
2899 Op = Visit(E: E->getSubExpr());
2900
2901 // Generate a unary FNeg for FP ops.
2902 if (Op->getType()->isFPOrFPVectorTy())
2903 return Builder.CreateFNeg(V: Op, Name: "fneg");
2904
2905 // Emit unary minus with EmitSub so we handle overflow cases etc.
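  // E.g. '-x' overflows when x is INT_MIN; lowering it as '0 - x' lets the
  // signed-overflow handling in EmitSub (-ftrapv, UBSan) catch that case.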
2906 BinOpInfo BinOp;
2907 BinOp.RHS = Op;
2908 BinOp.LHS = llvm::Constant::getNullValue(Ty: BinOp.RHS->getType());
2909 BinOp.Ty = E->getType();
2910 BinOp.Opcode = BO_Sub;
2911 BinOp.FPFeatures = E->getFPFeaturesInEffect(LO: CGF.getLangOpts());
2912 BinOp.E = E;
2913 return EmitSub(Ops: BinOp);
2914}
2915
2916Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
2917 TestAndClearIgnoreResultAssign();
2918 Value *Op = Visit(E: E->getSubExpr());
2919 return Builder.CreateNot(V: Op, Name: "not");
2920}
2921
2922Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
2923 // Perform vector logical not on comparison with zero vector.
2924 if (E->getType()->isVectorType() &&
2925 E->getType()->castAs<VectorType>()->getVectorKind() ==
2926 VectorKind::Generic) {
2927 Value *Oper = Visit(E: E->getSubExpr());
2928 Value *Zero = llvm::Constant::getNullValue(Ty: Oper->getType());
2929 Value *Result;
2930 if (Oper->getType()->isFPOrFPVectorTy()) {
2931 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
2932 CGF, E->getFPFeaturesInEffect(LO: CGF.getLangOpts()));
2933 Result = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_OEQ, LHS: Oper, RHS: Zero, Name: "cmp");
2934 } else
2935 Result = Builder.CreateICmp(P: llvm::CmpInst::ICMP_EQ, LHS: Oper, RHS: Zero, Name: "cmp");
2936 return Builder.CreateSExt(V: Result, DestTy: ConvertType(T: E->getType()), Name: "sext");
2937 }
2938
2939 // Compare operand to zero.
2940 Value *BoolVal = CGF.EvaluateExprAsBool(E: E->getSubExpr());
2941
2942 // Invert value.
2943 // TODO: Could dynamically modify easy computations here. For example, if
2944 // the operand is an icmp ne, turn into icmp eq.
2945 BoolVal = Builder.CreateNot(V: BoolVal, Name: "lnot");
2946
2947 // ZExt result to the expr type.
2948 return Builder.CreateZExt(V: BoolVal, DestTy: ConvertType(T: E->getType()), Name: "lnot.ext");
2949}
2950
2951Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
2952 // Try folding the offsetof to a constant.
2953 Expr::EvalResult EVResult;
2954 if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
2955 llvm::APSInt Value = EVResult.Val.getInt();
2956 return Builder.getInt(AI: Value);
2957 }
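  // A non-constant offsetof arises when an array index is a runtime
  // expression, e.g. __builtin_offsetof(S, arr[i]); compute it piecewise.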
2958
2959 // Loop over the components of the offsetof to compute the value.
2960 unsigned n = E->getNumComponents();
2961 llvm::Type* ResultType = ConvertType(T: E->getType());
2962 llvm::Value* Result = llvm::Constant::getNullValue(Ty: ResultType);
2963 QualType CurrentType = E->getTypeSourceInfo()->getType();
2964 for (unsigned i = 0; i != n; ++i) {
2965 OffsetOfNode ON = E->getComponent(Idx: i);
2966 llvm::Value *Offset = nullptr;
2967 switch (ON.getKind()) {
2968 case OffsetOfNode::Array: {
2969 // Compute the index
2970 Expr *IdxExpr = E->getIndexExpr(Idx: ON.getArrayExprIndex());
2971 llvm::Value* Idx = CGF.EmitScalarExpr(E: IdxExpr);
2972 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
2973 Idx = Builder.CreateIntCast(V: Idx, DestTy: ResultType, isSigned: IdxSigned, Name: "conv");
2974
2975 // Save the element type
2976 CurrentType =
2977 CGF.getContext().getAsArrayType(T: CurrentType)->getElementType();
2978
2979 // Compute the element size
2980 llvm::Value* ElemSize = llvm::ConstantInt::get(Ty: ResultType,
2981 V: CGF.getContext().getTypeSizeInChars(T: CurrentType).getQuantity());
2982
2983 // Multiply out to compute the result
2984 Offset = Builder.CreateMul(LHS: Idx, RHS: ElemSize);
2985 break;
2986 }
2987
2988 case OffsetOfNode::Field: {
2989 FieldDecl *MemberDecl = ON.getField();
2990 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
2991 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(D: RD);
2992
2993 // Compute the index of the field in its parent.
2994 unsigned i = 0;
2995 // FIXME: It would be nice if we didn't have to loop here!
2996 for (RecordDecl::field_iterator Field = RD->field_begin(),
2997 FieldEnd = RD->field_end();
2998 Field != FieldEnd; ++Field, ++i) {
2999 if (*Field == MemberDecl)
3000 break;
3001 }
3002 assert(i < RL.getFieldCount() && "offsetof field in wrong type");
3003
3004 // Compute the offset to the field
3005 int64_t OffsetInt = RL.getFieldOffset(FieldNo: i) /
3006 CGF.getContext().getCharWidth();
3007 Offset = llvm::ConstantInt::get(Ty: ResultType, V: OffsetInt);
3008
3009 // Save the element type.
3010 CurrentType = MemberDecl->getType();
3011 break;
3012 }
3013
3014 case OffsetOfNode::Identifier:
3015 llvm_unreachable("dependent __builtin_offsetof");
3016
3017 case OffsetOfNode::Base: {
3018 if (ON.getBase()->isVirtual()) {
3019 CGF.ErrorUnsupported(E, "virtual base in offsetof");
3020 continue;
3021 }
3022
3023 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
3024 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(D: RD);
3025
3026 // Save the element type.
3027 CurrentType = ON.getBase()->getType();
3028
3029 // Compute the offset to the base.
3030 auto *BaseRT = CurrentType->castAs<RecordType>();
3031 auto *BaseRD = cast<CXXRecordDecl>(Val: BaseRT->getDecl());
3032 CharUnits OffsetInt = RL.getBaseClassOffset(Base: BaseRD);
3033 Offset = llvm::ConstantInt::get(Ty: ResultType, V: OffsetInt.getQuantity());
3034 break;
3035 }
3036 }
3037 Result = Builder.CreateAdd(LHS: Result, RHS: Offset);
3038 }
3039 return Result;
3040}
3041
/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type
/// of the argument of the sizeof expression as an integer.
3044Value *
3045ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
3046 const UnaryExprOrTypeTraitExpr *E) {
3047 QualType TypeToSize = E->getTypeOfArgument();
3048 if (auto Kind = E->getKind();
3049 Kind == UETT_SizeOf || Kind == UETT_DataSizeOf) {
3050 if (const VariableArrayType *VAT =
3051 CGF.getContext().getAsVariableArrayType(T: TypeToSize)) {
3052 if (E->isArgumentType()) {
3053 // sizeof(type) - make sure to emit the VLA size.
3054 CGF.EmitVariablyModifiedType(Ty: TypeToSize);
3055 } else {
3056 // C99 6.5.3.4p2: If the argument is an expression of type
3057 // VLA, it is evaluated.
3058 CGF.EmitIgnoredExpr(E: E->getArgumentExpr());
3059 }
3060
3061 auto VlaSize = CGF.getVLASize(vla: VAT);
3062 llvm::Value *size = VlaSize.NumElts;
3063
3064 // Scale the number of non-VLA elements by the non-VLA element size.
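      // E.g. for 'int a[n][4]', NumElts is the runtime n and VlaSize.Type is
      // int[4], so (assuming a 4-byte int) the result is n * 16 bytes.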
3065 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
3066 if (!eltSize.isOne())
3067 size = CGF.Builder.CreateNUWMul(LHS: CGF.CGM.getSize(numChars: eltSize), RHS: size);
3068
3069 return size;
3070 }
3071 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
3072 auto Alignment =
3073 CGF.getContext()
3074 .toCharUnitsFromBits(BitSize: CGF.getContext().getOpenMPDefaultSimdAlign(
3075 T: E->getTypeOfArgument()->getPointeeType()))
3076 .getQuantity();
3077 return llvm::ConstantInt::get(Ty: CGF.SizeTy, V: Alignment);
3078 } else if (E->getKind() == UETT_VectorElements) {
3079 auto *VecTy = cast<llvm::VectorType>(Val: ConvertType(T: E->getTypeOfArgument()));
3080 return Builder.CreateElementCount(DstType: CGF.SizeTy, EC: VecTy->getElementCount());
3081 }
3082
3083 // If this isn't sizeof(vla), the result must be constant; use the constant
3084 // folding logic so we don't have to duplicate it here.
3085 return Builder.getInt(AI: E->EvaluateKnownConstInt(CGF.getContext()));
3086}
3087
3088Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
3089 QualType PromotionType) {
3090 QualType promotionTy = PromotionType.isNull()
3091 ? getPromotionType(Ty: E->getSubExpr()->getType())
3092 : PromotionType;
3093 Value *result = VisitReal(E, PromotionType: promotionTy);
3094 if (result && !promotionTy.isNull())
3095 result = EmitUnPromotedValue(result, ExprType: E->getType());
3096 return result;
3097}
3098
3099Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
3100 QualType PromotionType) {
3101 Expr *Op = E->getSubExpr();
3102 if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value for which
    // this won't work, e.g. an Obj-C property.
3106 if (E->isGLValue()) {
3107 if (!PromotionType.isNull()) {
3108 CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(
3109 E: Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
3110 if (result.first)
3111 result.first = CGF.EmitPromotedValue(result, PromotionType).first;
3112 return result.first;
3113 } else {
3114 return CGF.EmitLoadOfLValue(V: CGF.EmitLValue(E), Loc: E->getExprLoc())
3115 .getScalarVal();
3116 }
3117 }
3118 // Otherwise, calculate and project.
3119 return CGF.EmitComplexExpr(E: Op, IgnoreReal: false, IgnoreImag: true).first;
3120 }
3121
3122 if (!PromotionType.isNull())
3123 return CGF.EmitPromotedScalarExpr(E: Op, PromotionType);
3124 return Visit(E: Op);
3125}
3126
3127Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
3128 QualType PromotionType) {
3129 QualType promotionTy = PromotionType.isNull()
3130 ? getPromotionType(Ty: E->getSubExpr()->getType())
3131 : PromotionType;
3132 Value *result = VisitImag(E, PromotionType: promotionTy);
3133 if (result && !promotionTy.isNull())
3134 result = EmitUnPromotedValue(result, ExprType: E->getType());
3135 return result;
3136}
3137
3138Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
3139 QualType PromotionType) {
3140 Expr *Op = E->getSubExpr();
3141 if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value for which
    // this won't work, e.g. an Obj-C property.
3145 if (Op->isGLValue()) {
3146 if (!PromotionType.isNull()) {
3147 CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(
3148 E: Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
3149 if (result.second)
3150 result.second = CGF.EmitPromotedValue(result, PromotionType).second;
3151 return result.second;
3152 } else {
3153 return CGF.EmitLoadOfLValue(V: CGF.EmitLValue(E), Loc: E->getExprLoc())
3154 .getScalarVal();
3155 }
3156 }
3157 // Otherwise, calculate and project.
3158 return CGF.EmitComplexExpr(E: Op, IgnoreReal: true, IgnoreImag: false).second;
3159 }
3160
3161 // __imag on a scalar returns zero. Emit the subexpr to ensure side
3162 // effects are evaluated, but not the actual value.
3163 if (Op->isGLValue())
3164 CGF.EmitLValue(E: Op);
3165 else if (!PromotionType.isNull())
3166 CGF.EmitPromotedScalarExpr(E: Op, PromotionType);
3167 else
3168 CGF.EmitScalarExpr(E: Op, IgnoreResultAssign: true);
3169 if (!PromotionType.isNull())
3170 return llvm::Constant::getNullValue(Ty: ConvertType(T: PromotionType));
3171 return llvm::Constant::getNullValue(Ty: ConvertType(T: E->getType()));
3172}
3173
3174//===----------------------------------------------------------------------===//
3175// Binary Operators
3176//===----------------------------------------------------------------------===//
3177
3178Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
3179 QualType PromotionType) {
3180 return CGF.Builder.CreateFPExt(V: result, DestTy: ConvertType(T: PromotionType), Name: "ext");
3181}
3182
3183Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
3184 QualType ExprType) {
3185 return CGF.Builder.CreateFPTrunc(V: result, DestTy: ConvertType(T: ExprType), Name: "unpromotion");
3186}
3187
3188Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
3189 E = E->IgnoreParens();
3190 if (auto BO = dyn_cast<BinaryOperator>(Val: E)) {
3191 switch (BO->getOpcode()) {
3192#define HANDLE_BINOP(OP) \
3193 case BO_##OP: \
3194 return Emit##OP(EmitBinOps(BO, PromotionType));
3195 HANDLE_BINOP(Add)
3196 HANDLE_BINOP(Sub)
3197 HANDLE_BINOP(Mul)
3198 HANDLE_BINOP(Div)
3199#undef HANDLE_BINOP
3200 default:
3201 break;
3202 }
3203 } else if (auto UO = dyn_cast<UnaryOperator>(Val: E)) {
3204 switch (UO->getOpcode()) {
3205 case UO_Imag:
3206 return VisitImag(E: UO, PromotionType);
3207 case UO_Real:
3208 return VisitReal(E: UO, PromotionType);
3209 case UO_Minus:
3210 return VisitMinus(E: UO, PromotionType);
3211 case UO_Plus:
3212 return VisitPlus(E: UO, PromotionType);
3213 default:
3214 break;
3215 }
3216 }
3217 auto result = Visit(E: const_cast<Expr *>(E));
3218 if (result) {
3219 if (!PromotionType.isNull())
3220 return EmitPromotedValue(result, PromotionType);
3221 else
3222 return EmitUnPromotedValue(result, ExprType: E->getType());
3223 }
3224 return result;
3225}
3226
3227BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
3228 QualType PromotionType) {
3229 TestAndClearIgnoreResultAssign();
3230 BinOpInfo Result;
3231 Result.LHS = CGF.EmitPromotedScalarExpr(E: E->getLHS(), PromotionType);
3232 Result.RHS = CGF.EmitPromotedScalarExpr(E: E->getRHS(), PromotionType);
3233 if (!PromotionType.isNull())
3234 Result.Ty = PromotionType;
3235 else
3236 Result.Ty = E->getType();
3237 Result.Opcode = E->getOpcode();
3238 Result.FPFeatures = E->getFPFeaturesInEffect(LO: CGF.getLangOpts());
3239 Result.E = E;
3240 return Result;
3241}
3242
3243LValue ScalarExprEmitter::EmitCompoundAssignLValue(
3244 const CompoundAssignOperator *E,
3245 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
3246 Value *&Result) {
3247 QualType LHSTy = E->getLHS()->getType();
3248 BinOpInfo OpInfo;
3249
3250 if (E->getComputationResultType()->isAnyComplexType())
3251 return CGF.EmitScalarCompoundAssignWithComplex(E, Result);
3252
  // Emit the RHS first. __block variables need to have the RHS evaluated
  // first; this should also improve codegen a little.
3255
3256 QualType PromotionTypeCR;
3257 PromotionTypeCR = getPromotionType(Ty: E->getComputationResultType());
3258 if (PromotionTypeCR.isNull())
3259 PromotionTypeCR = E->getComputationResultType();
3260 QualType PromotionTypeLHS = getPromotionType(Ty: E->getComputationLHSType());
3261 QualType PromotionTypeRHS = getPromotionType(Ty: E->getRHS()->getType());
3262 if (!PromotionTypeRHS.isNull())
3263 OpInfo.RHS = CGF.EmitPromotedScalarExpr(E: E->getRHS(), PromotionType: PromotionTypeRHS);
3264 else
3265 OpInfo.RHS = Visit(E: E->getRHS());
3266 OpInfo.Ty = PromotionTypeCR;
3267 OpInfo.Opcode = E->getOpcode();
3268 OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3269 OpInfo.E = E;
3270 // Load/convert the LHS.
3271 LValue LHSLV = EmitCheckedLValue(E: E->getLHS(), TCK: CodeGenFunction::TCK_Store);
3272
3273 llvm::PHINode *atomicPHI = nullptr;
3274 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
3275 QualType type = atomicTy->getValueType();
3276 if (!type->isBooleanType() && type->isIntegerType() &&
3277 !(type->isUnsignedIntegerType() &&
3278 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow)) &&
3279 CGF.getLangOpts().getSignedOverflowBehavior() !=
3280 LangOptions::SOB_Trapping) {
3281 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
3282 llvm::Instruction::BinaryOps Op;
3283 switch (OpInfo.Opcode) {
      // We don't have atomicrmw operations for *, %, /, <<, >>
3285 case BO_MulAssign: case BO_DivAssign:
3286 case BO_RemAssign:
3287 case BO_ShlAssign:
3288 case BO_ShrAssign:
3289 break;
3290 case BO_AddAssign:
3291 AtomicOp = llvm::AtomicRMWInst::Add;
3292 Op = llvm::Instruction::Add;
3293 break;
3294 case BO_SubAssign:
3295 AtomicOp = llvm::AtomicRMWInst::Sub;
3296 Op = llvm::Instruction::Sub;
3297 break;
3298 case BO_AndAssign:
3299 AtomicOp = llvm::AtomicRMWInst::And;
3300 Op = llvm::Instruction::And;
3301 break;
3302 case BO_XorAssign:
3303 AtomicOp = llvm::AtomicRMWInst::Xor;
3304 Op = llvm::Instruction::Xor;
3305 break;
3306 case BO_OrAssign:
3307 AtomicOp = llvm::AtomicRMWInst::Or;
3308 Op = llvm::Instruction::Or;
3309 break;
3310 default:
3311 llvm_unreachable("Invalid compound assignment type");
3312 }
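      // When a mapping exists, e.g. 'x += 5' on an _Atomic(int) lowers to a
      // single 'atomicrmw add' below instead of a compare-exchange loop.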
3313 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
3314 llvm::Value *Amt = CGF.EmitToMemory(
3315 Value: EmitScalarConversion(Src: OpInfo.RHS, SrcType: E->getRHS()->getType(), DstType: LHSTy,
3316 Loc: E->getExprLoc()),
3317 Ty: LHSTy);
3318 Value *OldVal = Builder.CreateAtomicRMW(
3319 Op: AtomicOp, Addr: LHSLV.getAddress(CGF), Val: Amt,
3320 Ordering: llvm::AtomicOrdering::SequentiallyConsistent);
3321
        // Since the operation is atomic, the result type is guaranteed to
        // be the same as the input in LLVM terms.
3324 Result = Builder.CreateBinOp(Opc: Op, LHS: OldVal, RHS: Amt);
3325 return LHSLV;
3326 }
3327 }
3328 // FIXME: For floating point types, we should be saving and restoring the
3329 // floating point environment in the loop.
3330 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3331 llvm::BasicBlock *opBB = CGF.createBasicBlock(name: "atomic_op", parent: CGF.CurFn);
3332 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3333 OpInfo.LHS = CGF.EmitToMemory(Value: OpInfo.LHS, Ty: type);
3334 Builder.CreateBr(Dest: opBB);
3335 Builder.SetInsertPoint(opBB);
3336 atomicPHI = Builder.CreatePHI(Ty: OpInfo.LHS->getType(), NumReservedValues: 2);
3337 atomicPHI->addIncoming(V: OpInfo.LHS, BB: startBB);
3338 OpInfo.LHS = atomicPHI;
3339 }
3340 else
3341 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3342
3343 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
3344 SourceLocation Loc = E->getExprLoc();
3345 if (!PromotionTypeLHS.isNull())
3346 OpInfo.LHS = EmitScalarConversion(Src: OpInfo.LHS, SrcType: LHSTy, DstType: PromotionTypeLHS,
3347 Loc: E->getExprLoc());
3348 else
3349 OpInfo.LHS = EmitScalarConversion(Src: OpInfo.LHS, SrcType: LHSTy,
3350 DstType: E->getComputationLHSType(), Loc);
3351
3352 // Expand the binary operator.
3353 Result = (this->*Func)(OpInfo);
3354
  // Convert the result back to the LHS type, potentially with an
  // implicit-conversion sanitizer check.
3357 Result = EmitScalarConversion(Src: Result, SrcType: PromotionTypeCR, DstType: LHSTy, Loc,
3358 Opts: ScalarConversionOpts(CGF.SanOpts));
3359
3360 if (atomicPHI) {
3361 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3362 llvm::BasicBlock *contBB = CGF.createBasicBlock(name: "atomic_cont", parent: CGF.CurFn);
3363 auto Pair = CGF.EmitAtomicCompareExchange(
3364 Obj: LHSLV, Expected: RValue::get(V: atomicPHI), Desired: RValue::get(V: Result), Loc: E->getExprLoc());
3365 llvm::Value *old = CGF.EmitToMemory(Value: Pair.first.getScalarVal(), Ty: LHSTy);
3366 llvm::Value *success = Pair.second;
3367 atomicPHI->addIncoming(V: old, BB: curBlock);
3368 Builder.CreateCondBr(Cond: success, True: contBB, False: atomicPHI->getParent());
3369 Builder.SetInsertPoint(contBB);
3370 return LHSLV;
3371 }
3372
3373 // Store the result value into the LHS lvalue. Bit-fields are handled
3374 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
3375 // 'An assignment expression has the value of the left operand after the
3376 // assignment...'.
3377 if (LHSLV.isBitField())
3378 CGF.EmitStoreThroughBitfieldLValue(Src: RValue::get(V: Result), Dst: LHSLV, Result: &Result);
3379 else
3380 CGF.EmitStoreThroughLValue(Src: RValue::get(V: Result), Dst: LHSLV);
3381
3382 if (CGF.getLangOpts().OpenMP)
3383 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,
3384 LHS: E->getLHS());
3385 return LHSLV;
3386}
3387
3388Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
3389 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
3390 bool Ignore = TestAndClearIgnoreResultAssign();
3391 Value *RHS = nullptr;
3392 LValue LHS = EmitCompoundAssignLValue(E, Func, Result&: RHS);
3393
3394 // If the result is clearly ignored, return now.
3395 if (Ignore)
3396 return nullptr;
3397
3398 // The result of an assignment in C is the assigned r-value.
3399 if (!CGF.getLangOpts().CPlusPlus)
3400 return RHS;
3401
3402 // If the lvalue is non-volatile, return the computed value of the assignment.
3403 if (!LHS.isVolatileQualified())
3404 return RHS;
3405
3406 // Otherwise, reload the value.
3407 return EmitLoadOfLValue(LHS, E->getExprLoc());
3408}
3409
3410void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
3411 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
3412 SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
3413
3414 if (CGF.SanOpts.has(K: SanitizerKind::IntegerDivideByZero)) {
3415 Checks.push_back(Elt: std::make_pair(x: Builder.CreateICmpNE(LHS: Ops.RHS, RHS: Zero),
3416 y: SanitizerKind::IntegerDivideByZero));
3417 }
3418
3419 const auto *BO = cast<BinaryOperator>(Val: Ops.E);
3420 if (CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow) &&
3421 Ops.Ty->hasSignedIntegerRepresentation() &&
3422 !IsWidenedIntegerOp(Ctx: CGF.getContext(), E: BO->getLHS()) &&
3423 Ops.mayHaveIntegerOverflow()) {
3424 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Val: Zero->getType());
3425
3426 llvm::Value *IntMin =
3427 Builder.getInt(AI: llvm::APInt::getSignedMinValue(numBits: Ty->getBitWidth()));
3428 llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
3429
3430 llvm::Value *LHSCmp = Builder.CreateICmpNE(LHS: Ops.LHS, RHS: IntMin);
3431 llvm::Value *RHSCmp = Builder.CreateICmpNE(LHS: Ops.RHS, RHS: NegOne);
3432 llvm::Value *NotOverflow = Builder.CreateOr(LHS: LHSCmp, RHS: RHSCmp, Name: "or");
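    // Signed division/remainder overflows only for INT_MIN / -1, so the
    // check passes iff LHS != INT_MIN or RHS != -1.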
3433 Checks.push_back(
3434 Elt: std::make_pair(x&: NotOverflow, y: SanitizerKind::SignedIntegerOverflow));
3435 }
3436
3437 if (Checks.size() > 0)
3438 EmitBinOpCheck(Checks, Info: Ops);
3439}
3440
3441Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
3442 {
3443 CodeGenFunction::SanitizerScope SanScope(&CGF);
3444 if ((CGF.SanOpts.has(K: SanitizerKind::IntegerDivideByZero) ||
3445 CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow)) &&
3446 Ops.Ty->isIntegerType() &&
3447 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3448 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: ConvertType(T: Ops.Ty));
3449 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, isDiv: true);
3450 } else if (CGF.SanOpts.has(K: SanitizerKind::FloatDivideByZero) &&
3451 Ops.Ty->isRealFloatingType() &&
3452 Ops.mayHaveFloatDivisionByZero()) {
3453 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: ConvertType(T: Ops.Ty));
3454 llvm::Value *NonZero = Builder.CreateFCmpUNE(LHS: Ops.RHS, RHS: Zero);
3455 EmitBinOpCheck(Checks: std::make_pair(x&: NonZero, y: SanitizerKind::FloatDivideByZero),
3456 Info: Ops);
3457 }
3458 }
3459
3460 if (Ops.Ty->isConstantMatrixType()) {
3461 llvm::MatrixBuilder MB(Builder);
3462 // We need to check the types of the operands of the operator to get the
3463 // correct matrix dimensions.
3464 auto *BO = cast<BinaryOperator>(Val: Ops.E);
3465 (void)BO;
3466 assert(
3467 isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&
3468 "first operand must be a matrix");
3469 assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
3470 "second operand must be an arithmetic type");
3471 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3472 return MB.CreateScalarDiv(LHS: Ops.LHS, RHS: Ops.RHS,
3473 IsUnsigned: Ops.Ty->hasUnsignedIntegerRepresentation());
3474 }
3475
3476 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
3477 llvm::Value *Val;
3478 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3479 Val = Builder.CreateFDiv(L: Ops.LHS, R: Ops.RHS, Name: "div");
3480 CGF.SetDivFPAccuracy(Val);
3481 return Val;
3482 }
3483 else if (Ops.isFixedPointOp())
3484 return EmitFixedPointBinOp(Ops);
3485 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
3486 return Builder.CreateUDiv(LHS: Ops.LHS, RHS: Ops.RHS, Name: "div");
3487 else
3488 return Builder.CreateSDiv(LHS: Ops.LHS, RHS: Ops.RHS, Name: "div");
3489}
3490
3491Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
3492 // Rem in C can't be a floating point type: C99 6.5.5p2.
3493 if ((CGF.SanOpts.has(K: SanitizerKind::IntegerDivideByZero) ||
3494 CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow)) &&
3495 Ops.Ty->isIntegerType() &&
3496 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3497 CodeGenFunction::SanitizerScope SanScope(&CGF);
3498 llvm::Value *Zero = llvm::Constant::getNullValue(Ty: ConvertType(T: Ops.Ty));
3499 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, isDiv: false);
3500 }
3501
3502 if (Ops.Ty->hasUnsignedIntegerRepresentation())
3503 return Builder.CreateURem(LHS: Ops.LHS, RHS: Ops.RHS, Name: "rem");
3504 else
3505 return Builder.CreateSRem(LHS: Ops.LHS, RHS: Ops.RHS, Name: "rem");
3506}
3507
3508Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
3509 unsigned IID;
3510 unsigned OpID = 0;
3511 SanitizerHandler OverflowKind;
3512
3513 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
3514 switch (Ops.Opcode) {
3515 case BO_Add:
3516 case BO_AddAssign:
3517 OpID = 1;
3518 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
3519 llvm::Intrinsic::uadd_with_overflow;
3520 OverflowKind = SanitizerHandler::AddOverflow;
3521 break;
3522 case BO_Sub:
3523 case BO_SubAssign:
3524 OpID = 2;
3525 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
3526 llvm::Intrinsic::usub_with_overflow;
3527 OverflowKind = SanitizerHandler::SubOverflow;
3528 break;
3529 case BO_Mul:
3530 case BO_MulAssign:
3531 OpID = 3;
3532 IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
3533 llvm::Intrinsic::umul_with_overflow;
3534 OverflowKind = SanitizerHandler::MulOverflow;
3535 break;
3536 default:
3537 llvm_unreachable("Unsupported operation for overflow detection");
3538 }
3539 OpID <<= 1;
3540 if (isSigned)
3541 OpID |= 1;
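  // The overflow handler thus receives (op << 1) | is_signed: 2/3 for
  // unsigned/signed add, 4/5 for sub, and 6/7 for mul.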
3542
3543 CodeGenFunction::SanitizerScope SanScope(&CGF);
3544 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(T: Ops.Ty);
3545
3546 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, Tys: opTy);
3547
3548 Value *resultAndOverflow = Builder.CreateCall(Callee: intrinsic, Args: {Ops.LHS, Ops.RHS});
3549 Value *result = Builder.CreateExtractValue(Agg: resultAndOverflow, Idxs: 0);
3550 Value *overflow = Builder.CreateExtractValue(Agg: resultAndOverflow, Idxs: 1);
3551
3552 // Handle overflow with llvm.trap if no custom handler has been specified.
3553 const std::string *handlerName =
3554 &CGF.getLangOpts().OverflowHandler;
3555 if (handlerName->empty()) {
3556 // If the signed-integer-overflow sanitizer is enabled, emit a call to its
3557 // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
3558 if (!isSigned || CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow)) {
3559 llvm::Value *NotOverflow = Builder.CreateNot(V: overflow);
3560 SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
3561 : SanitizerKind::UnsignedIntegerOverflow;
3562 EmitBinOpCheck(Checks: std::make_pair(x&: NotOverflow, y&: Kind), Info: Ops);
3563 } else
3564 CGF.EmitTrapCheck(Checked: Builder.CreateNot(V: overflow), CheckHandlerID: OverflowKind);
3565 return result;
3566 }
3567
3568 // Branch in case of overflow.
3569 llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
3570 llvm::BasicBlock *continueBB =
3571 CGF.createBasicBlock(name: "nooverflow", parent: CGF.CurFn, before: initialBB->getNextNode());
3572 llvm::BasicBlock *overflowBB = CGF.createBasicBlock(name: "overflow", parent: CGF.CurFn);
3573
3574 Builder.CreateCondBr(Cond: overflow, True: overflowBB, False: continueBB);
3575
3576 // If an overflow handler is set, then we want to call it and then use its
3577 // result, if it returns.
3578 Builder.SetInsertPoint(overflowBB);
3579
3580 // Get the overflow handler.
3581 llvm::Type *Int8Ty = CGF.Int8Ty;
3582 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
3583 llvm::FunctionType *handlerTy =
3584 llvm::FunctionType::get(Result: CGF.Int64Ty, Params: argTypes, isVarArg: true);
3585 llvm::FunctionCallee handler =
3586 CGF.CGM.CreateRuntimeFunction(Ty: handlerTy, Name: *handlerName);
3587
  // Sign-extend the args to 64 bits, so that we can use the same handler
  // for all types of overflow.
3590 llvm::Value *lhs = Builder.CreateSExt(V: Ops.LHS, DestTy: CGF.Int64Ty);
3591 llvm::Value *rhs = Builder.CreateSExt(V: Ops.RHS, DestTy: CGF.Int64Ty);
3592
3593 // Call the handler with the two arguments, the operation, and the size of
3594 // the result.
3595 llvm::Value *handlerArgs[] = {
3596 lhs,
3597 rhs,
3598 Builder.getInt8(C: OpID),
3599 Builder.getInt8(C: cast<llvm::IntegerType>(Val: opTy)->getBitWidth())
3600 };
3601 llvm::Value *handlerResult =
3602 CGF.EmitNounwindRuntimeCall(callee: handler, args: handlerArgs);
3603
3604 // Truncate the result back to the desired size.
3605 handlerResult = Builder.CreateTrunc(V: handlerResult, DestTy: opTy);
3606 Builder.CreateBr(Dest: continueBB);
3607
3608 Builder.SetInsertPoint(continueBB);
3609 llvm::PHINode *phi = Builder.CreatePHI(Ty: opTy, NumReservedValues: 2);
3610 phi->addIncoming(V: result, BB: initialBB);
3611 phi->addIncoming(V: handlerResult, BB: overflowBB);
3612
3613 return phi;
3614}
3615
3616/// Emit pointer + index arithmetic.
3617static Value *emitPointerArithmetic(CodeGenFunction &CGF,
3618 const BinOpInfo &op,
3619 bool isSubtraction) {
3620 // Must have binary (not unary) expr here. Unary pointer
3621 // increment/decrement doesn't use this path.
3622 const BinaryOperator *expr = cast<BinaryOperator>(Val: op.E);
3623
3624 Value *pointer = op.LHS;
3625 Expr *pointerOperand = expr->getLHS();
3626 Value *index = op.RHS;
3627 Expr *indexOperand = expr->getRHS();
3628
3629 // In a subtraction, the LHS is always the pointer.
3630 if (!isSubtraction && !pointer->getType()->isPointerTy()) {
3631 std::swap(a&: pointer, b&: index);
3632 std::swap(a&: pointerOperand, b&: indexOperand);
3633 }
3634
3635 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
3636
3637 unsigned width = cast<llvm::IntegerType>(Val: index->getType())->getBitWidth();
3638 auto &DL = CGF.CGM.getDataLayout();
3639 auto PtrTy = cast<llvm::PointerType>(Val: pointer->getType());
3640
3641 // Some versions of glibc and gcc use idioms (particularly in their malloc
3642 // routines) that add a pointer-sized integer (known to be a pointer value)
3643 // to a null pointer in order to cast the value back to an integer or as
3644 // part of a pointer alignment algorithm. This is undefined behavior, but
3645 // we'd like to be able to compile programs that use it.
3646 //
3647 // Normally, we'd generate a GEP with a null-pointer base here in response
3648 // to that code, but it's also UB to dereference a pointer created that
3649 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
3650 // generate a direct cast of the integer value to a pointer.
3651 //
  // The idiom (p = nullptr + N) does not apply if any of the following are
  // true:
  //
  //   The operation is subtraction.
  //   The index is not pointer-sized.
  //   The pointer type is not byte-sized.
  //
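  // For example, 'q = (char *)nullptr + n' compiles to a plain inttoptr of
  // n rather than a GEP off of a null base pointer.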
3658 if (BinaryOperator::isNullPointerArithmeticExtension(Ctx&: CGF.getContext(),
3659 Opc: op.Opcode,
3660 LHS: expr->getLHS(),
3661 RHS: expr->getRHS()))
3662 return CGF.Builder.CreateIntToPtr(V: index, DestTy: pointer->getType());
3663
3664 if (width != DL.getIndexTypeSizeInBits(Ty: PtrTy)) {
    // Zero-extend or sign-extend the index value according to whether the
    // index operand is signed or not.
3667 index = CGF.Builder.CreateIntCast(V: index, DestTy: DL.getIndexType(PtrTy), isSigned,
3668 Name: "idx.ext");
3669 }
3670
3671 // If this is subtraction, negate the index.
3672 if (isSubtraction)
3673 index = CGF.Builder.CreateNeg(V: index, Name: "idx.neg");
3674
3675 if (CGF.SanOpts.has(K: SanitizerKind::ArrayBounds))
3676 CGF.EmitBoundsCheck(E: op.E, Base: pointerOperand, Index: index, IndexType: indexOperand->getType(),
3677 /*Accessed*/ false);
3678
3679 const PointerType *pointerType
3680 = pointerOperand->getType()->getAs<PointerType>();
3681 if (!pointerType) {
3682 QualType objectType = pointerOperand->getType()
3683 ->castAs<ObjCObjectPointerType>()
3684 ->getPointeeType();
3685 llvm::Value *objectSize
3686 = CGF.CGM.getSize(numChars: CGF.getContext().getTypeSizeInChars(T: objectType));
3687
3688 index = CGF.Builder.CreateMul(LHS: index, RHS: objectSize);
3689
3690 Value *result =
3691 CGF.Builder.CreateGEP(Ty: CGF.Int8Ty, Ptr: pointer, IdxList: index, Name: "add.ptr");
3692 return CGF.Builder.CreateBitCast(V: result, DestTy: pointer->getType());
3693 }
3694
3695 QualType elementType = pointerType->getPointeeType();
3696 if (const VariableArrayType *vla
3697 = CGF.getContext().getAsVariableArrayType(T: elementType)) {
3698 // The element count here is the total number of non-VLA elements.
3699 llvm::Value *numElements = CGF.getVLASize(vla).NumElts;
3700
3701 // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indices are signed, and scaling an index isn't permitted to
    // overflow in a signed sense, so we use the same semantics for our
    // explicit multiply. We suppress this if overflow is not undefined
    // behavior.
3705 llvm::Type *elemTy = CGF.ConvertTypeForMem(T: vla->getElementType());
3706 if (CGF.getLangOpts().isSignedOverflowDefined()) {
3707 index = CGF.Builder.CreateMul(LHS: index, RHS: numElements, Name: "vla.index");
3708 pointer = CGF.Builder.CreateGEP(Ty: elemTy, Ptr: pointer, IdxList: index, Name: "add.ptr");
3709 } else {
3710 index = CGF.Builder.CreateNSWMul(LHS: index, RHS: numElements, Name: "vla.index");
3711 pointer = CGF.EmitCheckedInBoundsGEP(
3712 ElemTy: elemTy, Ptr: pointer, IdxList: index, SignedIndices: isSigned, IsSubtraction: isSubtraction, Loc: op.E->getExprLoc(),
3713 Name: "add.ptr");
3714 }
3715 return pointer;
3716 }
3717
  // Explicitly handle GNU void* and function pointer arithmetic extensions.
  // The GNU void* casts amount to no-ops since our void* type is i8*, but
  // this is future-proof.
3721 llvm::Type *elemTy;
3722 if (elementType->isVoidType() || elementType->isFunctionType())
3723 elemTy = CGF.Int8Ty;
3724 else
3725 elemTy = CGF.ConvertTypeForMem(T: elementType);
3726
3727 if (CGF.getLangOpts().isSignedOverflowDefined())
3728 return CGF.Builder.CreateGEP(Ty: elemTy, Ptr: pointer, IdxList: index, Name: "add.ptr");
3729
3730 return CGF.EmitCheckedInBoundsGEP(
3731 ElemTy: elemTy, Ptr: pointer, IdxList: index, SignedIndices: isSigned, IsSubtraction: isSubtraction, Loc: op.E->getExprLoc(),
3732 Name: "add.ptr");
3733}
3734
3735// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
3736// Addend. Use negMul and negAdd to negate the first operand of the Mul or
3737// the add operand respectively. This allows fmuladd to represent a*b-c, or
3738// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
3739// efficient operations.
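// For instance, 'a * b - c' becomes fmuladd(a, b, -c) via negAdd, and
// 'c - a * b' becomes fmuladd(-a, b, c) via negMul.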
3740static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
3741 const CodeGenFunction &CGF, CGBuilderTy &Builder,
3742 bool negMul, bool negAdd) {
3743 Value *MulOp0 = MulOp->getOperand(i: 0);
3744 Value *MulOp1 = MulOp->getOperand(i: 1);
3745 if (negMul)
3746 MulOp0 = Builder.CreateFNeg(V: MulOp0, Name: "neg");
3747 if (negAdd)
3748 Addend = Builder.CreateFNeg(V: Addend, Name: "neg");
3749
3750 Value *FMulAdd = nullptr;
3751 if (Builder.getIsFPConstrained()) {
3752 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
3753 "Only constrained operation should be created when Builder is in FP "
3754 "constrained mode");
3755 FMulAdd = Builder.CreateConstrainedFPCall(
3756 CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
3757 Addend->getType()),
3758 {MulOp0, MulOp1, Addend});
3759 } else {
3760 FMulAdd = Builder.CreateCall(
3761 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
3762 {MulOp0, MulOp1, Addend});
3763 }
3764 MulOp->eraseFromParent();
3765
3766 return FMulAdd;
3767}
3768
3769// Check whether it would be legal to emit an fmuladd intrinsic call to
3770// represent op and if so, build the fmuladd.
3771//
3772// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
3773// Does NOT check the type of the operation - it's assumed that this function
3774// will be called from contexts where it's known that the type is contractable.
3775static Value* tryEmitFMulAdd(const BinOpInfo &op,
3776 const CodeGenFunction &CGF, CGBuilderTy &Builder,
3777 bool isSub=false) {
3778
3779 assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
3780 op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
3781 "Only fadd/fsub can be the root of an fmuladd.");
3782
3783 // Check whether this op is marked as fusable.
3784 if (!op.FPFeatures.allowFPContractWithinStatement())
3785 return nullptr;
3786
3787 Value *LHS = op.LHS;
3788 Value *RHS = op.RHS;
3789
  // Peek through fneg to look for fmul. Make sure the fneg has no other
  // users, and that it is the only user of its operand.
3792 bool NegLHS = false;
3793 if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(Val: LHS)) {
3794 if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
3795 LHSUnOp->use_empty() && LHSUnOp->getOperand(i_nocapture: 0)->hasOneUse()) {
3796 LHS = LHSUnOp->getOperand(i_nocapture: 0);
3797 NegLHS = true;
3798 }
3799 }
3800
3801 bool NegRHS = false;
3802 if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(Val: RHS)) {
3803 if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
3804 RHSUnOp->use_empty() && RHSUnOp->getOperand(i_nocapture: 0)->hasOneUse()) {
3805 RHS = RHSUnOp->getOperand(i_nocapture: 0);
3806 NegRHS = true;
3807 }
3808 }
3809
  // We have a potentially fusable op. Look for a mul on one of the operands.
  // Also, make sure that the mul result isn't used directly (except by an
  // fneg we looked through); in that case, there's no point in creating a
  // muladd operation.
3813 if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(Val: LHS)) {
3814 if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
3815 (LHSBinOp->use_empty() || NegLHS)) {
3816 // If we looked through fneg, erase it.
3817 if (NegLHS)
3818 cast<llvm::Instruction>(Val: op.LHS)->eraseFromParent();
3819 return buildFMulAdd(MulOp: LHSBinOp, Addend: op.RHS, CGF, Builder, negMul: NegLHS, negAdd: isSub);
3820 }
3821 }
3822 if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(Val: RHS)) {
3823 if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
3824 (RHSBinOp->use_empty() || NegRHS)) {
3825 // If we looked through fneg, erase it.
3826 if (NegRHS)
3827 cast<llvm::Instruction>(Val: op.RHS)->eraseFromParent();
3828 return buildFMulAdd(MulOp: RHSBinOp, Addend: op.LHS, CGF, Builder, negMul: isSub ^ NegRHS, negAdd: false);
3829 }
3830 }
3831
3832 if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(Val: LHS)) {
3833 if (LHSBinOp->getIntrinsicID() ==
3834 llvm::Intrinsic::experimental_constrained_fmul &&
3835 (LHSBinOp->use_empty() || NegLHS)) {
3836 // If we looked through fneg, erase it.
3837 if (NegLHS)
3838 cast<llvm::Instruction>(Val: op.LHS)->eraseFromParent();
3839 return buildFMulAdd(MulOp: LHSBinOp, Addend: op.RHS, CGF, Builder, negMul: NegLHS, negAdd: isSub);
3840 }
3841 }
3842 if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(Val: RHS)) {
3843 if (RHSBinOp->getIntrinsicID() ==
3844 llvm::Intrinsic::experimental_constrained_fmul &&
3845 (RHSBinOp->use_empty() || NegRHS)) {
3846 // If we looked through fneg, erase it.
3847 if (NegRHS)
3848 cast<llvm::Instruction>(Val: op.RHS)->eraseFromParent();
3849 return buildFMulAdd(MulOp: RHSBinOp, Addend: op.LHS, CGF, Builder, negMul: isSub ^ NegRHS, negAdd: false);
3850 }
3851 }
3852
3853 return nullptr;
3854}
3855
3856Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
3857 if (op.LHS->getType()->isPointerTy() ||
3858 op.RHS->getType()->isPointerTy())
3859 return emitPointerArithmetic(CGF, op, isSubtraction: CodeGenFunction::NotSubtraction);
3860
3861 if (op.Ty->isSignedIntegerOrEnumerationType()) {
3862 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
3863 case LangOptions::SOB_Defined:
3864 return Builder.CreateAdd(LHS: op.LHS, RHS: op.RHS, Name: "add");
3865 case LangOptions::SOB_Undefined:
3866 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
3867 return Builder.CreateNSWAdd(LHS: op.LHS, RHS: op.RHS, Name: "add");
3868 [[fallthrough]];
3869 case LangOptions::SOB_Trapping:
3870 if (CanElideOverflowCheck(Ctx: CGF.getContext(), Op: op))
3871 return Builder.CreateNSWAdd(LHS: op.LHS, RHS: op.RHS, Name: "add");
3872 return EmitOverflowCheckedBinOp(Ops: op);
3873 }
3874 }
3875
  // For vector and matrix adds, try to fold into an fmuladd.
3877 if (op.LHS->getType()->isFPOrFPVectorTy()) {
3878 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
3879 // Try to form an fmuladd.
3880 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
3881 return FMulAdd;
3882 }
3883
3884 if (op.Ty->isConstantMatrixType()) {
3885 llvm::MatrixBuilder MB(Builder);
3886 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
3887 return MB.CreateAdd(LHS: op.LHS, RHS: op.RHS);
3888 }
3889
3890 if (op.Ty->isUnsignedIntegerType() &&
3891 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow) &&
3892 !CanElideOverflowCheck(Ctx: CGF.getContext(), Op: op))
3893 return EmitOverflowCheckedBinOp(Ops: op);
3894
3895 if (op.LHS->getType()->isFPOrFPVectorTy()) {
3896 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
3897 return Builder.CreateFAdd(L: op.LHS, R: op.RHS, Name: "add");
3898 }
3899
3900 if (op.isFixedPointOp())
3901 return EmitFixedPointBinOp(Ops: op);
3902
3903 return Builder.CreateAdd(LHS: op.LHS, RHS: op.RHS, Name: "add");
3904}
3905
/// The resulting value must be calculated with exact precision, so the
/// operands might not have the same type.
3908Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
3909 using llvm::APSInt;
3910 using llvm::ConstantInt;
3911
3912 // This is either a binary operation where at least one of the operands is
3913 // a fixed-point type, or a unary operation where the operand is a fixed-point
3914 // type. The result type of a binary operation is determined by
3915 // Sema::handleFixedPointConversions().
3916 QualType ResultTy = op.Ty;
3917 QualType LHSTy, RHSTy;
3918 if (const auto *BinOp = dyn_cast<BinaryOperator>(Val: op.E)) {
3919 RHSTy = BinOp->getRHS()->getType();
3920 if (const auto *CAO = dyn_cast<CompoundAssignOperator>(Val: BinOp)) {
3921 // For compound assignment, the effective type of the LHS at this point
3922 // is the computation LHS type, not the actual LHS type, and the final
3923 // result type is not the type of the expression but rather the
3924 // computation result type.
3925 LHSTy = CAO->getComputationLHSType();
3926 ResultTy = CAO->getComputationResultType();
3927 } else
3928 LHSTy = BinOp->getLHS()->getType();
3929 } else if (const auto *UnOp = dyn_cast<UnaryOperator>(Val: op.E)) {
3930 LHSTy = UnOp->getSubExpr()->getType();
3931 RHSTy = UnOp->getSubExpr()->getType();
3932 }
3933 ASTContext &Ctx = CGF.getContext();
3934 Value *LHS = op.LHS;
3935 Value *RHS = op.RHS;
3936
3937 auto LHSFixedSema = Ctx.getFixedPointSemantics(Ty: LHSTy);
3938 auto RHSFixedSema = Ctx.getFixedPointSemantics(Ty: RHSTy);
3939 auto ResultFixedSema = Ctx.getFixedPointSemantics(Ty: ResultTy);
3940 auto CommonFixedSema = LHSFixedSema.getCommonSemantics(Other: RHSFixedSema);
3941
3942 // Perform the actual operation.
3943 Value *Result;
3944 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
3945 switch (op.Opcode) {
3946 case BO_AddAssign:
3947 case BO_Add:
3948 Result = FPBuilder.CreateAdd(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
3949 break;
3950 case BO_SubAssign:
3951 case BO_Sub:
3952 Result = FPBuilder.CreateSub(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
3953 break;
3954 case BO_MulAssign:
3955 case BO_Mul:
3956 Result = FPBuilder.CreateMul(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
3957 break;
3958 case BO_DivAssign:
3959 case BO_Div:
3960 Result = FPBuilder.CreateDiv(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
3961 break;
3962 case BO_ShlAssign:
3963 case BO_Shl:
3964 Result = FPBuilder.CreateShl(LHS, LHSSema: LHSFixedSema, RHS);
3965 break;
3966 case BO_ShrAssign:
3967 case BO_Shr:
3968 Result = FPBuilder.CreateShr(LHS, LHSSema: LHSFixedSema, RHS);
3969 break;
3970 case BO_LT:
3971 return FPBuilder.CreateLT(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
3972 case BO_GT:
3973 return FPBuilder.CreateGT(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
3974 case BO_LE:
3975 return FPBuilder.CreateLE(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
3976 case BO_GE:
3977 return FPBuilder.CreateGE(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
3978 case BO_EQ:
  // For equality operations, we assume any padding bits on unsigned types
  // are zeroed out. They could be overwritten by non-saturating operations
  // that overflow, but doing so is undefined behavior.
3982 return FPBuilder.CreateEQ(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
3983 case BO_NE:
3984 return FPBuilder.CreateNE(LHS, LHSSema: LHSFixedSema, RHS, RHSSema: RHSFixedSema);
3985 case BO_Cmp:
3986 case BO_LAnd:
3987 case BO_LOr:
3988 llvm_unreachable("Found unimplemented fixed point binary operation");
3989 case BO_PtrMemD:
3990 case BO_PtrMemI:
3991 case BO_Rem:
3992 case BO_Xor:
3993 case BO_And:
3994 case BO_Or:
3995 case BO_Assign:
3996 case BO_RemAssign:
3997 case BO_AndAssign:
3998 case BO_XorAssign:
3999 case BO_OrAssign:
4000 case BO_Comma:
4001 llvm_unreachable("Found unsupported binary operation for fixed point types.");
4002 }
4003
4004 bool IsShift = BinaryOperator::isShiftOp(Opc: op.Opcode) ||
4005 BinaryOperator::isShiftAssignOp(Opc: op.Opcode);
4006 // Convert to the result type.
4007 return FPBuilder.CreateFixedToFixed(Src: Result, SrcSema: IsShift ? LHSFixedSema
4008 : CommonFixedSema,
4009 DstSema: ResultFixedSema);
4010}
4011
4012Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
4013 // The LHS is always a pointer if either side is.
4014 if (!op.LHS->getType()->isPointerTy()) {
4015 if (op.Ty->isSignedIntegerOrEnumerationType()) {
4016 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
4017 case LangOptions::SOB_Defined:
4018 return Builder.CreateSub(LHS: op.LHS, RHS: op.RHS, Name: "sub");
4019 case LangOptions::SOB_Undefined:
4020 if (!CGF.SanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
4021 return Builder.CreateNSWSub(LHS: op.LHS, RHS: op.RHS, Name: "sub");
4022 [[fallthrough]];
4023 case LangOptions::SOB_Trapping:
4024 if (CanElideOverflowCheck(Ctx: CGF.getContext(), Op: op))
4025 return Builder.CreateNSWSub(LHS: op.LHS, RHS: op.RHS, Name: "sub");
4026 return EmitOverflowCheckedBinOp(Ops: op);
4027 }
4028 }
4029
    // For vector and matrix subs, try to fold into an fmuladd.
4031 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4032 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4033 // Try to form an fmuladd.
4034 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, isSub: true))
4035 return FMulAdd;
4036 }
4037
4038 if (op.Ty->isConstantMatrixType()) {
4039 llvm::MatrixBuilder MB(Builder);
4040 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4041 return MB.CreateSub(LHS: op.LHS, RHS: op.RHS);
4042 }
4043
4044 if (op.Ty->isUnsignedIntegerType() &&
4045 CGF.SanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow) &&
4046 !CanElideOverflowCheck(Ctx: CGF.getContext(), Op: op))
4047 return EmitOverflowCheckedBinOp(Ops: op);
4048
4049 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4050 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4051 return Builder.CreateFSub(L: op.LHS, R: op.RHS, Name: "sub");
4052 }
4053
4054 if (op.isFixedPointOp())
4055 return EmitFixedPointBinOp(op);
4056
4057 return Builder.CreateSub(LHS: op.LHS, RHS: op.RHS, Name: "sub");
4058 }
4059
4060 // If the RHS is not a pointer, then we have normal pointer
4061 // arithmetic.
4062 if (!op.RHS->getType()->isPointerTy())
4063 return emitPointerArithmetic(CGF, op, isSubtraction: CodeGenFunction::IsSubtraction);
4064
4065 // Otherwise, this is a pointer subtraction.
4066
4067 // Do the raw subtraction part.
4068 llvm::Value *LHS
4069 = Builder.CreatePtrToInt(V: op.LHS, DestTy: CGF.PtrDiffTy, Name: "sub.ptr.lhs.cast");
4070 llvm::Value *RHS
4071 = Builder.CreatePtrToInt(V: op.RHS, DestTy: CGF.PtrDiffTy, Name: "sub.ptr.rhs.cast");
4072 Value *diffInChars = Builder.CreateSub(LHS, RHS, Name: "sub.ptr.sub");
4073
4074 // Okay, figure out the element size.
4075 const BinaryOperator *expr = cast<BinaryOperator>(Val: op.E);
4076 QualType elementType = expr->getLHS()->getType()->getPointeeType();
4077
4078 llvm::Value *divisor = nullptr;
4079
4080 // For a variable-length array, this is going to be non-constant.
4081 if (const VariableArrayType *vla
4082 = CGF.getContext().getAsVariableArrayType(T: elementType)) {
4083 auto VlaSize = CGF.getVLASize(vla);
4084 elementType = VlaSize.Type;
4085 divisor = VlaSize.NumElts;
4086
4087 // Scale the number of non-VLA elements by the non-VLA element size.
4088 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(T: elementType);
4089 if (!eltSize.isOne())
4090 divisor = CGF.Builder.CreateNUWMul(LHS: CGF.CGM.getSize(numChars: eltSize), RHS: divisor);
4091
4092    // For everything else, we can just compute it, safe in the
4093    // assumption that Sema won't let anything through that we can't
4094    // safely compute the size of.
4095 } else {
4096 CharUnits elementSize;
4097 // Handle GCC extension for pointer arithmetic on void* and
4098 // function pointer types.
4099 if (elementType->isVoidType() || elementType->isFunctionType())
4100 elementSize = CharUnits::One();
4101 else
4102 elementSize = CGF.getContext().getTypeSizeInChars(T: elementType);
4103
4104 // Don't even emit the divide for element size of 1.
4105 if (elementSize.isOne())
4106 return diffInChars;
4107
4108 divisor = CGF.CGM.getSize(numChars: elementSize);
4109 }
4110
4111 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
4112 // pointer difference in C is only defined in the case where both operands
4113 // are pointing to elements of an array.
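  // For example (illustrative, assuming sizeof(int) == 4): for `int *p, *q;`,
  // `p - q` is emitted roughly as
  //   %d = sub i64 %lhs.int, %rhs.int
  //   %r = sdiv exact i64 %d, 4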
4114 return Builder.CreateExactSDiv(LHS: diffInChars, RHS: divisor, Name: "sub.ptr.div");
4115}
4116
4117Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS) {
4118 llvm::IntegerType *Ty;
4119 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(Val: LHS->getType()))
4120 Ty = cast<llvm::IntegerType>(Val: VT->getElementType());
4121 else
4122 Ty = cast<llvm::IntegerType>(Val: LHS->getType());
4123  // For a given LHS type, the maximum shift amount is width(LHS)-1. However,
4124  // width(LHS)-1 may exceed range(RHS), and since ConstantInt::get does not
4125  // check for this, the value would get silently truncated. Constrain the
4126  // return value to max(RHS) in that case.
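  // For example (illustrative): if LHS is i512 and RHS is i8, width(LHS)-1 is
  // 511, which does not fit in i8, so we return i8 255 (max(RHS)) instead.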
4127 llvm::Type *RHSTy = RHS->getType();
4128 llvm::APInt RHSMax = llvm::APInt::getMaxValue(numBits: RHSTy->getScalarSizeInBits());
4129 if (RHSMax.ult(RHS: Ty->getBitWidth()))
4130 return llvm::ConstantInt::get(Ty: RHSTy, V: RHSMax);
4131 return llvm::ConstantInt::get(Ty: RHSTy, V: Ty->getBitWidth() - 1);
4132}
4133
4134Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
4135 const Twine &Name) {
4136 llvm::IntegerType *Ty;
4137 if (auto *VT = dyn_cast<llvm::VectorType>(Val: LHS->getType()))
4138 Ty = cast<llvm::IntegerType>(Val: VT->getElementType());
4139 else
4140 Ty = cast<llvm::IntegerType>(Val: LHS->getType());
4141
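  // For example (illustrative): for i32 the mask form emits `and %rhs, 31`,
  // while a hypothetical 24-bit integer type needs `urem %rhs, 24` instead.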
4142 if (llvm::isPowerOf2_64(Value: Ty->getBitWidth()))
4143 return Builder.CreateAnd(LHS: RHS, RHS: GetMaximumShiftAmount(LHS, RHS), Name);
4144
4145 return Builder.CreateURem(
4146 LHS: RHS, RHS: llvm::ConstantInt::get(Ty: RHS->getType(), V: Ty->getBitWidth()), Name);
4147}
4148
4149Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
4150 // TODO: This misses out on the sanitizer check below.
4151 if (Ops.isFixedPointOp())
4152 return EmitFixedPointBinOp(op: Ops);
4153
4154 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4155 // RHS to the same size as the LHS.
4156 Value *RHS = Ops.RHS;
4157 if (Ops.LHS->getType() != RHS->getType())
4158 RHS = Builder.CreateIntCast(V: RHS, DestTy: Ops.LHS->getType(), isSigned: false, Name: "sh_prom");
4159
4160 bool SanitizeSignedBase = CGF.SanOpts.has(K: SanitizerKind::ShiftBase) &&
4161 Ops.Ty->hasSignedIntegerRepresentation() &&
4162 !CGF.getLangOpts().isSignedOverflowDefined() &&
4163 !CGF.getLangOpts().CPlusPlus20;
4164 bool SanitizeUnsignedBase =
4165 CGF.SanOpts.has(K: SanitizerKind::UnsignedShiftBase) &&
4166 Ops.Ty->hasUnsignedIntegerRepresentation();
4167 bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
4168 bool SanitizeExponent = CGF.SanOpts.has(K: SanitizerKind::ShiftExponent);
4169 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4170 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4171 RHS = ConstrainShiftValue(LHS: Ops.LHS, RHS, Name: "shl.mask");
4172 else if ((SanitizeBase || SanitizeExponent) &&
4173 isa<llvm::IntegerType>(Val: Ops.LHS->getType())) {
4174 CodeGenFunction::SanitizerScope SanScope(&CGF);
4175 SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
4176 llvm::Value *WidthMinusOne = GetMaximumShiftAmount(LHS: Ops.LHS, RHS: Ops.RHS);
4177 llvm::Value *ValidExponent = Builder.CreateICmpULE(LHS: Ops.RHS, RHS: WidthMinusOne);
4178
4179 if (SanitizeExponent) {
4180 Checks.push_back(
4181 Elt: std::make_pair(x&: ValidExponent, y: SanitizerKind::ShiftExponent));
4182 }
4183
4184 if (SanitizeBase) {
4185      // Check whether we are shifting any non-zero bits off the top of the
4186      // integer. We only emit this check if the exponent is valid; otherwise
4187      // the instructions below would have undefined behavior themselves.
4188 llvm::BasicBlock *Orig = Builder.GetInsertBlock();
4189 llvm::BasicBlock *Cont = CGF.createBasicBlock(name: "cont");
4190 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock(name: "check");
4191 Builder.CreateCondBr(Cond: ValidExponent, True: CheckShiftBase, False: Cont);
4192 llvm::Value *PromotedWidthMinusOne =
4193 (RHS == Ops.RHS) ? WidthMinusOne
4194 : GetMaximumShiftAmount(LHS: Ops.LHS, RHS);
4195 CGF.EmitBlock(BB: CheckShiftBase);
4196 llvm::Value *BitsShiftedOff = Builder.CreateLShr(
4197 LHS: Ops.LHS, RHS: Builder.CreateSub(LHS: PromotedWidthMinusOne, RHS, Name: "shl.zeros",
4198 /*NUW*/ HasNUW: true, /*NSW*/ HasNSW: true),
4199 Name: "shl.check");
4200 if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
4201 // In C99, we are not permitted to shift a 1 bit into the sign bit.
4202 // Under C++11's rules, shifting a 1 bit into the sign bit is
4203 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
4204 // define signed left shifts, so we use the C99 and C++11 rules there).
4205 // Unsigned shifts can always shift into the top bit.
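        // For example (illustrative, assuming 32-bit int): in C99, `1 << 31`
        // is UB because a one is shifted into the sign bit; under C++11,
        // `1 << 31` is OK but `INT_MIN << 1` shifts a one out of the sign bit
        // and is not.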
4206 llvm::Value *One = llvm::ConstantInt::get(Ty: BitsShiftedOff->getType(), V: 1);
4207 BitsShiftedOff = Builder.CreateLShr(LHS: BitsShiftedOff, RHS: One);
4208 }
4209 llvm::Value *Zero = llvm::ConstantInt::get(Ty: BitsShiftedOff->getType(), V: 0);
4210 llvm::Value *ValidBase = Builder.CreateICmpEQ(LHS: BitsShiftedOff, RHS: Zero);
4211 CGF.EmitBlock(BB: Cont);
4212 llvm::PHINode *BaseCheck = Builder.CreatePHI(Ty: ValidBase->getType(), NumReservedValues: 2);
4213 BaseCheck->addIncoming(V: Builder.getTrue(), BB: Orig);
4214 BaseCheck->addIncoming(V: ValidBase, BB: CheckShiftBase);
4215 Checks.push_back(Elt: std::make_pair(
4216 x&: BaseCheck, y: SanitizeSignedBase ? SanitizerKind::ShiftBase
4217 : SanitizerKind::UnsignedShiftBase));
4218 }
4219
4220 assert(!Checks.empty());
4221 EmitBinOpCheck(Checks, Info: Ops);
4222 }
4223
4224 return Builder.CreateShl(LHS: Ops.LHS, RHS, Name: "shl");
4225}
4226
4227Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
4228 // TODO: This misses out on the sanitizer check below.
4229 if (Ops.isFixedPointOp())
4230 return EmitFixedPointBinOp(op: Ops);
4231
4232 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4233 // RHS to the same size as the LHS.
4234 Value *RHS = Ops.RHS;
4235 if (Ops.LHS->getType() != RHS->getType())
4236 RHS = Builder.CreateIntCast(V: RHS, DestTy: Ops.LHS->getType(), isSigned: false, Name: "sh_prom");
4237
4238 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4239 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4240 RHS = ConstrainShiftValue(LHS: Ops.LHS, RHS, Name: "shr.mask");
4241 else if (CGF.SanOpts.has(K: SanitizerKind::ShiftExponent) &&
4242 isa<llvm::IntegerType>(Val: Ops.LHS->getType())) {
4243 CodeGenFunction::SanitizerScope SanScope(&CGF);
4244 llvm::Value *Valid =
4245 Builder.CreateICmpULE(LHS: Ops.RHS, RHS: GetMaximumShiftAmount(LHS: Ops.LHS, RHS: Ops.RHS));
4246 EmitBinOpCheck(Checks: std::make_pair(x&: Valid, y: SanitizerKind::ShiftExponent), Info: Ops);
4247 }
4248
4249 if (Ops.Ty->hasUnsignedIntegerRepresentation())
4250 return Builder.CreateLShr(LHS: Ops.LHS, RHS, Name: "shr");
4251 return Builder.CreateAShr(LHS: Ops.LHS, RHS, Name: "shr");
4252}
4253
4254enum IntrinsicType { VCMPEQ, VCMPGT };
4255// Return the corresponding comparison intrinsic for the given element type.
4256static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
4257 BuiltinType::Kind ElemKind) {
4258 switch (ElemKind) {
4259 default: llvm_unreachable("unexpected element type");
4260 case BuiltinType::Char_U:
4261 case BuiltinType::UChar:
4262 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4263 llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
4264 case BuiltinType::Char_S:
4265 case BuiltinType::SChar:
4266 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4267 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
4268 case BuiltinType::UShort:
4269 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4270 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
4271 case BuiltinType::Short:
4272 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4273 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
4274 case BuiltinType::UInt:
4275 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4276 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
4277 case BuiltinType::Int:
4278 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4279 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
4280 case BuiltinType::ULong:
4281 case BuiltinType::ULongLong:
4282 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4283 llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
4284 case BuiltinType::Long:
4285 case BuiltinType::LongLong:
4286 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4287 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
4288 case BuiltinType::Float:
4289 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
4290 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
4291 case BuiltinType::Double:
4292 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
4293 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
4294 case BuiltinType::UInt128:
4295 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4296 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
4297 case BuiltinType::Int128:
4298 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4299 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
4300 }
4301}
4302
4303Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
4304 llvm::CmpInst::Predicate UICmpOpc,
4305 llvm::CmpInst::Predicate SICmpOpc,
4306 llvm::CmpInst::Predicate FCmpOpc,
4307 bool IsSignaling) {
4308 TestAndClearIgnoreResultAssign();
4309 Value *Result;
4310 QualType LHSTy = E->getLHS()->getType();
4311 QualType RHSTy = E->getRHS()->getType();
4312 if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
4313 assert(E->getOpcode() == BO_EQ ||
4314 E->getOpcode() == BO_NE);
4315 Value *LHS = CGF.EmitScalarExpr(E: E->getLHS());
4316 Value *RHS = CGF.EmitScalarExpr(E: E->getRHS());
4317 Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
4318 CGF, L: LHS, R: RHS, MPT, Inequality: E->getOpcode() == BO_NE);
4319 } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
4320 BinOpInfo BOInfo = EmitBinOps(E);
4321 Value *LHS = BOInfo.LHS;
4322 Value *RHS = BOInfo.RHS;
4323
4324    // For AltiVec, the comparison yields a numeric type, so we use predicate
4325    // intrinsics that compare the vectors and give 0 or 1 as a result.
4326 if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
4327 // constants for mapping CR6 register bits to predicate result
4328 enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
4329
4330 llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
4331
4332      // In several cases the vector argument order will be reversed.
4333 Value *FirstVecArg = LHS,
4334 *SecondVecArg = RHS;
4335
4336 QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
4337 BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();
4338
4339 switch(E->getOpcode()) {
4340 default: llvm_unreachable("is not a comparison operation");
4341 case BO_EQ:
4342 CR6 = CR6_LT;
4343 ID = GetIntrinsic(IT: VCMPEQ, ElemKind: ElementKind);
4344 break;
4345 case BO_NE:
4346 CR6 = CR6_EQ;
4347 ID = GetIntrinsic(IT: VCMPEQ, ElemKind: ElementKind);
4348 break;
4349 case BO_LT:
4350 CR6 = CR6_LT;
4351 ID = GetIntrinsic(IT: VCMPGT, ElemKind: ElementKind);
4352 std::swap(a&: FirstVecArg, b&: SecondVecArg);
4353 break;
4354 case BO_GT:
4355 CR6 = CR6_LT;
4356 ID = GetIntrinsic(IT: VCMPGT, ElemKind: ElementKind);
4357 break;
4358 case BO_LE:
4359 if (ElementKind == BuiltinType::Float) {
4360 CR6 = CR6_LT;
4361 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
4362 std::swap(a&: FirstVecArg, b&: SecondVecArg);
4363 }
4364 else {
4365 CR6 = CR6_EQ;
4366 ID = GetIntrinsic(IT: VCMPGT, ElemKind: ElementKind);
4367 }
4368 break;
4369 case BO_GE:
4370 if (ElementKind == BuiltinType::Float) {
4371 CR6 = CR6_LT;
4372 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
4373 }
4374 else {
4375 CR6 = CR6_EQ;
4376 ID = GetIntrinsic(IT: VCMPGT, ElemKind: ElementKind);
4377 std::swap(a&: FirstVecArg, b&: SecondVecArg);
4378 }
4379 break;
4380 }
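      // For example (illustrative): `a < b` is emitted by swapping the
      // operands and testing vcmpgt(b, a), while integer `a <= b` reuses
      // vcmpgt(a, b) with the inverted CR6 sense (CR6_EQ).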
4381
4382 Value *CR6Param = Builder.getInt32(C: CR6);
4383 llvm::Function *F = CGF.CGM.getIntrinsic(IID: ID);
4384 Result = Builder.CreateCall(Callee: F, Args: {CR6Param, FirstVecArg, SecondVecArg});
4385
4386      // The result type of the intrinsic may not be the same as E->getType().
4387      // If E->getType() is not BoolTy, EmitScalarConversion will do the
4388      // conversion work. If E->getType() is BoolTy, EmitScalarConversion does
4389      // nothing, so if ResultTy is not i1 we must truncate here to avoid a
4390      // crash later.
4391 llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Val: Result->getType());
4392 if (ResultTy->getBitWidth() > 1 &&
4393 E->getType() == CGF.getContext().BoolTy)
4394 Result = Builder.CreateTrunc(V: Result, DestTy: Builder.getInt1Ty());
4395 return EmitScalarConversion(Src: Result, SrcType: CGF.getContext().BoolTy, DstType: E->getType(),
4396 Loc: E->getExprLoc());
4397 }
4398
4399 if (BOInfo.isFixedPointOp()) {
4400 Result = EmitFixedPointBinOp(op: BOInfo);
4401 } else if (LHS->getType()->isFPOrFPVectorTy()) {
4402 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
4403 if (!IsSignaling)
4404 Result = Builder.CreateFCmp(P: FCmpOpc, LHS, RHS, Name: "cmp");
4405 else
4406 Result = Builder.CreateFCmpS(P: FCmpOpc, LHS, RHS, Name: "cmp");
4407 } else if (LHSTy->hasSignedIntegerRepresentation()) {
4408 Result = Builder.CreateICmp(P: SICmpOpc, LHS, RHS, Name: "cmp");
4409 } else {
4410 // Unsigned integers and pointers.
4411
4412 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
4413 !isa<llvm::ConstantPointerNull>(Val: LHS) &&
4414 !isa<llvm::ConstantPointerNull>(Val: RHS)) {
4415
4416        // Dynamic information must be stripped for comparisons, because it
4417        // could otherwise be leaked. Based on comparisons of pointers to
4418        // dynamic objects, the optimizer can replace one pointer with
4419        // another, which might be incorrect in the presence of invariant
4420        // groups. Comparison with null is safe because null does not carry
4421        // any dynamic information.
4422 if (LHSTy.mayBeDynamicClass())
4423 LHS = Builder.CreateStripInvariantGroup(Ptr: LHS);
4424 if (RHSTy.mayBeDynamicClass())
4425 RHS = Builder.CreateStripInvariantGroup(Ptr: RHS);
4426 }
4427
4428 Result = Builder.CreateICmp(P: UICmpOpc, LHS, RHS, Name: "cmp");
4429 }
4430
4431 // If this is a vector comparison, sign extend the result to the appropriate
4432 // vector integer type and return it (don't convert to bool).
4433 if (LHSTy->isVectorType())
4434 return Builder.CreateSExt(V: Result, DestTy: ConvertType(T: E->getType()), Name: "sext");
4435
4436 } else {
4437 // Complex Comparison: can only be an equality comparison.
4438 CodeGenFunction::ComplexPairTy LHS, RHS;
4439 QualType CETy;
4440 if (auto *CTy = LHSTy->getAs<ComplexType>()) {
4441 LHS = CGF.EmitComplexExpr(E: E->getLHS());
4442 CETy = CTy->getElementType();
4443 } else {
4444 LHS.first = Visit(E: E->getLHS());
4445 LHS.second = llvm::Constant::getNullValue(Ty: LHS.first->getType());
4446 CETy = LHSTy;
4447 }
4448 if (auto *CTy = RHSTy->getAs<ComplexType>()) {
4449 RHS = CGF.EmitComplexExpr(E: E->getRHS());
4450 assert(CGF.getContext().hasSameUnqualifiedType(CETy,
4451 CTy->getElementType()) &&
4452 "The element types must always match.");
4453 (void)CTy;
4454 } else {
4455 RHS.first = Visit(E: E->getRHS());
4456 RHS.second = llvm::Constant::getNullValue(Ty: RHS.first->getType());
4457 assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
4458 "The element types must always match.");
4459 }
4460
4461 Value *ResultR, *ResultI;
4462 if (CETy->isRealFloatingType()) {
4463 // As complex comparisons can only be equality comparisons, they
4464 // are never signaling comparisons.
4465 ResultR = Builder.CreateFCmp(P: FCmpOpc, LHS: LHS.first, RHS: RHS.first, Name: "cmp.r");
4466 ResultI = Builder.CreateFCmp(P: FCmpOpc, LHS: LHS.second, RHS: RHS.second, Name: "cmp.i");
4467 } else {
4468 // Complex comparisons can only be equality comparisons. As such, signed
4469 // and unsigned opcodes are the same.
4470 ResultR = Builder.CreateICmp(P: UICmpOpc, LHS: LHS.first, RHS: RHS.first, Name: "cmp.r");
4471 ResultI = Builder.CreateICmp(P: UICmpOpc, LHS: LHS.second, RHS: RHS.second, Name: "cmp.i");
4472 }
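    // For example (illustrative): `(a+bi) == (c+di)` holds iff a == c and
    // b == d, so the partwise results are ANDed below; `!=` holds iff either
    // part differs, so they are ORed.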
4473
4474 if (E->getOpcode() == BO_EQ) {
4475 Result = Builder.CreateAnd(LHS: ResultR, RHS: ResultI, Name: "and.ri");
4476 } else {
4477 assert(E->getOpcode() == BO_NE &&
4478 "Complex comparison other than == or != ?");
4479 Result = Builder.CreateOr(LHS: ResultR, RHS: ResultI, Name: "or.ri");
4480 }
4481 }
4482
4483 return EmitScalarConversion(Src: Result, SrcType: CGF.getContext().BoolTy, DstType: E->getType(),
4484 Loc: E->getExprLoc());
4485}
4486
4487Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
4488 bool Ignore = TestAndClearIgnoreResultAssign();
4489
4490 Value *RHS;
4491 LValue LHS;
4492
4493 switch (E->getLHS()->getType().getObjCLifetime()) {
4494 case Qualifiers::OCL_Strong:
4495 std::tie(args&: LHS, args&: RHS) = CGF.EmitARCStoreStrong(E, Ignore);
4496 break;
4497
4498 case Qualifiers::OCL_Autoreleasing:
4499 std::tie(args&: LHS, args&: RHS) = CGF.EmitARCStoreAutoreleasing(e: E);
4500 break;
4501
4502 case Qualifiers::OCL_ExplicitNone:
4503 std::tie(args&: LHS, args&: RHS) = CGF.EmitARCStoreUnsafeUnretained(e: E, ignored: Ignore);
4504 break;
4505
4506 case Qualifiers::OCL_Weak:
4507 RHS = Visit(E: E->getRHS());
4508 LHS = EmitCheckedLValue(E: E->getLHS(), TCK: CodeGenFunction::TCK_Store);
4509 RHS = CGF.EmitARCStoreWeak(addr: LHS.getAddress(CGF), value: RHS, ignored: Ignore);
4510 break;
4511
4512 case Qualifiers::OCL_None:
4513 // __block variables need to have the rhs evaluated first, plus
4514 // this should improve codegen just a little.
4515 RHS = Visit(E: E->getRHS());
4516 LHS = EmitCheckedLValue(E: E->getLHS(), TCK: CodeGenFunction::TCK_Store);
4517
4518 // Store the value into the LHS. Bit-fields are handled specially
4519 // because the result is altered by the store, i.e., [C99 6.5.16p1]
4520 // 'An assignment expression has the value of the left operand after
4521 // the assignment...'.
4522 if (LHS.isBitField()) {
4523 CGF.EmitStoreThroughBitfieldLValue(Src: RValue::get(V: RHS), Dst: LHS, Result: &RHS);
4524 } else {
4525 CGF.EmitNullabilityCheck(LHS, RHS, Loc: E->getExprLoc());
4526 CGF.EmitStoreThroughLValue(Src: RValue::get(V: RHS), Dst: LHS);
4527 }
4528 }
4529
4530 // If the result is clearly ignored, return now.
4531 if (Ignore)
4532 return nullptr;
4533
4534 // The result of an assignment in C is the assigned r-value.
4535 if (!CGF.getLangOpts().CPlusPlus)
4536 return RHS;
4537
4538 // If the lvalue is non-volatile, return the computed value of the assignment.
4539 if (!LHS.isVolatileQualified())
4540 return RHS;
4541
4542 // Otherwise, reload the value.
4543 return EmitLoadOfLValue(LV: LHS, Loc: E->getExprLoc());
4544}
4545
4546Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
4547 // Perform vector logical and on comparisons with zero vectors.
4548 if (E->getType()->isVectorType()) {
4549 CGF.incrementProfileCounter(E);
4550
4551 Value *LHS = Visit(E: E->getLHS());
4552 Value *RHS = Visit(E: E->getRHS());
4553 Value *Zero = llvm::ConstantAggregateZero::get(Ty: LHS->getType());
4554 if (LHS->getType()->isFPOrFPVectorTy()) {
4555 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4556 CGF, E->getFPFeaturesInEffect(LO: CGF.getLangOpts()));
4557 LHS = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_UNE, LHS, RHS: Zero, Name: "cmp");
4558 RHS = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_UNE, LHS: RHS, RHS: Zero, Name: "cmp");
4559 } else {
4560 LHS = Builder.CreateICmp(P: llvm::CmpInst::ICMP_NE, LHS, RHS: Zero, Name: "cmp");
4561 RHS = Builder.CreateICmp(P: llvm::CmpInst::ICMP_NE, LHS: RHS, RHS: Zero, Name: "cmp");
4562 }
4563 Value *And = Builder.CreateAnd(LHS, RHS);
4564 return Builder.CreateSExt(V: And, DestTy: ConvertType(T: E->getType()), Name: "sext");
4565 }
4566
4567 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
4568 llvm::Type *ResTy = ConvertType(T: E->getType());
4569
4570 // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
4571 // If we have 1 && X, just emit X without inserting the control flow.
4572 bool LHSCondVal;
4573 if (CGF.ConstantFoldsToSimpleInteger(Cond: E->getLHS(), Result&: LHSCondVal)) {
4574 if (LHSCondVal) { // If we have 1 && X, just emit X.
4575 CGF.incrementProfileCounter(E);
4576
4577 // If the top of the logical operator nest, reset the MCDC temp to 0.
4578 if (CGF.MCDCLogOpStack.empty())
4579 CGF.maybeResetMCDCCondBitmap(E);
4580
4581 CGF.MCDCLogOpStack.push_back(Elt: E);
4582
4583 Value *RHSCond = CGF.EvaluateExprAsBool(E: E->getRHS());
4584
4585 // If we're generating for profiling or coverage, generate a branch to a
4586 // block that increments the RHS counter needed to track branch condition
4587 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
4588 // "FalseBlock" after the increment is done.
4589 if (InstrumentRegions &&
4590 CodeGenFunction::isInstrumentedCondition(C: E->getRHS())) {
4591 CGF.maybeUpdateMCDCCondBitmap(E: E->getRHS(), Val: RHSCond);
4592 llvm::BasicBlock *FBlock = CGF.createBasicBlock(name: "land.end");
4593 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock(name: "land.rhscnt");
4594 Builder.CreateCondBr(Cond: RHSCond, True: RHSBlockCnt, False: FBlock);
4595 CGF.EmitBlock(BB: RHSBlockCnt);
4596 CGF.incrementProfileCounter(E->getRHS());
4597 CGF.EmitBranch(Block: FBlock);
4598 CGF.EmitBlock(BB: FBlock);
4599 }
4600
4601 CGF.MCDCLogOpStack.pop_back();
4602 // If the top of the logical operator nest, update the MCDC bitmap.
4603 if (CGF.MCDCLogOpStack.empty())
4604 CGF.maybeUpdateMCDCTestVectorBitmap(E);
4605
4606 // ZExt result to int or bool.
4607 return Builder.CreateZExtOrBitCast(V: RHSCond, DestTy: ResTy, Name: "land.ext");
4608 }
4609
4610 // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
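    // (The RHS cannot be elided if it contains a label: for example,
    // illustratively, a label inside a GNU statement expression that a goto
    // elsewhere might target.)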
4611 if (!CGF.ContainsLabel(E->getRHS()))
4612 return llvm::Constant::getNullValue(Ty: ResTy);
4613 }
4614
4615 // If the top of the logical operator nest, reset the MCDC temp to 0.
4616 if (CGF.MCDCLogOpStack.empty())
4617 CGF.maybeResetMCDCCondBitmap(E);
4618
4619 CGF.MCDCLogOpStack.push_back(Elt: E);
4620
4621 llvm::BasicBlock *ContBlock = CGF.createBasicBlock(name: "land.end");
4622 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock(name: "land.rhs");
4623
4624 CodeGenFunction::ConditionalEvaluation eval(CGF);
4625
4626 // Branch on the LHS first. If it is false, go to the failure (cont) block.
4627 CGF.EmitBranchOnBoolExpr(Cond: E->getLHS(), TrueBlock: RHSBlock, FalseBlock: ContBlock,
4628 TrueCount: CGF.getProfileCount(E->getRHS()));
4629
4630  // Any edges into the ContBlock now come from an (indeterminate number of)
4631  // branches out of this first condition. All of these values will be false.
4632  // Start setting up the PHI node in the Cont Block for this.
4633 llvm::PHINode *PN = llvm::PHINode::Create(Ty: llvm::Type::getInt1Ty(C&: VMContext), NumReservedValues: 2,
4634 NameStr: "", InsertAtEnd: ContBlock);
4635 for (llvm::pred_iterator PI = pred_begin(BB: ContBlock), PE = pred_end(BB: ContBlock);
4636 PI != PE; ++PI)
4637 PN->addIncoming(V: llvm::ConstantInt::getFalse(Context&: VMContext), BB: *PI);
4638
4639 eval.begin(CGF);
4640 CGF.EmitBlock(BB: RHSBlock);
4641 CGF.incrementProfileCounter(E);
4642 Value *RHSCond = CGF.EvaluateExprAsBool(E: E->getRHS());
4643 eval.end(CGF);
4644
4645  // Reacquire the RHS block, as there may be subblocks inserted.
4646 RHSBlock = Builder.GetInsertBlock();
4647
4648 // If we're generating for profiling or coverage, generate a branch on the
4649 // RHS to a block that increments the RHS true counter needed to track branch
4650 // condition coverage.
4651 if (InstrumentRegions &&
4652 CodeGenFunction::isInstrumentedCondition(C: E->getRHS())) {
4653 CGF.maybeUpdateMCDCCondBitmap(E: E->getRHS(), Val: RHSCond);
4654 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock(name: "land.rhscnt");
4655 Builder.CreateCondBr(Cond: RHSCond, True: RHSBlockCnt, False: ContBlock);
4656 CGF.EmitBlock(BB: RHSBlockCnt);
4657 CGF.incrementProfileCounter(E->getRHS());
4658 CGF.EmitBranch(Block: ContBlock);
4659 PN->addIncoming(V: RHSCond, BB: RHSBlockCnt);
4660 }
4661
4662 // Emit an unconditional branch from this block to ContBlock.
4663 {
4664 // There is no need to emit line number for unconditional branch.
4665 auto NL = ApplyDebugLocation::CreateEmpty(CGF);
4666 CGF.EmitBlock(BB: ContBlock);
4667 }
4668 // Insert an entry into the phi node for the edge with the value of RHSCond.
4669 PN->addIncoming(V: RHSCond, BB: RHSBlock);
4670
4671 CGF.MCDCLogOpStack.pop_back();
4672 // If the top of the logical operator nest, update the MCDC bitmap.
4673 if (CGF.MCDCLogOpStack.empty())
4674 CGF.maybeUpdateMCDCTestVectorBitmap(E);
4675
4676 // Artificial location to preserve the scope information
4677 {
4678 auto NL = ApplyDebugLocation::CreateArtificial(CGF);
4679 PN->setDebugLoc(Builder.getCurrentDebugLocation());
4680 }
4681
4682 // ZExt result to int.
4683 return Builder.CreateZExtOrBitCast(V: PN, DestTy: ResTy, Name: "land.ext");
4684}
4685
4686Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
4687 // Perform vector logical or on comparisons with zero vectors.
4688 if (E->getType()->isVectorType()) {
4689 CGF.incrementProfileCounter(E);
4690
4691 Value *LHS = Visit(E: E->getLHS());
4692 Value *RHS = Visit(E: E->getRHS());
4693 Value *Zero = llvm::ConstantAggregateZero::get(Ty: LHS->getType());
4694 if (LHS->getType()->isFPOrFPVectorTy()) {
4695 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4696 CGF, E->getFPFeaturesInEffect(LO: CGF.getLangOpts()));
4697 LHS = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_UNE, LHS, RHS: Zero, Name: "cmp");
4698 RHS = Builder.CreateFCmp(P: llvm::CmpInst::FCMP_UNE, LHS: RHS, RHS: Zero, Name: "cmp");
4699 } else {
4700 LHS = Builder.CreateICmp(P: llvm::CmpInst::ICMP_NE, LHS, RHS: Zero, Name: "cmp");
4701 RHS = Builder.CreateICmp(P: llvm::CmpInst::ICMP_NE, LHS: RHS, RHS: Zero, Name: "cmp");
4702 }
4703 Value *Or = Builder.CreateOr(LHS, RHS);
4704 return Builder.CreateSExt(V: Or, DestTy: ConvertType(T: E->getType()), Name: "sext");
4705 }
4706
4707 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
4708 llvm::Type *ResTy = ConvertType(T: E->getType());
4709
4710 // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
4711 // If we have 0 || X, just emit X without inserting the control flow.
4712 bool LHSCondVal;
4713 if (CGF.ConstantFoldsToSimpleInteger(Cond: E->getLHS(), Result&: LHSCondVal)) {
4714 if (!LHSCondVal) { // If we have 0 || X, just emit X.
4715 CGF.incrementProfileCounter(E);
4716
4717 // If the top of the logical operator nest, reset the MCDC temp to 0.
4718 if (CGF.MCDCLogOpStack.empty())
4719 CGF.maybeResetMCDCCondBitmap(E);
4720
4721 CGF.MCDCLogOpStack.push_back(Elt: E);
4722
4723 Value *RHSCond = CGF.EvaluateExprAsBool(E: E->getRHS());
4724
4725 // If we're generating for profiling or coverage, generate a branch to a
4726      // block that increments the RHS counter needed to track branch condition
4727 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
4728 // "FalseBlock" after the increment is done.
4729 if (InstrumentRegions &&
4730 CodeGenFunction::isInstrumentedCondition(C: E->getRHS())) {
4731 CGF.maybeUpdateMCDCCondBitmap(E: E->getRHS(), Val: RHSCond);
4732 llvm::BasicBlock *FBlock = CGF.createBasicBlock(name: "lor.end");
4733 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock(name: "lor.rhscnt");
4734 Builder.CreateCondBr(Cond: RHSCond, True: FBlock, False: RHSBlockCnt);
4735 CGF.EmitBlock(BB: RHSBlockCnt);
4736 CGF.incrementProfileCounter(E->getRHS());
4737 CGF.EmitBranch(Block: FBlock);
4738 CGF.EmitBlock(BB: FBlock);
4739 }
4740
4741 CGF.MCDCLogOpStack.pop_back();
4742 // If the top of the logical operator nest, update the MCDC bitmap.
4743 if (CGF.MCDCLogOpStack.empty())
4744 CGF.maybeUpdateMCDCTestVectorBitmap(E);
4745
4746 // ZExt result to int or bool.
4747 return Builder.CreateZExtOrBitCast(V: RHSCond, DestTy: ResTy, Name: "lor.ext");
4748 }
4749
4750 // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
4751 if (!CGF.ContainsLabel(E->getRHS()))
4752 return llvm::ConstantInt::get(Ty: ResTy, V: 1);
4753 }
4754
4755 // If the top of the logical operator nest, reset the MCDC temp to 0.
4756 if (CGF.MCDCLogOpStack.empty())
4757 CGF.maybeResetMCDCCondBitmap(E);
4758
4759 CGF.MCDCLogOpStack.push_back(Elt: E);
4760
4761 llvm::BasicBlock *ContBlock = CGF.createBasicBlock(name: "lor.end");
4762 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock(name: "lor.rhs");
4763
4764 CodeGenFunction::ConditionalEvaluation eval(CGF);
4765
4766 // Branch on the LHS first. If it is true, go to the success (cont) block.
4767 CGF.EmitBranchOnBoolExpr(Cond: E->getLHS(), TrueBlock: ContBlock, FalseBlock: RHSBlock,
4768 TrueCount: CGF.getCurrentProfileCount() -
4769 CGF.getProfileCount(E->getRHS()));
4770
4771  // Any edges into the ContBlock now come from an (indeterminate number of)
4772  // branches out of this first condition. All of these values will be true.
4773  // Start setting up the PHI node in the Cont Block for this.
4774 llvm::PHINode *PN = llvm::PHINode::Create(Ty: llvm::Type::getInt1Ty(C&: VMContext), NumReservedValues: 2,
4775 NameStr: "", InsertAtEnd: ContBlock);
4776 for (llvm::pred_iterator PI = pred_begin(BB: ContBlock), PE = pred_end(BB: ContBlock);
4777 PI != PE; ++PI)
4778 PN->addIncoming(V: llvm::ConstantInt::getTrue(Context&: VMContext), BB: *PI);
4779
4780 eval.begin(CGF);
4781
4782 // Emit the RHS condition as a bool value.
4783 CGF.EmitBlock(BB: RHSBlock);
4784 CGF.incrementProfileCounter(E);
4785 Value *RHSCond = CGF.EvaluateExprAsBool(E: E->getRHS());
4786
4787 eval.end(CGF);
4788
4789  // Reacquire the RHS block, as there may be subblocks inserted.
4790 RHSBlock = Builder.GetInsertBlock();
4791
4792 // If we're generating for profiling or coverage, generate a branch on the
4793 // RHS to a block that increments the RHS true counter needed to track branch
4794 // condition coverage.
4795 if (InstrumentRegions &&
4796 CodeGenFunction::isInstrumentedCondition(C: E->getRHS())) {
4797 CGF.maybeUpdateMCDCCondBitmap(E: E->getRHS(), Val: RHSCond);
4798 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock(name: "lor.rhscnt");
4799 Builder.CreateCondBr(Cond: RHSCond, True: ContBlock, False: RHSBlockCnt);
4800 CGF.EmitBlock(BB: RHSBlockCnt);
4801 CGF.incrementProfileCounter(E->getRHS());
4802 CGF.EmitBranch(Block: ContBlock);
4803 PN->addIncoming(V: RHSCond, BB: RHSBlockCnt);
4804 }
4805
4806 // Emit an unconditional branch from this block to ContBlock. Insert an entry
4807 // into the phi node for the edge with the value of RHSCond.
4808 CGF.EmitBlock(BB: ContBlock);
4809 PN->addIncoming(V: RHSCond, BB: RHSBlock);
4810
4811 CGF.MCDCLogOpStack.pop_back();
4812 // If the top of the logical operator nest, update the MCDC bitmap.
4813 if (CGF.MCDCLogOpStack.empty())
4814 CGF.maybeUpdateMCDCTestVectorBitmap(E);
4815
4816 // ZExt result to int.
4817 return Builder.CreateZExtOrBitCast(V: PN, DestTy: ResTy, Name: "lor.ext");
4818}
4819
4820Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
4821 CGF.EmitIgnoredExpr(E: E->getLHS());
4822 CGF.EnsureInsertPoint();
4823 return Visit(E: E->getRHS());
4824}
4825
4826//===----------------------------------------------------------------------===//
4827// Other Operators
4828//===----------------------------------------------------------------------===//
4829
4830/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
4831/// expression is cheap enough and side-effect-free enough to evaluate
4832/// unconditionally instead of conditionally. This is used to convert control
4833/// flow into selects in some cases.
4834static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
4835 CodeGenFunction &CGF) {
4836 // Anything that is an integer or floating point constant is fine.
4837 return E->IgnoreParens()->isEvaluatable(Ctx: CGF.getContext());
4838
4839 // Even non-volatile automatic variables can't be evaluated unconditionally.
4840 // Referencing a thread_local may cause non-trivial initialization work to
4841 // occur. If we're inside a lambda and one of the variables is from the scope
4842 // outside the lambda, that function may have returned already. Reading its
4843  // locals is a bad idea. Also, these reads may introduce races that didn't
4844  // exist in the source-level program.
4845}
4846
4847
4848Value *ScalarExprEmitter::
4849VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
4850 TestAndClearIgnoreResultAssign();
4851
4852 // Bind the common expression if necessary.
4853 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
4854
4855 Expr *condExpr = E->getCond();
4856 Expr *lhsExpr = E->getTrueExpr();
4857 Expr *rhsExpr = E->getFalseExpr();
4858
4859 // If the condition constant folds and can be elided, try to avoid emitting
4860 // the condition and the dead arm.
4861 bool CondExprBool;
4862 if (CGF.ConstantFoldsToSimpleInteger(Cond: condExpr, Result&: CondExprBool)) {
4863 Expr *live = lhsExpr, *dead = rhsExpr;
4864 if (!CondExprBool) std::swap(a&: live, b&: dead);
4865
4866 // If the dead side doesn't have labels we need, just emit the Live part.
4867 if (!CGF.ContainsLabel(dead)) {
4868 if (CondExprBool)
4869 CGF.incrementProfileCounter(E);
4870 Value *Result = Visit(E: live);
4871
4872 // If the live part is a throw expression, it acts like it has a void
4873 // type, so evaluating it returns a null Value*. However, a conditional
4874 // with non-void type must return a non-null Value*.
4875 if (!Result && !E->getType()->isVoidType())
4876 Result = llvm::UndefValue::get(T: CGF.ConvertType(E->getType()));
4877
4878 return Result;
4879 }
4880 }
4881
4882 // OpenCL: If the condition is a vector, we can treat this condition like
4883 // the select function.
4884 if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
4885 condExpr->getType()->isExtVectorType()) {
4886 CGF.incrementProfileCounter(E);
4887
4888 llvm::Value *CondV = CGF.EmitScalarExpr(E: condExpr);
4889 llvm::Value *LHS = Visit(E: lhsExpr);
4890 llvm::Value *RHS = Visit(E: rhsExpr);
4891
4892 llvm::Type *condType = ConvertType(T: condExpr->getType());
4893 auto *vecTy = cast<llvm::FixedVectorType>(Val: condType);
4894
4895 unsigned numElem = vecTy->getNumElements();
4896 llvm::Type *elemType = vecTy->getElementType();
4897
4898 llvm::Value *zeroVec = llvm::Constant::getNullValue(Ty: vecTy);
4899 llvm::Value *TestMSB = Builder.CreateICmpSLT(LHS: CondV, RHS: zeroVec);
4900 llvm::Value *tmp = Builder.CreateSExt(
4901 V: TestMSB, DestTy: llvm::FixedVectorType::get(ElementType: elemType, NumElts: numElem), Name: "sext");
4902 llvm::Value *tmp2 = Builder.CreateNot(V: tmp);
4903
4904 // Cast float to int to perform ANDs if necessary.
4905 llvm::Value *RHSTmp = RHS;
4906 llvm::Value *LHSTmp = LHS;
4907 bool wasCast = false;
4908 llvm::VectorType *rhsVTy = cast<llvm::VectorType>(Val: RHS->getType());
4909 if (rhsVTy->getElementType()->isFloatingPointTy()) {
4910 RHSTmp = Builder.CreateBitCast(V: RHS, DestTy: tmp2->getType());
4911 LHSTmp = Builder.CreateBitCast(V: LHS, DestTy: tmp->getType());
4912 wasCast = true;
4913 }
4914
4915 llvm::Value *tmp3 = Builder.CreateAnd(LHS: RHSTmp, RHS: tmp2);
4916 llvm::Value *tmp4 = Builder.CreateAnd(LHS: LHSTmp, RHS: tmp);
4917 llvm::Value *tmp5 = Builder.CreateOr(LHS: tmp3, RHS: tmp4, Name: "cond");
4918 if (wasCast)
4919 tmp5 = Builder.CreateBitCast(V: tmp5, DestTy: RHS->getType());
4920
4921 return tmp5;
4922 }
4923
4924 if (condExpr->getType()->isVectorType() ||
4925 condExpr->getType()->isSveVLSBuiltinType()) {
4926 CGF.incrementProfileCounter(E);
4927
4928 llvm::Value *CondV = CGF.EmitScalarExpr(E: condExpr);
4929 llvm::Value *LHS = Visit(E: lhsExpr);
4930 llvm::Value *RHS = Visit(E: rhsExpr);
4931
4932 llvm::Type *CondType = ConvertType(T: condExpr->getType());
4933 auto *VecTy = cast<llvm::VectorType>(Val: CondType);
4934 llvm::Value *ZeroVec = llvm::Constant::getNullValue(Ty: VecTy);
4935
4936 CondV = Builder.CreateICmpNE(LHS: CondV, RHS: ZeroVec, Name: "vector_cond");
4937 return Builder.CreateSelect(C: CondV, True: LHS, False: RHS, Name: "vector_select");
4938 }
4939
4940 // If this is a really simple expression (like x ? 4 : 5), emit this as a
4941 // select instead of as control flow. We can only do this if it is cheap and
4942 // safe to evaluate the LHS and RHS unconditionally.
4943 if (isCheapEnoughToEvaluateUnconditionally(E: lhsExpr, CGF) &&
4944 isCheapEnoughToEvaluateUnconditionally(E: rhsExpr, CGF)) {
4945 llvm::Value *CondV = CGF.EvaluateExprAsBool(E: condExpr);
4946 llvm::Value *StepV = Builder.CreateZExtOrBitCast(V: CondV, DestTy: CGF.Int64Ty);
4947
4948 CGF.incrementProfileCounter(E, StepV);
4949
4950 llvm::Value *LHS = Visit(E: lhsExpr);
4951 llvm::Value *RHS = Visit(E: rhsExpr);
4952 if (!LHS) {
4953 // If the conditional has void type, make sure we return a null Value*.
4954 assert(!RHS && "LHS and RHS types must match");
4955 return nullptr;
4956 }
4957 return Builder.CreateSelect(C: CondV, True: LHS, False: RHS, Name: "cond");
4958 }
4959
4960 // If the top of the logical operator nest, reset the MCDC temp to 0.
4961 if (CGF.MCDCLogOpStack.empty())
4962 CGF.maybeResetMCDCCondBitmap(E: condExpr);
4963
4964 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock(name: "cond.true");
4965 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock(name: "cond.false");
4966 llvm::BasicBlock *ContBlock = CGF.createBasicBlock(name: "cond.end");
4967
4968 CodeGenFunction::ConditionalEvaluation eval(CGF);
4969 CGF.EmitBranchOnBoolExpr(Cond: condExpr, TrueBlock: LHSBlock, FalseBlock: RHSBlock,
4970 TrueCount: CGF.getProfileCount(lhsExpr));
4971
4972 CGF.EmitBlock(BB: LHSBlock);
4973
4974 // If the top of the logical operator nest, update the MCDC bitmap for the
4975 // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
4976 // may also contain a boolean expression.
4977 if (CGF.MCDCLogOpStack.empty())
4978 CGF.maybeUpdateMCDCTestVectorBitmap(E: condExpr);
4979
4980 CGF.incrementProfileCounter(E);
4981 eval.begin(CGF);
4982 Value *LHS = Visit(E: lhsExpr);
4983 eval.end(CGF);
4984
4985 LHSBlock = Builder.GetInsertBlock();
4986 Builder.CreateBr(Dest: ContBlock);
4987
4988 CGF.EmitBlock(BB: RHSBlock);
4989
4990 // If the top of the logical operator nest, update the MCDC bitmap for the
4991 // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
4992 // may also contain a boolean expression.
4993 if (CGF.MCDCLogOpStack.empty())
4994 CGF.maybeUpdateMCDCTestVectorBitmap(E: condExpr);
4995
4996 eval.begin(CGF);
4997 Value *RHS = Visit(E: rhsExpr);
4998 eval.end(CGF);
4999
5000 RHSBlock = Builder.GetInsertBlock();
5001 CGF.EmitBlock(BB: ContBlock);
5002
5003 // If the LHS or RHS is a throw expression, it will be legitimately null.
5004 if (!LHS)
5005 return RHS;
5006 if (!RHS)
5007 return LHS;
5008
5009 // Create a PHI node for the real part.
5010 llvm::PHINode *PN = Builder.CreatePHI(Ty: LHS->getType(), NumReservedValues: 2, Name: "cond");
5011 PN->addIncoming(V: LHS, BB: LHSBlock);
5012 PN->addIncoming(V: RHS, BB: RHSBlock);
5013
5014 return PN;
5015}
5016
5017Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
5018 return Visit(E: E->getChosenSubExpr());
5019}
5020
5021Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
5022 QualType Ty = VE->getType();
5023
5024 if (Ty->isVariablyModifiedType())
5025 CGF.EmitVariablyModifiedType(Ty);
5026
5027 Address ArgValue = Address::invalid();
5028 Address ArgPtr = CGF.EmitVAArg(VE, VAListAddr&: ArgValue);
5029
5030 llvm::Type *ArgTy = ConvertType(T: VE->getType());
5031
5032 // If EmitVAArg fails, emit an error.
5033 if (!ArgPtr.isValid()) {
5034 CGF.ErrorUnsupported(VE, "va_arg expression");
5035 return llvm::UndefValue::get(T: ArgTy);
5036 }
5037
5038 // FIXME Volatility.
5039 llvm::Value *Val = Builder.CreateLoad(Addr: ArgPtr);
5040
5041 // If EmitVAArg promoted the type, we must truncate it.
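  // (Illustrative: on some targets a small integer such as char may be read
  // back as a promoted int and needs the truncation to i8 below.)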
5042 if (ArgTy != Val->getType()) {
5043 if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
5044 Val = Builder.CreateIntToPtr(V: Val, DestTy: ArgTy);
5045 else
5046 Val = Builder.CreateTrunc(V: Val, DestTy: ArgTy);
5047 }
5048
5049 return Val;
5050}
5051
5052Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
5053 return CGF.EmitBlockLiteral(block);
5054}
5055
5056// Convert a vec3 to vec4, or vice versa.
5057static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
5058 Value *Src, unsigned NumElementsDst) {
5059 static constexpr int Mask[] = {0, 1, 2, -1};
5060 return Builder.CreateShuffleVector(V: Src, Mask: llvm::ArrayRef(Mask, NumElementsDst));
5061}
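// For example (illustrative): with NumElementsDst == 4, a vec3 source is
// widened to a vec4 whose last lane is poison (mask entry -1); with
// NumElementsDst == 3, only lanes {0,1,2} of a vec4 source are kept.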
5062
5063// Create cast instructions for converting LLVM value \p Src to LLVM type \p
5064// DstTy. \p Src has the same size as \p DstTy. Both are single value types
5065// but could be scalars or vectors of different lengths, and either can be a
5066// pointer.
5067// There are 4 cases:
5068// 1. non-pointer -> non-pointer : needs 1 bitcast
5069// 2. pointer -> pointer : needs 1 bitcast or addrspacecast
5070// 3. pointer -> non-pointer
5071// a) pointer -> intptr_t : needs 1 ptrtoint
5072// b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
5073// 4. non-pointer -> pointer
5074// a) intptr_t -> pointer : needs 1 inttoptr
5075// b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
5076// Note: for cases 3b and 4b two casts are required since LLVM casts do not
5077// allow casting directly between pointer types and non-integer non-pointer
5078// types.
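// For example (illustrative, on a 64-bit target): casting <2 x i32> to a
// pointer is case 4b: first `bitcast <2 x i32> %v to i64`, then
// `inttoptr i64 %v.int to ptr`.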
5079static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
5080 const llvm::DataLayout &DL,
5081 Value *Src, llvm::Type *DstTy,
5082 StringRef Name = "") {
5083 auto SrcTy = Src->getType();
5084
5085 // Case 1.
5086 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
5087 return Builder.CreateBitCast(V: Src, DestTy: DstTy, Name);
5088
5089 // Case 2.
5090 if (SrcTy->isPointerTy() && DstTy->isPointerTy())
5091 return Builder.CreatePointerBitCastOrAddrSpaceCast(V: Src, DestTy: DstTy, Name);
5092
5093 // Case 3.
5094 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
5095 // Case 3b.
5096 if (!DstTy->isIntegerTy())
5097 Src = Builder.CreatePtrToInt(V: Src, DestTy: DL.getIntPtrType(SrcTy));
5098 // Cases 3a and 3b.
5099 return Builder.CreateBitOrPointerCast(V: Src, DestTy: DstTy, Name);
5100 }
5101
5102 // Case 4b.
5103 if (!SrcTy->isIntegerTy())
5104 Src = Builder.CreateBitCast(V: Src, DestTy: DL.getIntPtrType(DstTy));
5105 // Cases 4a and 4b.
5106 return Builder.CreateIntToPtr(V: Src, DestTy: DstTy, Name);
5107}
5108
5109Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
5110 Value *Src = CGF.EmitScalarExpr(E: E->getSrcExpr());
5111 llvm::Type *DstTy = ConvertType(T: E->getType());
5112
5113 llvm::Type *SrcTy = Src->getType();
5114 unsigned NumElementsSrc =
5115 isa<llvm::VectorType>(Val: SrcTy)
5116 ? cast<llvm::FixedVectorType>(Val: SrcTy)->getNumElements()
5117 : 0;
5118 unsigned NumElementsDst =
5119 isa<llvm::VectorType>(Val: DstTy)
5120 ? cast<llvm::FixedVectorType>(Val: DstTy)->getNumElements()
5121 : 0;
5122
5123 // Use bit vector expansion for ext_vector_type boolean vectors.
5124 if (E->getType()->isExtVectorBoolType())
5125 return CGF.emitBoolVecConversion(SrcVec: Src, NumElementsDst, Name: "astype");
5126
5127 // Going from vec3 to non-vec3 is a special case and requires a shuffle
5128 // vector to get a vec4, then a bitcast if the target type is different.
5129 if (NumElementsSrc == 3 && NumElementsDst != 3) {
5130 Src = ConvertVec3AndVec4(Builder, CGF, Src, NumElementsDst: 4);
5131 Src = createCastsForTypeOfSameSize(Builder, DL: CGF.CGM.getDataLayout(), Src,
5132 DstTy);
5133
5134 Src->setName("astype");
5135 return Src;
5136 }
5137
5138 // Going from non-vec3 to vec3 is a special case and requires a bitcast
5139 // to vec4 if the original type is not vec4, then a shuffle vector to
5140 // get a vec3.
5141 if (NumElementsSrc != 3 && NumElementsDst == 3) {
5142 auto *Vec4Ty = llvm::FixedVectorType::get(
5143 ElementType: cast<llvm::VectorType>(Val: DstTy)->getElementType(), NumElts: 4);
5144 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
5145 Vec4Ty);
5146
5147 Src = ConvertVec3AndVec4(Builder, CGF, Src, NumElementsDst: 3);
5148 Src->setName("astype");
5149 return Src;
5150 }
5151
5152 return createCastsForTypeOfSameSize(Builder, DL: CGF.CGM.getDataLayout(),
5153 Src, DstTy, Name: "astype");
5154}
5155
5156Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
5157 return CGF.EmitAtomicExpr(E).getScalarVal();
5158}
5159
5160//===----------------------------------------------------------------------===//
5161// Entry Point into this File
5162//===----------------------------------------------------------------------===//
5163
5164/// Emit the computation of the specified expression of scalar type, ignoring
5165/// the result.
5166Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
5167 assert(E && hasScalarEvaluationKind(E->getType()) &&
5168 "Invalid scalar expression to emit");
5169
5170 return ScalarExprEmitter(*this, IgnoreResultAssign)
5171 .Visit(E: const_cast<Expr *>(E));
5172}
5173
5174/// Emit a conversion from the specified type to the specified destination type,
5175/// both of which are LLVM scalar types.
5176Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
5177 QualType DstTy,
5178 SourceLocation Loc) {
5179 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
5180 "Invalid scalar expression to emit");
5181 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcType: SrcTy, DstType: DstTy, Loc);
5182}
5183
5184/// Emit a conversion from the specified complex type to the specified
5185/// destination type, where the destination type is an LLVM scalar type.
5186Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
5187 QualType SrcTy,
5188 QualType DstTy,
5189 SourceLocation Loc) {
5190 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
5191 "Invalid complex -> scalar conversion");
5192 return ScalarExprEmitter(*this)
5193 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
5194}
5195
5196
5197Value *
5198CodeGenFunction::EmitPromotedScalarExpr(const Expr *E,
5199 QualType PromotionType) {
5200 if (!PromotionType.isNull())
5201 return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
5202 else
5203 return ScalarExprEmitter(*this).Visit(E: const_cast<Expr *>(E));
5204}
5205
5206
5207llvm::Value *CodeGenFunction::
5208EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
5209 bool isInc, bool isPre) {
5210 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
5211}
5212
5213LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
5214 // object->isa or (*object).isa
5215 // Generate code as for: *(Class*)object
5216
5217 Expr *BaseExpr = E->getBase();
5218 Address Addr = Address::invalid();
5219 if (BaseExpr->isPRValue()) {
5220 llvm::Type *BaseTy =
5221 ConvertTypeForMem(T: BaseExpr->getType()->getPointeeType());
5222 Addr = Address(EmitScalarExpr(E: BaseExpr), BaseTy, getPointerAlign());
5223 } else {
5224 Addr = EmitLValue(E: BaseExpr).getAddress(CGF&: *this);
5225 }
5226
5227 // Cast the address to Class*.
5228 Addr = Addr.withElementType(ElemTy: ConvertType(E->getType()));
5229 return MakeAddrLValue(Addr, E->getType());
5230}
5231
5232
5233LValue CodeGenFunction::EmitCompoundAssignmentLValue(
5234 const CompoundAssignOperator *E) {
5235 ScalarExprEmitter Scalar(*this);
5236 Value *Result = nullptr;
5237 switch (E->getOpcode()) {
5238#define COMPOUND_OP(Op) \
5239 case BO_##Op##Assign: \
5240 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
5241 Result)
5242 COMPOUND_OP(Mul);
5243 COMPOUND_OP(Div);
5244 COMPOUND_OP(Rem);
5245 COMPOUND_OP(Add);
5246 COMPOUND_OP(Sub);
5247 COMPOUND_OP(Shl);
5248 COMPOUND_OP(Shr);
5249 COMPOUND_OP(And);
5250 COMPOUND_OP(Xor);
5251 COMPOUND_OP(Or);
5252#undef COMPOUND_OP
5253
5254 case BO_PtrMemD:
5255 case BO_PtrMemI:
5256 case BO_Mul:
5257 case BO_Div:
5258 case BO_Rem:
5259 case BO_Add:
5260 case BO_Sub:
5261 case BO_Shl:
5262 case BO_Shr:
5263 case BO_LT:
5264 case BO_GT:
5265 case BO_LE:
5266 case BO_GE:
5267 case BO_EQ:
5268 case BO_NE:
5269 case BO_Cmp:
5270 case BO_And:
5271 case BO_Xor:
5272 case BO_Or:
5273 case BO_LAnd:
5274 case BO_LOr:
5275 case BO_Assign:
5276 case BO_Comma:
5277 llvm_unreachable("Not valid compound assignment operators");
5278 }
5279
5280 llvm_unreachable("Unhandled compound assignment operator");
5281}
5282
5283struct GEPOffsetAndOverflow {
5284 // The total (signed) byte offset for the GEP.
5285 llvm::Value *TotalOffset;
5286 // The offset overflow flag - true if the total offset overflows.
5287 llvm::Value *OffsetOverflows;
5288};
5289
5290/// Evaluate the given GEPVal, which is either an inbounds GEP or a constant,
5291/// and compute the total offset it applies from its base pointer BasePtr.
5292/// Returns the offset in bytes and a boolean flag for whether an overflow
5293/// happened during evaluation.
5294static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
5295 llvm::LLVMContext &VMContext,
5296 CodeGenModule &CGM,
5297 CGBuilderTy &Builder) {
5298 const auto &DL = CGM.getDataLayout();
5299
5300 // The total (signed) byte offset for the GEP.
5301 llvm::Value *TotalOffset = nullptr;
5302
5303 // Was the GEP already reduced to a constant?
5304 if (isa<llvm::Constant>(Val: GEPVal)) {
5305 // Compute the offset by casting both pointers to integers and subtracting:
5306 // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
5307 Value *BasePtr_int =
5308 Builder.CreatePtrToInt(V: BasePtr, DestTy: DL.getIntPtrType(BasePtr->getType()));
5309 Value *GEPVal_int =
5310 Builder.CreatePtrToInt(V: GEPVal, DestTy: DL.getIntPtrType(GEPVal->getType()));
5311 TotalOffset = Builder.CreateSub(LHS: GEPVal_int, RHS: BasePtr_int);
5312 return {.TotalOffset: TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
5313 }
5314
5315 auto *GEP = cast<llvm::GEPOperator>(Val: GEPVal);
5316 assert(GEP->getPointerOperand() == BasePtr &&
5317 "BasePtr must be the base of the GEP.");
5318 assert(GEP->isInBounds() && "Expected inbounds GEP");
5319
5320 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
5321
5322 // Grab references to the signed add/mul overflow intrinsics for intptr_t.
5323 auto *Zero = llvm::ConstantInt::getNullValue(Ty: IntPtrTy);
5324 auto *SAddIntrinsic =
5325 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
5326 auto *SMulIntrinsic =
5327 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
5328
5329 // The offset overflow flag - true if the total offset overflows.
5330 llvm::Value *OffsetOverflows = Builder.getFalse();
5331
5332 /// Return the result of the given binary operation.
5333 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
5334 llvm::Value *RHS) -> llvm::Value * {
5335 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
5336
5337 // If the operands are constants, return a constant result.
5338 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(Val: LHS)) {
5339 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(Val: RHS)) {
5340 llvm::APInt N;
5341 bool HasOverflow = mayHaveIntegerOverflow(LHS: LHSCI, RHS: RHSCI, Opcode,
5342 /*Signed=*/true, Result&: N);
5343 if (HasOverflow)
5344 OffsetOverflows = Builder.getTrue();
5345 return llvm::ConstantInt::get(Context&: VMContext, V: N);
5346 }
5347 }
5348
5349 // Otherwise, compute the result with checked arithmetic.
5350 auto *ResultAndOverflow = Builder.CreateCall(
5351 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
5352 OffsetOverflows = Builder.CreateOr(
5353 Builder.CreateExtractValue(Agg: ResultAndOverflow, Idxs: 1), OffsetOverflows);
5354 return Builder.CreateExtractValue(Agg: ResultAndOverflow, Idxs: 0);
5355 };
5356
5357 // Determine the total byte offset by looking at each GEP operand.
5358 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
5359 GTI != GTE; ++GTI) {
5360 llvm::Value *LocalOffset;
5361 auto *Index = GTI.getOperand();
5362 // Compute the local offset contributed by this indexing step:
5363 if (auto *STy = GTI.getStructTypeOrNull()) {
5364 // For struct indexing, the local offset is the byte position of the
5365 // specified field.
5366 unsigned FieldNo = cast<llvm::ConstantInt>(Val: Index)->getZExtValue();
5367 LocalOffset = llvm::ConstantInt::get(
5368 Ty: IntPtrTy, V: DL.getStructLayout(Ty: STy)->getElementOffset(Idx: FieldNo));
5369 } else {
5370 // Otherwise this is array-like indexing. The local offset is the index
5371 // multiplied by the element size.
5372 auto *ElementSize =
5373 llvm::ConstantInt::get(Ty: IntPtrTy, V: GTI.getSequentialElementStride(DL));
5374 auto *IndexS = Builder.CreateIntCast(V: Index, DestTy: IntPtrTy, /*isSigned=*/true);
5375 LocalOffset = eval(BO_Mul, ElementSize, IndexS);
5376 }
5377
5378 // If this is the first offset, set it as the total offset. Otherwise, add
5379 // the local offset into the running total.
5380 if (!TotalOffset || TotalOffset == Zero)
5381 TotalOffset = LocalOffset;
5382 else
5383 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
5384 }
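  // For example (illustrative): for a GEP equivalent to `&p[i].f`, the loop
  // accumulates TotalOffset = i * sizeof(*p) + offsetof(S, f), routing each
  // multiply and add through the checked-arithmetic lambda above.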
5385
5386 return {.TotalOffset: TotalOffset, .OffsetOverflows: OffsetOverflows};
5387}
5388
5389Value *
5390CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
5391 ArrayRef<Value *> IdxList,
5392 bool SignedIndices, bool IsSubtraction,
5393 SourceLocation Loc, const Twine &Name) {
5394 llvm::Type *PtrTy = Ptr->getType();
5395 Value *GEPVal = Builder.CreateInBoundsGEP(Ty: ElemTy, Ptr, IdxList, Name);
5396
5397 // If the pointer overflow sanitizer isn't enabled, do nothing.
5398 if (!SanOpts.has(K: SanitizerKind::PointerOverflow))
5399 return GEPVal;
5400
5401 // Perform nullptr-and-offset check unless the nullptr is defined.
5402 bool PerformNullCheck = !NullPointerIsDefined(
5403 F: Builder.GetInsertBlock()->getParent(), AS: PtrTy->getPointerAddressSpace());
5404  // Check for overflows unless the GEP got constant-folded, and only in the
5405  // default address space.
5406 bool PerformOverflowCheck =
5407 !isa<llvm::Constant>(Val: GEPVal) && PtrTy->getPointerAddressSpace() == 0;
5408
5409 if (!(PerformNullCheck || PerformOverflowCheck))
5410 return GEPVal;
5411
5412 const auto &DL = CGM.getDataLayout();
5413
5414 SanitizerScope SanScope(this);
5415 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
5416
5417 GEPOffsetAndOverflow EvaluatedGEP =
5418 EmitGEPOffsetInBytes(BasePtr: Ptr, GEPVal, VMContext&: getLLVMContext(), CGM, Builder);
5419
5420 assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
5421 EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
5422 "If the offset got constant-folded, we don't expect that there was an "
5423 "overflow.");
5424
5425 auto *Zero = llvm::ConstantInt::getNullValue(Ty: IntPtrTy);
5426
5427 // Common case: if the total offset is zero, and we are using C++ semantics,
5428 // where nullptr+0 is defined, don't emit a check.
5429 if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus)
5430 return GEPVal;
5431
5432 // Now that we've computed the total offset, add it to the base pointer (with
5433 // wrapping semantics).
5434 auto *IntPtr = Builder.CreatePtrToInt(V: Ptr, DestTy: IntPtrTy);
5435 auto *ComputedGEP = Builder.CreateAdd(LHS: IntPtr, RHS: EvaluatedGEP.TotalOffset);
5436
5437 llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
5438
5439 if (PerformNullCheck) {
5440 // In C++, if the base pointer evaluates to a null pointer value,
5441 // the only valid pointer this inbounds GEP can produce is also
5442 // a null pointer, so the offset must also evaluate to zero.
5443    // Likewise, if we have a non-zero base pointer, we cannot get a null
5444    // pointer as a result, so the offset cannot be -intptr_t(BasePtr).
5445 // In other words, both pointers are either null, or both are non-null,
5446 // or the behaviour is undefined.
5447 //
5448 // C, however, is more strict in this regard, and gives more
5449 // optimization opportunities: in C, additionally, nullptr+0 is undefined.
5450 // So both the input to the 'gep inbounds' AND the output must not be null.
5451 auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Arg: Ptr);
5452 auto *ResultIsNotNullptr = Builder.CreateIsNotNull(Arg: ComputedGEP);
5453 auto *Valid =
5454 CGM.getLangOpts().CPlusPlus
5455 ? Builder.CreateICmpEQ(LHS: BaseIsNotNullptr, RHS: ResultIsNotNullptr)
5456 : Builder.CreateAnd(LHS: BaseIsNotNullptr, RHS: ResultIsNotNullptr);
5457 Checks.emplace_back(Args&: Valid, Args: SanitizerKind::PointerOverflow);
5458 }
5459
5460 if (PerformOverflowCheck) {
5461 // The GEP is valid if:
5462 // 1) The total offset doesn't overflow, and
5463 // 2) The sign of the difference between the computed address and the base
5464 // pointer matches the sign of the total offset.
5465 llvm::Value *ValidGEP;
5466 auto *NoOffsetOverflow = Builder.CreateNot(V: EvaluatedGEP.OffsetOverflows);
5467 if (SignedIndices) {
5468      // GEP is computed as `unsigned base + signed offset`, therefore:
5469      // * If the offset was positive, the computed pointer cannot be
5470      //   [unsigned] less than the base pointer, unless it overflowed.
5471      // * If the offset was negative, the computed pointer cannot be
5472      //   [unsigned] greater than the base pointer, unless it overflowed.
5473 auto *PosOrZeroValid = Builder.CreateICmpUGE(LHS: ComputedGEP, RHS: IntPtr);
5474 auto *PosOrZeroOffset =
5475 Builder.CreateICmpSGE(LHS: EvaluatedGEP.TotalOffset, RHS: Zero);
5476 llvm::Value *NegValid = Builder.CreateICmpULT(LHS: ComputedGEP, RHS: IntPtr);
5477 ValidGEP =
5478 Builder.CreateSelect(C: PosOrZeroOffset, True: PosOrZeroValid, False: NegValid);
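      // For example (illustrative): base 0x1000 with offset -16 computes to
      // 0xFF0; the offset is negative and 0xFF0 u< 0x1000, so NegValid holds
      // and the GEP is considered valid.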
5479 } else if (!IsSubtraction) {
5480      // GEP is computed as `unsigned base + unsigned offset`, therefore the
5481      // computed pointer cannot be [unsigned] less than the base pointer,
5482      // unless there was an overflow.
5483 // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
5484 ValidGEP = Builder.CreateICmpUGE(LHS: ComputedGEP, RHS: IntPtr);
5485 } else {
5486      // GEP is computed as `unsigned base - unsigned offset`, therefore the
5487      // computed pointer cannot be [unsigned] greater than the base pointer,
5488      // unless there was an overflow.
5489 // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
5490 ValidGEP = Builder.CreateICmpULE(LHS: ComputedGEP, RHS: IntPtr);
5491 }
5492 ValidGEP = Builder.CreateAnd(LHS: ValidGEP, RHS: NoOffsetOverflow);
5493 Checks.emplace_back(Args&: ValidGEP, Args: SanitizerKind::PointerOverflow);
5494 }
5495
5496 assert(!Checks.empty() && "Should have produced some checks.");
5497
5498 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
5499 // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
5500 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
5501 EmitCheck(Checked: Checks, Check: SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);
5502
5503 return GEPVal;
5504}
5505
