//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IsResultUnused;

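  // EnsureSlot returns a slot that can be stored into without modifying Dest;
  // EnsureDest additionally replaces an ignored Dest with a fresh temporary so
  // that later stores have a destination.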
  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }
  void EnsureDest(QualType T) {
    if (!Dest.isIgnored()) return;
    Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

  // Calls `Fn` with a valid return value slot, potentially creating a temporary
  // to do so. If a temporary is created, an appropriate copy into `Dest` will
  // be emitted, as will lifetime markers.
  //
  // The given function should take a ReturnValueSlot, and return an RValue that
  // points to said slot.
  void withReturnValueSlot(const Expr *E,
                           llvm::function_ref<RValue(ReturnValueSlot)> Fn);

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
      : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
        IsResultUnused(IsResultUnused) {}

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents a value lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  enum ExprValueKind {
    EVK_RValue,
    EVK_NonRValue
  };

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  /// SrcIsRValue is true if source comes from an RValue.
  void EmitFinalDestCopy(QualType type, const LValue &src,
                         ExprValueKind SrcValueKind = EVK_NonRValue);
  void EmitFinalDestCopy(QualType type, RValue src);
  void EmitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, QualType ArrayQTy,
                     Expr *ExprToVisit, ArrayRef<Expr *> Args,
                     Expr *ArrayFiller);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    StmtVisitor<AggExprEmitter>::Visit(E);
  }

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitCoawaitExpr(CoawaitExpr *E) {
    CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
  }
  void VisitCoyieldExpr(CoyieldExpr *E) {
    CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
  }
  void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  void VisitConstantExpr(ConstantExpr *E) {
    EnsureDest(E->getType());

    if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      Address StoreDest = Dest.getAddress();
      // The emitted value is guaranteed to have the same size as the
      // destination but can have a different type. Just do a bitcast in this
      // case to avoid incorrect GEPs.
      if (Result->getType() != StoreDest.getType())
        StoreDest = StoreDest.withElementType(Result->getType());

      CGF.EmitAggregateStore(Result, StoreDest,
                             E->getType().isVolatileQualified());
      return;
    }
    return Visit(E->getSubExpr());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);
  void VisitBinCmp(const BinaryOperator *E);
  void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    Visit(E->getSemanticForm());
  }

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
                                       FieldDecl *InitializedFieldInUnion,
                                       Expr *ArrayFiller);
  void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                              llvm::Value *outerBegin = nullptr);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    Visit(DAE->getExpr());
  }
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    Visit(DIE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E->getType(), LV);
    }

    AggValueSlot Slot = EnsureSlot(E->getType());
    bool NeedsDestruction =
        !Slot.isExternallyDestructed() &&
        E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
    if (NeedsDestruction)
      Slot.setExternallyDestructed();
    CGF.EmitPseudoObjectRValue(E, Slot);
    if (NeedsDestruction)
      CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Slot.getAddress(),
                      E->getType());
  }

  void VisitVAArgExpr(VAArgExpr *E);
  void VisitCXXParenListInitExpr(CXXParenListInitExpr *E);
  void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
                                       Expr *ArrayFiller);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    RValue Res = CGF.EmitAtomicExpr(E);
    EmitFinalDestCopy(E->getType(), Res);
  }
  void VisitPackIndexingExpr(PackIndexingExpr *E) {
    Visit(E->getSelectedExpr());
  }
};
}  // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents a value lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);

  // If the type of the l-value is atomic, then do an atomic load.
  if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
    CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
    return;
  }

  EmitFinalDestCopy(E->getType(), LV);
}

/// True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}

void AggExprEmitter::withReturnValueSlot(
    const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
  QualType RetTy = E->getType();
  bool RequiresDestruction =
      !Dest.isExternallyDestructed() &&
      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct;

  // If it makes no observable difference, save a memcpy + temporary.
  //
  // We need to always provide our own temporary if destruction is required.
  // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
  // its lifetime before we have the chance to emit a proper destructor call.
  bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
                 (RequiresDestruction && !Dest.getAddress().isValid());

  Address RetAddr = Address::invalid();
  Address RetAllocaAddr = Address::invalid();

  EHScopeStack::stable_iterator LifetimeEndBlock;
  llvm::Value *LifetimeSizePtr = nullptr;
  llvm::IntrinsicInst *LifetimeStartInst = nullptr;
  if (!UseTemp) {
    RetAddr = Dest.getAddress();
  } else {
    RetAddr = CGF.CreateMemTemp(RetTy, "tmp", &RetAllocaAddr);
    llvm::TypeSize Size =
        CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
    LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAllocaAddr.getPointer());
    if (LifetimeSizePtr) {
      LifetimeStartInst =
          cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
      assert(LifetimeStartInst->getIntrinsicID() ==
                 llvm::Intrinsic::lifetime_start &&
             "Last insertion wasn't a lifetime.start?");

      CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
          NormalEHLifetimeMarker, RetAllocaAddr, LifetimeSizePtr);
      LifetimeEndBlock = CGF.EHStack.stable_begin();
    }
  }

  RValue Src =
      EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused,
                               Dest.isExternallyDestructed()));

  if (!UseTemp)
    return;

  assert(Dest.isIgnored() || Dest.getPointer() != Src.getAggregatePointer());
  EmitFinalDestCopy(E->getType(), Src);

  if (!RequiresDestruction && LifetimeStartInst) {
    // If there's no dtor to run, the copy was the last use of our temporary.
    // Since we're not guaranteed to be in an ExprWithCleanups, clean up
    // eagerly.
    CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
    CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAllocaAddr.getPointer());
  }
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
  assert(src.isAggregate() && "value must be aggregate value!");
  LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
  EmitFinalDestCopy(type, srcLV, EVK_RValue);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src,
                                       ExprValueKind SrcValueKind) {
  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result.  Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (Dest.isIgnored())
    return;

  // Copy non-trivial C structs here.
  LValue DstLV = CGF.MakeAddrLValue(
      Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);

  if (SrcValueKind == EVK_RValue) {
    if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructMoveAssignmentOperator(DstLV, src);
      else
        CGF.callCStructMoveConstructor(DstLV, src);
      return;
    }
  } else {
    if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructCopyAssignmentOperator(DstLV, src);
      else
        CGF.callCStructCopyConstructor(DstLV, src);
      return;
    }
  }

  AggValueSlot srcAgg = AggValueSlot::forLValue(
      src, CGF, AggValueSlot::IsDestructed, needsGC(type),
      AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
  EmitCopy(type, Dest, srcAgg);
}

/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
///   ignored
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
  if (dest.requiresGCollection()) {
    CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
    llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      dest.getAddress(),
                                                      src.getAddress(),
                                                      size);
    return;
  }

  // If the result of the assignment is used, copy the LHS there also.
  // It's volatile if either side is.  Use the minimum alignment of
  // the two sides.
  LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);
  LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);
  CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
                        dest.isVolatile() || src.isVolatile());
}

/// Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
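/// The resulting object holds either a {begin pointer, end pointer} pair or a
/// {begin pointer, length} pair, depending on which representation the
/// library's std::initializer_list uses.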
void
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
  // Emit an array containing the elements.  The array is externally destructed
  // if the std::initializer_list object is.
  ASTContext &Ctx = CGF.getContext();
  LValue Array = CGF.EmitLValue(E->getSubExpr());
  assert(Array.isSimple() && "initializer_list array not a simple lvalue");
  Address ArrayPtr = Array.getAddress(CGF);

  const ConstantArrayType *ArrayType =
      Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
  assert(ArrayType && "std::initializer_list constructed from non-array");

  // FIXME: Perform the checks on the field types in SemaInit.
  RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  // Start pointer.
  if (!Field->getType()->isPointerType() ||
      !Ctx.hasSameType(Field->getType()->getPointeeType(),
                       ArrayType->getElementType())) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
  llvm::Value *IdxStart[] = { Zero, Zero };
  llvm::Value *ArrayStart = Builder.CreateInBoundsGEP(
      ArrayPtr.getElementType(), ArrayPtr.getPointer(), IdxStart, "arraystart");
  CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
  ++Field;

  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  llvm::Value *Size = Builder.getInt(ArrayType->getSize());
  LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  if (Field->getType()->isPointerType() &&
      Ctx.hasSameType(Field->getType()->getPointeeType(),
                      ArrayType->getElementType())) {
    // End pointer.
    llvm::Value *IdxEnd[] = { Zero, Size };
    llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP(
        ArrayPtr.getElementType(), ArrayPtr.getPointer(), IdxEnd, "arrayend");
    CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
  } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
    // Length.
    CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
  } else {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }
}

/// Determine if E is a trivial array filler, that is, one that is
/// equivalent to zero-initialization.
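/// For example, in `int a[4] = {1};` the three trailing elements are covered
/// by an ImplicitValueInitExpr filler, which is trivial, so they can simply be
/// zero-initialized (or skipped entirely when writing to zeroed memory).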
static bool isTrivialFiller(Expr *E) {
  if (!E)
    return true;

  if (isa<ImplicitValueInitExpr>(E))
    return true;

  if (auto *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits())
      return false;
    return isTrivialFiller(ILE->getArrayFiller());
  }

  if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
    return Cons->getConstructor()->isDefaultConstructor() &&
           Cons->getConstructor()->isTrivial();

  // FIXME: Are there other cases where we can avoid emitting an initializer?
  return false;
}

/// Emit initialization of an array from an initializer list. ExprToVisit must
/// be either an InitListExpr or a CXXParenListInitExpr.
void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                                   QualType ArrayQTy, Expr *ExprToVisit,
                                   ArrayRef<Expr *> Args, Expr *ArrayFiller) {
  uint64_t NumInitElements = Args.size();

  uint64_t NumArrayElements = AType->getNumElements();
  assert(NumInitElements <= NumArrayElements);

  QualType elementType =
      CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();

  // DestPtr is an array*.  Construct an elementType* by drilling
  // down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = { zero, zero };
  llvm::Value *begin = Builder.CreateInBoundsGEP(
      DestPtr.getElementType(), DestPtr.getPointer(), indices,
      "arrayinit.begin");

  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
      DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
  llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);

  // Consider initializing the array by copying from a global. For this to be
  // more efficient than per-element initialization, the size of the elements
  // with explicit initializers should be large enough.
  if (NumInitElements * elementSize.getQuantity() > 16 &&
      elementType.isTriviallyCopyableType(CGF.getContext())) {
    CodeGen::CodeGenModule &CGM = CGF.CGM;
    ConstantEmitter Emitter(CGF);
    LangAS AS = ArrayQTy.getAddressSpace();
    if (llvm::Constant *C =
            Emitter.tryEmitForInitializer(ExprToVisit, AS, ArrayQTy)) {
      auto GV = new llvm::GlobalVariable(
          CGM.getModule(), C->getType(),
          /* isConstant= */ true, llvm::GlobalValue::PrivateLinkage, C,
          "constinit",
          /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
          CGM.getContext().getTargetAddressSpace(AS));
      Emitter.finalize(GV);
      CharUnits Align = CGM.getContext().getTypeAlignInChars(ArrayQTy);
      GV->setAlignment(Align.getAsAlign());
      Address GVAddr(GV, GV->getValueType(), Align);
      EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, ArrayQTy));
      return;
    }
  }

  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  Address endOfInit = Address::invalid();
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (CGF.needsEHCleanup(dtorKind)) {
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex.  Therefore we go through an
    // alloca.
    endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
                                     "arrayinit.endOfInit");
    cleanupDominator = Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         elementAlign,
                                         CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();

  // Otherwise, remember that we didn't need a cleanup.
  } else {
    dtorKind = QualType::DK_none;
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  // The 'current element to initialize'.  The invariants on this
  // variable are complicated.  Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  llvm::Value *element = begin;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      element = Builder.CreateInBoundsGEP(
          llvmElementType, element, one, "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    LValue elementLV = CGF.MakeAddrLValue(
        Address(element, llvmElementType, elementAlign), elementType);
    EmitInitializationToLValue(Args[i], elementLV);
  }

  // Check whether there's a non-trivial array-fill expression.
  bool hasTrivialFiller = isTrivialFiller(ArrayFiller);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop.  This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(
          llvmElementType, element, one, "arrayinit.start");
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(
        llvmElementType, begin,
        llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements), "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
        Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    {
      // C++1z [class.temporary]p5:
      //   when a default constructor is called to initialize an element of
      //   an array with no corresponding initializer [...] the destruction of
      //   every temporary created in a default argument is sequenced before
      //   the construction of the next array element, if any
      CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
      LValue elementLV = CGF.MakeAddrLValue(
          Address(currentElement, llvmElementType, elementAlign), elementType);
      if (ArrayFiller)
        EmitInitializationToLValue(ArrayFiller, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);
    }

    // Move on to the next element.
    llvm::Value *nextElement = Builder.CreateInBoundsGEP(
        llvmElementType, currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                             "arrayinit.done");
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  // If this is a unique OVE, just visit its source expression.
  if (e->isUnique())
    Visit(e->getSourceExpr());
  else
    EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
}

void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (Dest.isPotentiallyAliased() &&
      E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
  bool Destruct =
      !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed();
  if (Destruct)
    Slot.setExternallyDestructed();

  CGF.EmitAggExpr(E->getInitializer(), Slot);

  if (Destruct)
    if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
      CGF.pushLifetimeExtendedDestroy(
          CGF.getCleanupKind(DtorKind), Slot.getAddress(), E->getType(),
          CGF.getDestroyer(DtorKind), DtorKind & EHCleanup);
}

/// Attempt to look through various unimportant expressions to find a
/// cast of the given kind.
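/// This is used below to collapse a NonAtomicToAtomic cast that is wrapped
/// directly in an AtomicToNonAtomic cast (or vice versa) down to the
/// underlying operand.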
static Expr *findPeephole(Expr *op, CastKind kind, const ASTContext &ctx) {
  op = op->IgnoreParenNoopCasts(ctx);
  if (auto castE = dyn_cast<CastExpr>(op)) {
    if (castE->getCastKind() == kind)
      return castE->getSubExpr();
  }
  return nullptr;
}

void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
    CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    // FIXME: Can this actually happen? We have no test coverage for it.
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
                                      CodeGenFunction::TCK_Load);
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(CGF), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    // Evaluate even if the destination is ignored.
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    Address CastPtr = Dest.getAddress().withElementType(CGF.ConvertType(Ty));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_LValueToRValueBitCast: {
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
    Address SourceAddress =
        SourceLV.getAddress(CGF).withElementType(CGF.Int8Ty);
    Address DestAddress = Dest.getAddress().withElementType(CGF.Int8Ty);
    llvm::Value *SizeVal = llvm::ConstantInt::get(
        CGF.SizeTy,
        CGF.getContext().getTypeSizeInChars(E->getType()).getQuantity());
    Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal);
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                     "should have been unpacked before we got here");
  }

  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic: {
    bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);

    // Determine the atomic and value types.
    QualType atomicType = E->getSubExpr()->getType();
    QualType valueType = E->getType();
    if (isToAtomic) std::swap(atomicType, valueType);

    assert(atomicType->isAtomicType());
    assert(CGF.getContext().hasSameUnqualifiedType(
        valueType, atomicType->castAs<AtomicType>()->getValueType()));

    // Just recurse normally if we're ignoring the result or the
    // atomic type doesn't change representation.
    if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
      return Visit(E->getSubExpr());
    }

    CastKind peepholeTarget =
        (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);

    // These two cases are reverses of each other; try to peephole them.
    if (Expr *op =
            findPeephole(E->getSubExpr(), peepholeTarget, CGF.getContext())) {
      assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
                                                     E->getType()) &&
             "peephole significantly changed types?");
      return Visit(op);
    }

    // If we're converting an r-value of non-atomic type to an r-value
    // of atomic type, just emit directly into the relevant sub-object.
    if (isToAtomic) {
      AggValueSlot valueDest = Dest;
      if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
        // Zero-initialize.  (Strictly speaking, we only need to initialize
        // the padding at the end, but this is simpler.)
        if (!Dest.isZeroed())
          CGF.EmitNullInitialization(Dest.getAddress(), atomicType);

        // Build a GEP to refer to the subobject.
        Address valueAddr =
            CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0);
        valueDest = AggValueSlot::forAddr(valueAddr,
                                          valueDest.getQualifiers(),
                                          valueDest.isExternallyDestructed(),
                                          valueDest.requiresGCollection(),
                                          valueDest.isPotentiallyAliased(),
                                          AggValueSlot::DoesNotOverlap,
                                          AggValueSlot::IsZeroed);
      }

      CGF.EmitAggExpr(E->getSubExpr(), valueDest);
      return;
    }

    // Otherwise, we're converting an atomic type to a non-atomic type.
    // Make an atomic temporary, emit into that, and then copy the value out.
    AggValueSlot atomicSlot =
        CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
    CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);

    Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0);
    RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
    return EmitFinalDestCopy(valueType, rvalue);
  }
  case CK_AddressSpaceConversion:
    return Visit(E->getSubExpr());

  case CK_LValueToRValue:
    // If we're loading from a volatile type, force the destination
    // into existence.
    if (E->getSubExpr()->getType().isVolatileQualified()) {
      bool Destruct =
          !Dest.isExternallyDestructed() &&
          E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
      if (Destruct)
        Dest.setExternallyDestructed();
      EnsureDest(E->getType());
      Visit(E->getSubExpr());

      if (Destruct)
        CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                        E->getType());

      return;
    }

    [[fallthrough]];

  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLOpaqueType:
  case CK_MatrixCast:
  case CK_IntToOCLSampler:
  case CK_FloatingToFixedPoint:
  case CK_FixedPointToFloating:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitCallExpr(E, Slot);
  });
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitObjCMessageExpr(E, Slot);
  });
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

enum CompareKind {
  CK_Less,
  CK_Greater,
  CK_Equal,
};

static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF,
                                const BinaryOperator *E, llvm::Value *LHS,
                                llvm::Value *RHS, CompareKind Kind,
                                const char *NameSuffix = "") {
  QualType ArgTy = E->getLHS()->getType();
  if (const ComplexType *CT = ArgTy->getAs<ComplexType>())
    ArgTy = CT->getElementType();

  if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) {
    assert(Kind == CK_Equal &&
           "member pointers may only be compared for equality");
    return CGF.CGM.getCXXABI().EmitMemberPointerComparison(
        CGF, LHS, RHS, MPT, /*IsInequality*/ false);
  }

  // Compute the comparison instructions for the specified comparison kind.
  struct CmpInstInfo {
    const char *Name;
    llvm::CmpInst::Predicate FCmp;
    llvm::CmpInst::Predicate SCmp;
    llvm::CmpInst::Predicate UCmp;
  };
  CmpInstInfo InstInfo = [&]() -> CmpInstInfo {
    using FI = llvm::FCmpInst;
    using II = llvm::ICmpInst;
    switch (Kind) {
    case CK_Less:
      return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT};
    case CK_Greater:
      return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT};
    case CK_Equal:
      return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ};
    }
    llvm_unreachable("Unrecognised CompareKind enum");
  }();

  if (ArgTy->hasFloatingRepresentation())
    return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS,
                              llvm::Twine(InstInfo.Name) + NameSuffix);
  if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()) {
    auto Inst =
        ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp : InstInfo.UCmp;
    return Builder.CreateICmp(Inst, LHS, RHS,
                              llvm::Twine(InstInfo.Name) + NameSuffix);
  }

  llvm_unreachable("unsupported aggregate binary expression should have "
                   "already been handled");
}

void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
  using llvm::BasicBlock;
  using llvm::PHINode;
  using llvm::Value;
  assert(CGF.getContext().hasSameType(E->getLHS()->getType(),
                                      E->getRHS()->getType()));
  const ComparisonCategoryInfo &CmpInfo =
      CGF.getContext().CompCategories.getInfoForType(E->getType());
  assert(CmpInfo.Record->isTriviallyCopyable() &&
         "cannot copy non-trivially copyable aggregate");

  QualType ArgTy = E->getLHS()->getType();

  if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
      !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
      !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) {
    return CGF.ErrorUnsupported(E, "aggregate three-way comparison");
  }
  bool IsComplex = ArgTy->isAnyComplexType();

  // Evaluate the operands to the expression and extract their values.
  auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> {
    RValue RV = CGF.EmitAnyExpr(E);
    if (RV.isScalar())
      return {RV.getScalarVal(), nullptr};
    if (RV.isAggregate())
      return {RV.getAggregatePointer(), nullptr};
    assert(RV.isComplex());
    return RV.getComplexVal();
  };
  auto LHSValues = EmitOperand(E->getLHS()),
       RHSValues = EmitOperand(E->getRHS());

  auto EmitCmp = [&](CompareKind K) {
    Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first,
                             K, IsComplex ? ".r" : "");
    if (!IsComplex)
      return Cmp;
    assert(K == CompareKind::CK_Equal);
    Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second,
                                 RHSValues.second, K, ".i");
    return Builder.CreateAnd(Cmp, CmpImag, "and.eq");
  };
  auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) {
    return Builder.getInt(VInfo->getIntValue());
  };

  Value *Select;
  if (ArgTy->isNullPtrType()) {
    Select = EmitCmpRes(CmpInfo.getEqualOrEquiv());
  } else if (!CmpInfo.isPartial()) {
    Value *SelectOne =
        Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()),
                             EmitCmpRes(CmpInfo.getGreater()), "sel.lt");
    Select = Builder.CreateSelect(EmitCmp(CK_Equal),
                                  EmitCmpRes(CmpInfo.getEqualOrEquiv()),
                                  SelectOne, "sel.eq");
  } else {
    Value *SelectEq = Builder.CreateSelect(
        EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
        EmitCmpRes(CmpInfo.getUnordered()), "sel.eq");
    Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater),
                                           EmitCmpRes(CmpInfo.getGreater()),
                                           SelectEq, "sel.gt");
    Select = Builder.CreateSelect(
        EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt");
  }
  // Create the return value in the destination slot.
  EnsureDest(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());

  // Emit the address of the first (and only) field in the comparison category
  // type, and initialize it from the constant integer value selected above.
  LValue FieldLV = CGF.EmitLValueForFieldInitialization(
      DestLV, *CmpInfo.Record->field_begin());
  CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true);

  // All done! The result is in the Dest slot.
}

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E->getType(), LV);
}

/// Is the value of the given expression possibly a reference to or
/// into a __block variable?
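/// For example, given `__block Agg a; a = f();`, evaluating `f()` might copy
/// a block that captures `a`, which moves `a` to the heap; the RHS therefore
/// has to be emitted before the address of `a` is computed.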
static bool isBlockVarRef(const Expr *E) {
  // Make sure we look through parens.
  E = E->IgnoreParens();

  // Check for a direct reference to a __block variable.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
    return (var && var->hasAttr<BlocksAttr>());
  }

  // More complicated stuff.

  // Binary operators.
  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
    // For an assignment or pointer-to-member operation, just care
    // about the LHS.
    if (op->isAssignmentOp() || op->isPtrMemOp())
      return isBlockVarRef(op->getLHS());

    // For a comma, just care about the RHS.
    if (op->getOpcode() == BO_Comma)
      return isBlockVarRef(op->getRHS());

    // FIXME: pointer arithmetic?
    return false;

  // Check both sides of a conditional operator.
  } else if (const AbstractConditionalOperator *op
               = dyn_cast<AbstractConditionalOperator>(E)) {
    return isBlockVarRef(op->getTrueExpr())
        || isBlockVarRef(op->getFalseExpr());

  // OVEs are required to support BinaryConditionalOperators.
  } else if (const OpaqueValueExpr *op
               = dyn_cast<OpaqueValueExpr>(E)) {
    if (const Expr *src = op->getSourceExpr())
      return isBlockVarRef(src);

  // Casts are necessary to get things like (*(int*)&var) = foo().
  // We don't really care about the kind of cast here, except
  // we don't want to look through l2r casts, because it's okay
  // to get the *value* in a __block variable.
  } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
    if (cast->getCastKind() == CK_LValueToRValue)
      return false;
    return isBlockVarRef(cast->getSubExpr());

  // Handle unary operators.  Again, just aggressively look through
  // it, ignoring the operation.
  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
    return isBlockVarRef(uop->getSubExpr());

  // Look into the base of a field access.
  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
    return isBlockVarRef(mem->getBase());

  // Look into the base of a subscript.
  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
    return isBlockVarRef(sub->getBase());
  }

  return false;
}

void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  // If the LHS might be a __block variable, and the RHS can
  // potentially cause a block copy, we need to evaluate the RHS first
  // so that the assignment goes the right place.
  // This is pretty semantically fragile.
  if (isBlockVarRef(E->getLHS()) &&
      E->getRHS()->HasSideEffects(CGF.getContext())) {
    // Ensure that we have a destination, and evaluate the RHS into that.
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());

    // Now emit the LHS and copy into it.
    LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // That copy is an atomic copy if the LHS is atomic.
    if (LHS.getType()->isAtomicType() ||
        CGF.LValueIsSuitableForInlineAtomic(LHS)) {
      CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
      return;
    }

    EmitCopy(E->getLHS()->getType(),
             AggValueSlot::forLValue(LHS, CGF, AggValueSlot::IsDestructed,
                                     needsGC(E->getLHS()->getType()),
                                     AggValueSlot::IsAliased,
                                     AggValueSlot::MayOverlap),
             Dest);
    return;
  }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // If we have an atomic type, evaluate into the destination and then
  // do an atomic copy.
  if (LHS.getType()->isAtomicType() ||
      CGF.LValueIsSuitableForInlineAtomic(LHS)) {
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());
    CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    return;
  }

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot = AggValueSlot::forLValue(
      LHS, CGF, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
      AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
  // A non-volatile aggregate destination might have volatile members.
  if (!LHSSlot.isVolatile() &&
      CGF.hasVolatileMember(E->getLHS()->getType()))
    LHSSlot.setVolatile(true);

  CGF.EmitAggExpr(E->getRHS(), LHSSlot);

  // Copy into the destination if the assignment isn't ignored.
  EmitFinalDestCopy(E->getType(), LHS);

  if (!Dest.isIgnored() && !Dest.isExternallyDestructed() &&
      E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
    CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                    E->getType());
}

void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
                           CGF.getProfileCount(E));

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();
  bool destructNonTrivialCStruct =
      !isExternallyDestructed &&
      E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
  isExternallyDestructed |= destructNonTrivialCStruct;
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  CGF.incrementProfileCounter(E);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  if (destructNonTrivialCStruct)
    CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                    E->getType());

  CGF.EmitBlock(ContBlock);
}

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr());
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  Address ArgValue = Address::invalid();
  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  // If EmitVAArg fails, emit an error.
  if (!ArgPtr.isValid()) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}

void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether it was externally destructed.
  bool wasExternallyDestructed = Dest.isExternallyDestructed();
  EnsureDest(E->getType());

  // We're going to push a destructor if there isn't already one.
  Dest.setExternallyDestructed();

  Visit(E->getSubExpr());

  // Push that destructor we promised.
  if (!wasExternallyDestructed)
    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
}

void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
    const CXXInheritedCtorInitExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitInheritedCXXConstructorCall(
      E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
      E->inheritedFromVBase(), E);
}

void
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> Cleanups;
  llvm::Instruction *CleanupDominator = nullptr;

  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
  for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
                                               e = E->capture_init_end();
       i != e; ++i, ++CurField) {
    // Emit initialization
    LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
    if (CurField->hasCapturedVLAType()) {
      CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
      continue;
    }

    EmitInitializationToLValue(*i, LV);

    // Push a destructor if necessary.
    if (QualType::DestructionKind DtorKind =
            CurField->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(DtorKind)) {
        if (!CleanupDominator)
          CleanupDominator = CGF.Builder.CreateAlignedLoad(
              CGF.Int8Ty,
              llvm::Constant::getNullValue(CGF.Int8PtrTy),
              CharUnits::One()); // placeholder

        CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), CurField->getType(),
                        CGF.getDestroyer(DtorKind), false);
        Cleanups.push_back(CGF.EHStack.stable_begin());
      }
    }
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  for (unsigned i = Cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(Cleanups[i-1], CleanupDominator);

  // Destroy the placeholder if we made one.
  if (CleanupDominator)
    CleanupDominator->eraseFromParent();
}

void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CodeGenFunction::RunCleanupsScope cleanups(CGF);
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

/// Determine whether the given cast kind is known to always convert values
/// with all zero bits in their value representation to values with all zero
/// bits in their value representation.
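/// For example, an integral cast of a zero value still stores all zero bits,
/// whereas a null-to-pointer cast might not on a target where null pointers
/// are not represented as all-zero bits.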
1424static bool castPreservesZero(const CastExpr *CE) {
1425 switch (CE->getCastKind()) {
1426 // No-ops.
1427 case CK_NoOp:
1428 case CK_UserDefinedConversion:
1429 case CK_ConstructorConversion:
1430 case CK_BitCast:
1431 case CK_ToUnion:
1432 case CK_ToVoid:
1433 // Conversions between (possibly-complex) integral, (possibly-complex)
1434 // floating-point, and bool.
1435 case CK_BooleanToSignedIntegral:
1436 case CK_FloatingCast:
1437 case CK_FloatingComplexCast:
1438 case CK_FloatingComplexToBoolean:
1439 case CK_FloatingComplexToIntegralComplex:
1440 case CK_FloatingComplexToReal:
1441 case CK_FloatingRealToComplex:
1442 case CK_FloatingToBoolean:
1443 case CK_FloatingToIntegral:
1444 case CK_IntegralCast:
1445 case CK_IntegralComplexCast:
1446 case CK_IntegralComplexToBoolean:
1447 case CK_IntegralComplexToFloatingComplex:
1448 case CK_IntegralComplexToReal:
1449 case CK_IntegralRealToComplex:
1450 case CK_IntegralToBoolean:
1451 case CK_IntegralToFloating:
1452 // Reinterpreting integers as pointers and vice versa.
1453 case CK_IntegralToPointer:
1454 case CK_PointerToIntegral:
1455 // Language extensions.
1456 case CK_VectorSplat:
1457 case CK_MatrixCast:
1458 case CK_NonAtomicToAtomic:
1459 case CK_AtomicToNonAtomic:
1460 return true;
1461
1462 case CK_BaseToDerivedMemberPointer:
1463 case CK_DerivedToBaseMemberPointer:
1464 case CK_MemberPointerToBoolean:
1465 case CK_NullToMemberPointer:
1466 case CK_ReinterpretMemberPointer:
1467 // FIXME: ABI-dependent.
1468 return false;
1469
1470 case CK_AnyPointerToBlockPointerCast:
1471 case CK_BlockPointerToObjCPointerCast:
1472 case CK_CPointerToObjCPointerCast:
1473 case CK_ObjCObjectLValueCast:
1474 case CK_IntToOCLSampler:
1475 case CK_ZeroToOCLOpaqueType:
1476 // FIXME: Check these.
1477 return false;
1478
1479 case CK_FixedPointCast:
1480 case CK_FixedPointToBoolean:
1481 case CK_FixedPointToFloating:
1482 case CK_FixedPointToIntegral:
1483 case CK_FloatingToFixedPoint:
1484 case CK_IntegralToFixedPoint:
1485 // FIXME: Do all fixed-point types represent zero as all 0 bits?
1486 return false;
1487
1488 case CK_AddressSpaceConversion:
1489 case CK_BaseToDerived:
1490 case CK_DerivedToBase:
1491 case CK_Dynamic:
1492 case CK_NullToPointer:
1493 case CK_PointerToBoolean:
1494 // FIXME: Preserves zeroes only if zero pointers and null pointers have the
1495 // same representation in all involved address spaces.
1496 return false;
1497
1498 case CK_ARCConsumeObject:
1499 case CK_ARCExtendBlockObject:
1500 case CK_ARCProduceObject:
1501 case CK_ARCReclaimReturnedObject:
1502 case CK_CopyAndAutoreleaseBlockObject:
1503 case CK_ArrayToPointerDecay:
1504 case CK_FunctionToPointerDecay:
1505 case CK_BuiltinFnToFnPtr:
1506 case CK_Dependent:
1507 case CK_LValueBitCast:
1508 case CK_LValueToRValue:
1509 case CK_LValueToRValueBitCast:
1510 case CK_UncheckedDerivedToBase:
1511 return false;
1512 }
1513 llvm_unreachable("Unhandled clang::CastKind enum");
1514}
1515
1516/// isSimpleZero - If emitting this value will obviously just cause a store of
1517/// zero to memory, return true. This can return false if uncertain, so it just
1518/// handles simple cases.
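/// For example, the literal 0, +0.0, '\0', a null pointer constant such as
/// (int*)0, and value-initializations like int() all count as simple zeros;
/// anything else conservatively returns false.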
1519static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
1520 E = E->IgnoreParens();
1521 while (auto *CE = dyn_cast<CastExpr>(Val: E)) {
1522 if (!castPreservesZero(CE))
1523 break;
1524 E = CE->getSubExpr()->IgnoreParens();
1525 }
1526
1527 // 0
1528 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(Val: E))
1529 return IL->getValue() == 0;
1530 // +0.0
1531 if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(Val: E))
1532 return FL->getValue().isPosZero();
1533 // int()
1534 if ((isa<ImplicitValueInitExpr>(Val: E) || isa<CXXScalarValueInitExpr>(Val: E)) &&
1535 CGF.getTypes().isZeroInitializable(T: E->getType()))
1536 return true;
1537 // (int*)0 - Null pointer expressions.
1538 if (const CastExpr *ICE = dyn_cast<CastExpr>(Val: E))
1539 return ICE->getCastKind() == CK_NullToPointer &&
1540 CGF.getTypes().isPointerZeroInitializable(T: E->getType()) &&
1541 !E->HasSideEffects(Ctx: CGF.getContext());
1542 // '\0'
1543 if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(Val: E))
1544 return CL->getValue() == 0;
1545
1546 // Otherwise, hard case: conservatively return false.
1547 return false;
1548}
1549
1550
1551void
1552AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
1553 QualType type = LV.getType();
1554 // FIXME: Ignore result?
1555 // FIXME: Are initializers affected by volatile?
1556 if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
1557 // Storing "i32 0" to a zeroed memory location is a no-op.
1558 return;
1559 } else if (isa<ImplicitValueInitExpr>(Val: E) || isa<CXXScalarValueInitExpr>(Val: E)) {
1560 return EmitNullInitializationToLValue(Address: LV);
1561 } else if (isa<NoInitExpr>(Val: E)) {
1562 // Do nothing.
1563 return;
1564 } else if (type->isReferenceType()) {
1565 RValue RV = CGF.EmitReferenceBindingToExpr(E);
1566 return CGF.EmitStoreThroughLValue(Src: RV, Dst: LV);
1567 }
1568
1569 switch (CGF.getEvaluationKind(T: type)) {
1570 case TEK_Complex:
1571 CGF.EmitComplexExprIntoLValue(E, dest: LV, /*isInit*/ true);
1572 return;
1573 case TEK_Aggregate:
1574 CGF.EmitAggExpr(
1575 E, AS: AggValueSlot::forLValue(LV, CGF, isDestructed: AggValueSlot::IsDestructed,
1576 needsGC: AggValueSlot::DoesNotNeedGCBarriers,
1577 isAliased: AggValueSlot::IsNotAliased,
1578 mayOverlap: AggValueSlot::MayOverlap, isZeroed: Dest.isZeroed()));
1579 return;
1580 case TEK_Scalar:
1581 if (LV.isSimple()) {
1582 CGF.EmitScalarInit(init: E, /*D=*/nullptr, lvalue: LV, /*Captured=*/capturedByInit: false);
1583 } else {
1584 CGF.EmitStoreThroughLValue(Src: RValue::get(V: CGF.EmitScalarExpr(E)), Dst: LV);
1585 }
1586 return;
1587 }
1588 llvm_unreachable("bad evaluation kind");
1589}
1590
1591void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
1592 QualType type = lv.getType();
1593
1594 // If the destination slot is already zeroed out before the aggregate is
1595 // copied into it, we don't have to emit any zeros here.
1596 if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(T: type))
1597 return;
1598
1599 if (CGF.hasScalarEvaluationKind(T: type)) {
1600 // For non-aggregates, we can store the appropriate null constant.
1601 llvm::Value *null = CGF.CGM.EmitNullConstant(T: type);
1602 // Note that the following is not equivalent to
1603 // EmitStoreThroughBitfieldLValue for ARC types.
1604 if (lv.isBitField()) {
1605 CGF.EmitStoreThroughBitfieldLValue(Src: RValue::get(V: null), Dst: lv);
1606 } else {
1607 assert(lv.isSimple());
1608 CGF.EmitStoreOfScalar(value: null, lvalue: lv, /* isInitialization */ isInit: true);
1609 }
1610 } else {
1611 // There's a potential optimization opportunity in combining
1612 // memsets; that would be easy for arrays, but relatively
1613 // difficult for structures with the current code.
1614 CGF.EmitNullInitialization(DestPtr: lv.getAddress(CGF), Ty: lv.getType());
1615 }
1616}
1617
1618void AggExprEmitter::VisitCXXParenListInitExpr(CXXParenListInitExpr *E) {
1619 VisitCXXParenListOrInitListExpr(E, E->getInitExprs(),
1620 E->getInitializedFieldInUnion(),
1621 E->getArrayFiller());
1622}
1623
1624void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
1625 if (E->hadArrayRangeDesignator())
1626 CGF.ErrorUnsupported(E, "GNU array range designator extension");
1627
1628 if (E->isTransparent())
1629 return Visit(E: E->getInit(Init: 0));
1630
1631 VisitCXXParenListOrInitListExpr(
1632 E, E->inits(), E->getInitializedFieldInUnion(), E->getArrayFiller());
1633}
1634
1635void AggExprEmitter::VisitCXXParenListOrInitListExpr(
1636 Expr *ExprToVisit, ArrayRef<Expr *> InitExprs,
1637 FieldDecl *InitializedFieldInUnion, Expr *ArrayFiller) {
1638#if 0
1639 // FIXME: Assess perf here? Figure out what cases are worth optimizing here
1640 // (Length of globals? Chunks of zeroed-out space?).
1641 //
1642 // If we can, prefer a copy from a global; this is a lot less code for long
1643 // globals, and it's easier for the current optimizers to analyze.
1644 if (llvm::Constant *C =
1645 CGF.CGM.EmitConstantExpr(ExprToVisit, ExprToVisit->getType(), &CGF)) {
1646 llvm::GlobalVariable* GV =
1647 new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
1648 llvm::GlobalValue::InternalLinkage, C, "");
1649 EmitFinalDestCopy(ExprToVisit->getType(),
1650 CGF.MakeAddrLValue(GV, ExprToVisit->getType()));
1651 return;
1652 }
1653#endif
1654
1655 AggValueSlot Dest = EnsureSlot(T: ExprToVisit->getType());
1656
1657 LValue DestLV = CGF.MakeAddrLValue(Addr: Dest.getAddress(), T: ExprToVisit->getType());
1658
1659 // Handle initialization of an array.
1660 if (ExprToVisit->getType()->isConstantArrayType()) {
1661 auto AType = cast<llvm::ArrayType>(Val: Dest.getAddress().getElementType());
1662 EmitArrayInit(DestPtr: Dest.getAddress(), AType, ArrayQTy: ExprToVisit->getType(), ExprToVisit,
1663 Args: InitExprs, ArrayFiller);
1664 return;
1665 } else if (ExprToVisit->getType()->isVariableArrayType()) {
1666 // A variable array type that has an initializer can only do empty
1667 // initialization. And because this feature is not exposed as an extension
1668 // in C++, we can safely memset the array memory to zero.
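    // For example (illustrative), with Clang's VLA support
    //   void f(int n) { int vla[n] = {}; }
    // the only initializer accepted is the empty list, so the elements are
    // simply zeroed here.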
1669 assert(InitExprs.size() == 0 &&
1670 "you can only use an empty initializer with VLAs");
1671 CGF.EmitNullInitialization(DestPtr: Dest.getAddress(), Ty: ExprToVisit->getType());
1672 return;
1673 }
1674
1675 assert(ExprToVisit->getType()->isRecordType() &&
1676 "Only support structs/unions here!");
1677
1678 // Do struct initialization; this code just sets each individual member
1679 // to the appropriate value. This makes bitfield support automatic;
1680 // the disadvantage is that the generated code is more difficult for
1681 // the optimizer, especially with bitfields.
1682 unsigned NumInitElements = InitExprs.size();
1683 RecordDecl *record = ExprToVisit->getType()->castAs<RecordType>()->getDecl();
1684
1685 // We'll need to enter cleanup scopes in case any of the element
1686 // initializers throws an exception.
1687 SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
1688 llvm::Instruction *cleanupDominator = nullptr;
1689 auto addCleanup = [&](const EHScopeStack::stable_iterator &cleanup) {
1690 cleanups.push_back(Elt: cleanup);
1691 if (!cleanupDominator) // create placeholder once needed
1692 cleanupDominator = CGF.Builder.CreateAlignedLoad(
1693 Ty: CGF.Int8Ty, Addr: llvm::Constant::getNullValue(Ty: CGF.Int8PtrTy),
1694 Align: CharUnits::One());
1695 };
1696
1697 unsigned curInitIndex = 0;
1698
1699 // Emit initialization of base classes.
1700 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: record)) {
1701 assert(NumInitElements >= CXXRD->getNumBases() &&
1702 "missing initializer for base class");
1703 for (auto &Base : CXXRD->bases()) {
1704 assert(!Base.isVirtual() && "should not see vbases here");
1705 auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
1706 Address V = CGF.GetAddressOfDirectBaseInCompleteClass(
1707 Value: Dest.getAddress(), Derived: CXXRD, Base: BaseRD,
1708 /*isBaseVirtual*/ BaseIsVirtual: false);
1709 AggValueSlot AggSlot = AggValueSlot::forAddr(
1710 addr: V, quals: Qualifiers(),
1711 isDestructed: AggValueSlot::IsDestructed,
1712 needsGC: AggValueSlot::DoesNotNeedGCBarriers,
1713 isAliased: AggValueSlot::IsNotAliased,
1714 mayOverlap: CGF.getOverlapForBaseInit(RD: CXXRD, BaseRD, IsVirtual: Base.isVirtual()));
1715 CGF.EmitAggExpr(E: InitExprs[curInitIndex++], AS: AggSlot);
1716
1717 if (QualType::DestructionKind dtorKind =
1718 Base.getType().isDestructedType()) {
1719 CGF.pushDestroy(dtorKind, addr: V, type: Base.getType());
1720 addCleanup(CGF.EHStack.stable_begin());
1721 }
1722 }
1723 }
1724
1725 // Prepare a 'this' for CXXDefaultInitExprs.
1726 CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());
1727
1728 if (record->isUnion()) {
1729 // Only initialize one field of a union. The field itself is
1730 // specified by the initializer list.
1731 if (!InitializedFieldInUnion) {
1732 // Empty union; we have nothing to do.
1733
1734#ifndef NDEBUG
1735 // Make sure that it's really an empty union and not a failure of
1736 // semantic analysis.
1737 for (const auto *Field : record->fields())
1738 assert((Field->isUnnamedBitfield() || Field->isAnonymousStructOrUnion()) && "Only unnamed bitfields or anonymous classes allowed");
1739#endif
1740 return;
1741 }
1742
1743 // FIXME: volatility
1744 FieldDecl *Field = InitializedFieldInUnion;
1745
1746 LValue FieldLoc = CGF.EmitLValueForFieldInitialization(Base: DestLV, Field);
1747 if (NumInitElements) {
1748 // Store the initializer into the field
1749 EmitInitializationToLValue(E: InitExprs[0], LV: FieldLoc);
1750 } else {
1751 // Default-initialize to null.
1752 EmitNullInitializationToLValue(lv: FieldLoc);
1753 }
1754
1755 return;
1756 }
1757
1758 // Here we iterate over the fields; this makes it simpler to both
1759 // default-initialize fields and skip over unnamed fields.
1760 for (const auto *field : record->fields()) {
1761 // We're done once we hit the flexible array member.
1762 if (field->getType()->isIncompleteArrayType())
1763 break;
1764
1765 // Always skip anonymous bitfields.
1766 if (field->isUnnamedBitfield())
1767 continue;
1768
1769 // We're done if we reach the end of the explicit initializers, we
1770 // have a zeroed object, and the rest of the fields are
1771 // zero-initializable.
1772 if (curInitIndex == NumInitElements && Dest.isZeroed() &&
1773 CGF.getTypes().isZeroInitializable(T: ExprToVisit->getType()))
1774 break;
1775
1776
1777 LValue LV = CGF.EmitLValueForFieldInitialization(Base: DestLV, Field: field);
1778 // We never generate write-barriers for initialized fields.
1779 LV.setNonGC(true);
1780
1781 if (curInitIndex < NumInitElements) {
1782 // Store the initializer into the field.
1783 EmitInitializationToLValue(E: InitExprs[curInitIndex++], LV);
1784 } else {
1785 // We're out of initializers; default-initialize to null
1786 EmitNullInitializationToLValue(lv: LV);
1787 }
1788
1789 // Push a destructor if necessary.
1790 // FIXME: if we have an array of structures, all explicitly
1791 // initialized, we can end up pushing a linear number of cleanups.
1792 bool pushedCleanup = false;
1793 if (QualType::DestructionKind dtorKind
1794 = field->getType().isDestructedType()) {
1795 assert(LV.isSimple());
1796 if (CGF.needsEHCleanup(kind: dtorKind)) {
1797 CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), field->getType(),
1798 CGF.getDestroyer(destructionKind: dtorKind), false);
1799 addCleanup(CGF.EHStack.stable_begin());
1800 pushedCleanup = true;
1801 }
1802 }
1803
1804 // If the GEP didn't get used because of a dead zero init or something
1805 // else, clean it up for -O0 builds and general tidiness.
1806 if (!pushedCleanup && LV.isSimple())
1807 if (llvm::GetElementPtrInst *GEP =
1808 dyn_cast<llvm::GetElementPtrInst>(Val: LV.getPointer(CGF)))
1809 if (GEP->use_empty())
1810 GEP->eraseFromParent();
1811 }
1812
1813 // Deactivate all the partial cleanups in reverse order, which
1814 // generally means popping them.
1815 assert((cleanupDominator || cleanups.empty()) &&
1816 "Missing cleanupDominator before deactivating cleanup blocks");
1817 for (unsigned i = cleanups.size(); i != 0; --i)
1818 CGF.DeactivateCleanupBlock(Cleanup: cleanups[i-1], DominatingIP: cleanupDominator);
1819
1820 // Destroy the placeholder if we made one.
1821 if (cleanupDominator)
1822 cleanupDominator->eraseFromParent();
1823}
1824
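// An ArrayInitLoopExpr represents per-element initialization of an array,
// for example (illustrative) the element-wise copy of an array member done by
// an implicitly-defined copy constructor, or a by-value lambda capture of an
// array. The loop emitted below runs once per element; for multidimensional
// arrays the outermost loop's partial-destruction cleanup is shared so we do
// not emit one destructor loop per dimension.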
1825void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
1826 llvm::Value *outerBegin) {
1827 // Emit the common subexpression.
1828 CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());
1829
1830 Address destPtr = EnsureSlot(T: E->getType()).getAddress();
1831 uint64_t numElements = E->getArraySize().getZExtValue();
1832
1833 if (!numElements)
1834 return;
1835
1836 // destPtr is an array*. Construct an elementType* by drilling down a level.
1837 llvm::Value *zero = llvm::ConstantInt::get(Ty: CGF.SizeTy, V: 0);
1838 llvm::Value *indices[] = {zero, zero};
1839 llvm::Value *begin = Builder.CreateInBoundsGEP(
1840 Ty: destPtr.getElementType(), Ptr: destPtr.getPointer(), IdxList: indices,
1841 Name: "arrayinit.begin");
1842
1843 // Prepare to special-case multidimensional array initialization: we avoid
1844 // emitting multiple destructor loops in that case.
1845 if (!outerBegin)
1846 outerBegin = begin;
1847 ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(Val: E->getSubExpr());
1848
1849 QualType elementType =
1850 CGF.getContext().getAsArrayType(T: E->getType())->getElementType();
1851 CharUnits elementSize = CGF.getContext().getTypeSizeInChars(T: elementType);
1852 CharUnits elementAlign =
1853 destPtr.getAlignment().alignmentOfArrayElement(elementSize);
1854 llvm::Type *llvmElementType = CGF.ConvertTypeForMem(T: elementType);
1855
1856 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
1857 llvm::BasicBlock *bodyBB = CGF.createBasicBlock(name: "arrayinit.body");
1858
1859 // Jump into the body.
1860 CGF.EmitBlock(BB: bodyBB);
1861 llvm::PHINode *index =
1862 Builder.CreatePHI(Ty: zero->getType(), NumReservedValues: 2, Name: "arrayinit.index");
1863 index->addIncoming(V: zero, BB: entryBB);
1864 llvm::Value *element =
1865 Builder.CreateInBoundsGEP(Ty: llvmElementType, Ptr: begin, IdxList: index);
1866
1867 // Prepare for a cleanup.
1868 QualType::DestructionKind dtorKind = elementType.isDestructedType();
1869 EHScopeStack::stable_iterator cleanup;
1870 if (CGF.needsEHCleanup(kind: dtorKind) && !InnerLoop) {
1871 if (outerBegin->getType() != element->getType())
1872 outerBegin = Builder.CreateBitCast(V: outerBegin, DestTy: element->getType());
1873 CGF.pushRegularPartialArrayCleanup(arrayBegin: outerBegin, arrayEnd: element, elementType,
1874 elementAlignment: elementAlign,
1875 destroyer: CGF.getDestroyer(destructionKind: dtorKind));
1876 cleanup = CGF.EHStack.stable_begin();
1877 } else {
1878 dtorKind = QualType::DK_none;
1879 }
1880
1881 // Emit the actual filler expression.
1882 {
1883 // Temporaries created in an array initialization loop are destroyed
1884 // at the end of each iteration.
1885 CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
1886 CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
1887 LValue elementLV = CGF.MakeAddrLValue(
1888 Addr: Address(element, llvmElementType, elementAlign), T: elementType);
1889
1890 if (InnerLoop) {
1891 // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
1892 auto elementSlot = AggValueSlot::forLValue(
1893 LV: elementLV, CGF, isDestructed: AggValueSlot::IsDestructed,
1894 needsGC: AggValueSlot::DoesNotNeedGCBarriers, isAliased: AggValueSlot::IsNotAliased,
1895 mayOverlap: AggValueSlot::DoesNotOverlap);
1896 AggExprEmitter(CGF, elementSlot, false)
1897 .VisitArrayInitLoopExpr(E: InnerLoop, outerBegin);
1898 } else
1899 EmitInitializationToLValue(E: E->getSubExpr(), LV: elementLV);
1900 }
1901
1902 // Move on to the next element.
1903 llvm::Value *nextIndex = Builder.CreateNUWAdd(
1904 LHS: index, RHS: llvm::ConstantInt::get(Ty: CGF.SizeTy, V: 1), Name: "arrayinit.next");
1905 index->addIncoming(V: nextIndex, BB: Builder.GetInsertBlock());
1906
1907 // Leave the loop if we're done.
1908 llvm::Value *done = Builder.CreateICmpEQ(
1909 LHS: nextIndex, RHS: llvm::ConstantInt::get(Ty: CGF.SizeTy, V: numElements),
1910 Name: "arrayinit.done");
1911 llvm::BasicBlock *endBB = CGF.createBasicBlock(name: "arrayinit.end");
1912 Builder.CreateCondBr(Cond: done, True: endBB, False: bodyBB);
1913
1914 CGF.EmitBlock(BB: endBB);
1915
1916 // Leave the partial-array cleanup if we entered one.
1917 if (dtorKind)
1918 CGF.DeactivateCleanupBlock(Cleanup: cleanup, DominatingIP: index);
1919}
1920
1921void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
1922 AggValueSlot Dest = EnsureSlot(T: E->getType());
1923
1924 LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1925 EmitInitializationToLValue(E: E->getBase(), LV: DestLV);
1926 VisitInitListExpr(E: E->getUpdater());
1927}
1928
1929//===----------------------------------------------------------------------===//
1930// Entry Points into this File
1931//===----------------------------------------------------------------------===//
1932
1933/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
1934/// non-zero bytes that will be stored when outputting the initializer for the
1935/// specified initializer expression.
1936static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
1937 if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(Val: E))
1938 E = MTE->getSubExpr();
1939 E = E->IgnoreParenNoopCasts(Ctx: CGF.getContext());
1940
1941 // 0 and 0.0 won't require any non-zero stores!
1942 if (isSimpleZero(E, CGF)) return CharUnits::Zero();
1943
1944 // If this is an initlist expr, sum up the sizes of the (present)
1945 // elements. If this is something weird, assume the whole thing is non-zero.
1946 const InitListExpr *ILE = dyn_cast<InitListExpr>(Val: E);
1947 while (ILE && ILE->isTransparent())
1948 ILE = dyn_cast<InitListExpr>(Val: ILE->getInit(Init: 0));
1949 if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
1950 return CGF.getContext().getTypeSizeInChars(T: E->getType());
1951
1952 // InitListExprs for structs have to be handled carefully. If there are
1953 // reference members, we need to consider the size of the reference, not the
1954 // referent. InitListExprs for unions and arrays can't have references.
1955 if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
1956 if (!RT->isUnionType()) {
1957 RecordDecl *SD = RT->getDecl();
1958 CharUnits NumNonZeroBytes = CharUnits::Zero();
1959
1960 unsigned ILEElement = 0;
1961 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: SD))
1962 while (ILEElement != CXXRD->getNumBases())
1963 NumNonZeroBytes +=
1964 GetNumNonZeroBytesInInit(E: ILE->getInit(Init: ILEElement++), CGF);
1965 for (const auto *Field : SD->fields()) {
1966 // We're done once we hit the flexible array member or run out of
1967 // InitListExpr elements.
1968 if (Field->getType()->isIncompleteArrayType() ||
1969 ILEElement == ILE->getNumInits())
1970 break;
1971 if (Field->isUnnamedBitfield())
1972 continue;
1973
1974 const Expr *E = ILE->getInit(Init: ILEElement++);
1975
1976 // Reference values are always non-null and have the width of a pointer.
1977 if (Field->getType()->isReferenceType())
1978 NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
1979 BitSize: CGF.getTarget().getPointerWidth(AddrSpace: LangAS::Default));
1980 else
1981 NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
1982 }
1983
1984 return NumNonZeroBytes;
1985 }
1986 }
1987
1988 // FIXME: This overestimates the number of non-zero bytes for bit-fields.
1989 CharUnits NumNonZeroBytes = CharUnits::Zero();
1990 for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
1991 NumNonZeroBytes += GetNumNonZeroBytesInInit(E: ILE->getInit(Init: i), CGF);
1992 return NumNonZeroBytes;
1993}
1994
1995/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
1996/// zeros in it, emit a memset and avoid storing the individual zeros.
1997///
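/// For example (illustrative), for
///   struct S { char buf[256]; int n; };
///   struct S s = { .n = 1 };
/// nearly every byte of 's' is zero, so one memset of the whole object plus a
/// single store to 'n' beats emitting hundreds of individual zero stores.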
1998static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
1999 CodeGenFunction &CGF) {
2000 // If the slot is already known to be zeroed, nothing to do. Don't mess with
2001 // volatile stores.
2002 if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
2003 return;
2004
2005 // C++ objects with a user-declared constructor don't need zeroing.
2006 if (CGF.getLangOpts().CPlusPlus)
2007 if (const RecordType *RT = CGF.getContext()
2008 .getBaseElementType(QT: E->getType())->getAs<RecordType>()) {
2009 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Val: RT->getDecl());
2010 if (RD->hasUserDeclaredConstructor())
2011 return;
2012 }
2013
2014 // If the type is 16 bytes or smaller, prefer individual stores over memset.
2015 CharUnits Size = Slot.getPreferredSize(Ctx&: CGF.getContext(), Type: E->getType());
2016 if (Size <= CharUnits::fromQuantity(Quantity: 16))
2017 return;
2018
2019 // Check to see if over 3/4 of the initializer is known to be zero. If so,
2020 // we prefer to emit memset + individual stores for the rest.
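  // (The test below uses NumNonZeroBytes * 4 > Size, i.e. "more than a
  // quarter of the bytes are known non-zero", to avoid a division.)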
2021 CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
2022 if (NumNonZeroBytes*4 > Size)
2023 return;
2024
2025 // Okay, it seems like a good idea to use an initial memset; emit the call.
2026 llvm::Constant *SizeVal = CGF.Builder.getInt64(C: Size.getQuantity());
2027
2028 Address Loc = Slot.getAddress().withElementType(ElemTy: CGF.Int8Ty);
2029 CGF.Builder.CreateMemSet(Dest: Loc, Value: CGF.Builder.getInt8(C: 0), Size: SizeVal, IsVolatile: false);
2030
2031 // Tell the AggExprEmitter that the slot is known zero.
2032 Slot.setZeroed();
2033}
2034
2035
2036
2037
2038/// EmitAggExpr - Emit the computation of the specified expression of aggregate
2039/// type. The result is computed into the given slot. Note that if the slot is
2040/// ignored, the value of the aggregate expression is not needed; otherwise the
2041/// slot must provide a valid address.
2042void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
2043 assert(E && hasAggregateEvaluationKind(E->getType()) &&
2044 "Invalid aggregate expression to emit");
2045 assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
2046 "slot has bits but no address");
2047
2048 // Optimize the slot if possible.
2049 CheckAggExprForMemSetUse(Slot, E, CGF&: *this);
2050
2051 AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(E: const_cast<Expr*>(E));
2052}
2053
2054LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
2055 assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
2056 Address Temp = CreateMemTemp(T: E->getType());
2057 LValue LV = MakeAddrLValue(Addr: Temp, T: E->getType());
2058 EmitAggExpr(E, Slot: AggValueSlot::forLValue(
2059 LV, CGF&: *this, isDestructed: AggValueSlot::IsNotDestructed,
2060 needsGC: AggValueSlot::DoesNotNeedGCBarriers,
2061 isAliased: AggValueSlot::IsNotAliased, mayOverlap: AggValueSlot::DoesNotOverlap));
2062 return LV;
2063}
2064
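// Illustrative example (assuming the usual Itanium C++ ABI layout): in
//   struct A  { A(); int i; char c; };
//   struct VB { char x; };
//   struct B : virtual VB { [[no_unique_address]] A a; };
// the virtual base VB may be placed inside the tail padding of 'a'. Virtual
// bases are constructed before members, so initializing 'a' with full-width
// stores could clobber the already-constructed VB; such fields are reported
// as MayOverlap below.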
2065AggValueSlot::Overlap_t
2066CodeGenFunction::getOverlapForFieldInit(const FieldDecl *FD) {
2067 if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
2068 return AggValueSlot::DoesNotOverlap;
2069
2070 // If the field lies entirely within the enclosing class's nvsize, its tail
2071 // padding cannot overlap any already-initialized object. (The only subobjects
2072 // with greater addresses that might already be initialized are vbases.)
2073 const RecordDecl *ClassRD = FD->getParent();
2074 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D: ClassRD);
2075 if (Layout.getFieldOffset(FieldNo: FD->getFieldIndex()) +
2076 getContext().getTypeSize(FD->getType()) <=
2077 (uint64_t)getContext().toBits(CharSize: Layout.getNonVirtualSize()))
2078 return AggValueSlot::DoesNotOverlap;
2079
2080 // The tail padding may contain values we need to preserve.
2081 return AggValueSlot::MayOverlap;
2082}
2083
2084AggValueSlot::Overlap_t CodeGenFunction::getOverlapForBaseInit(
2085 const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
2086 // If the most-derived object is a field declared with [[no_unique_address]],
2087 // the tail padding of any virtual base could be reused for other subobjects
2088 // of that field's class.
2089 if (IsVirtual)
2090 return AggValueSlot::MayOverlap;
2091
2092 // If the base class is laid out entirely within the nvsize of the derived
2093 // class, its tail padding cannot yet be initialized, so we can issue
2094 // stores at the full width of the base class.
2095 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
2096 if (Layout.getBaseClassOffset(Base: BaseRD) +
2097 getContext().getASTRecordLayout(BaseRD).getSize() <=
2098 Layout.getNonVirtualSize())
2099 return AggValueSlot::DoesNotOverlap;
2100
2101 // The tail padding may contain values we need to preserve.
2102 return AggValueSlot::MayOverlap;
2103}
2104
2105void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
2106 AggValueSlot::Overlap_t MayOverlap,
2107 bool isVolatile) {
2108 assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
2109
2110 Address DestPtr = Dest.getAddress(CGF&: *this);
2111 Address SrcPtr = Src.getAddress(CGF&: *this);
2112
2113 if (getLangOpts().CPlusPlus) {
2114 if (const RecordType *RT = Ty->getAs<RecordType>()) {
2115 CXXRecordDecl *Record = cast<CXXRecordDecl>(Val: RT->getDecl());
2116 assert((Record->hasTrivialCopyConstructor() ||
2117 Record->hasTrivialCopyAssignment() ||
2118 Record->hasTrivialMoveConstructor() ||
2119 Record->hasTrivialMoveAssignment() ||
2120 Record->hasAttr<TrivialABIAttr>() || Record->isUnion()) &&
2121 "Trying to aggregate-copy a type without a trivial copy/move "
2122 "constructor or assignment operator");
2123 // Ignore empty classes in C++.
2124 if (Record->isEmpty())
2125 return;
2126 }
2127 }
2128
2129 if (getLangOpts().CUDAIsDevice) {
2130 if (Ty->isCUDADeviceBuiltinSurfaceType()) {
2131 if (getTargetHooks().emitCUDADeviceBuiltinSurfaceDeviceCopy(CGF&: *this, Dst: Dest,
2132 Src))
2133 return;
2134 } else if (Ty->isCUDADeviceBuiltinTextureType()) {
2135 if (getTargetHooks().emitCUDADeviceBuiltinTextureDeviceCopy(CGF&: *this, Dst: Dest,
2136 Src))
2137 return;
2138 }
2139 }
2140
2141 // Aggregate assignment turns into llvm.memcpy. This is almost valid per
2142 // C99 6.5.16.1p3, which states "If the value being stored in an object is
2143 // read from another object that overlaps in any way the storage of the first
2144 // object, then the overlap shall be exact and the two objects shall have
2145 // qualified or unqualified versions of a compatible type."
2146 //
2147 // memcpy is not defined if the source and destination pointers are exactly
2148 // equal, but other compilers do this optimization, and almost every memcpy
2149 // implementation handles this case safely. If there is a libc that does not
2150 // safely handle this, we can add a target hook.
2151
2152 // Get data size info for this aggregate. Don't copy the tail padding if this
2153 // might be a potentially-overlapping subobject, since the tail padding might
2154 // be occupied by a different object. Otherwise, copying it is fine.
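  // For example (illustrative, assuming the usual Itanium C++ ABI layout):
  //   struct A { A(); int i; char c; };   // data size 5, full size 8
  //   struct B : A { char d; };           // 'd' may live in A's tail padding
  // Copying the A subobject inside a B must copy only the 5-byte data size,
  // or it would overwrite 'd'.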
2155 TypeInfoChars TypeInfo;
2156 if (MayOverlap)
2157 TypeInfo = getContext().getTypeInfoDataSizeInChars(T: Ty);
2158 else
2159 TypeInfo = getContext().getTypeInfoInChars(T: Ty);
2160
2161 llvm::Value *SizeVal = nullptr;
2162 if (TypeInfo.Width.isZero()) {
2163 // But note that getTypeInfo returns 0 for a VLA.
2164 if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
2165 Val: getContext().getAsArrayType(T: Ty))) {
2166 QualType BaseEltTy;
2167 SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
2168 TypeInfo = getContext().getTypeInfoInChars(T: BaseEltTy);
2169 assert(!TypeInfo.Width.isZero());
2170 SizeVal = Builder.CreateNUWMul(
2171 LHS: SizeVal,
2172 RHS: llvm::ConstantInt::get(Ty: SizeTy, V: TypeInfo.Width.getQuantity()));
2173 }
2174 }
2175 if (!SizeVal) {
2176 SizeVal = llvm::ConstantInt::get(Ty: SizeTy, V: TypeInfo.Width.getQuantity());
2177 }
2178
2179 // FIXME: If we have a volatile struct, the optimizer can remove what might
2180 // appear to be `extra' memory ops:
2181 //
2182 // volatile struct { int i; } a, b;
2183 //
2184 // int main() {
2185 // a = b;
2186 // a = b;
2187 // }
2188 //
2189 // we need to use a different call here. We use isVolatile to indicate when
2190 // either the source or the destination is volatile.
2191
2192 DestPtr = DestPtr.withElementType(ElemTy: Int8Ty);
2193 SrcPtr = SrcPtr.withElementType(ElemTy: Int8Ty);
2194
2195 // Don't do any of the memmove_collectable tests if GC isn't set.
2196 if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
2197 // fall through
2198 } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
2199 RecordDecl *Record = RecordTy->getDecl();
2200 if (Record->hasObjectMember()) {
2201 CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF&: *this, DestPtr, SrcPtr,
2202 Size: SizeVal);
2203 return;
2204 }
2205 } else if (Ty->isArrayType()) {
2206 QualType BaseType = getContext().getBaseElementType(QT: Ty);
2207 if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
2208 if (RecordTy->getDecl()->hasObjectMember()) {
2209 CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF&: *this, DestPtr, SrcPtr,
2210 Size: SizeVal);
2211 return;
2212 }
2213 }
2214 }
2215
2216 auto Inst = Builder.CreateMemCpy(Dest: DestPtr, Src: SrcPtr, Size: SizeVal, IsVolatile: isVolatile);
2217
2218 // Determine the metadata to describe the position of any padding in this
2219 // memcpy, as well as the TBAA tags for the members of the struct, in case
2220 // the optimizer wishes to expand it into scalar memory operations.
2221 if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(QTy: Ty))
2222 Inst->setMetadata(KindID: llvm::LLVMContext::MD_tbaa_struct, Node: TBAAStructTag);
2223
2224 if (CGM.getCodeGenOpts().NewStructPathTBAA) {
2225 TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer(
2226 DestInfo: Dest.getTBAAInfo(), SrcInfo: Src.getTBAAInfo());
2227 CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);
2228 }
2229}
2230
