1 | //===--- CGCall.cpp - Encapsulate calling convention details --------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // These classes wrap the information about a call or function |
// definition used to handle ABI compliance.
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #include "CGCall.h" |
15 | #include "ABIInfo.h" |
16 | #include "CGBlocks.h" |
17 | #include "CGCXXABI.h" |
18 | #include "CGCleanup.h" |
19 | #include "CGRecordLayout.h" |
20 | #include "CodeGenFunction.h" |
21 | #include "CodeGenModule.h" |
22 | #include "TargetInfo.h" |
23 | #include "clang/AST/Attr.h" |
24 | #include "clang/AST/Decl.h" |
25 | #include "clang/AST/DeclCXX.h" |
26 | #include "clang/AST/DeclObjC.h" |
27 | #include "clang/Basic/CodeGenOptions.h" |
28 | #include "clang/Basic/TargetBuiltins.h" |
29 | #include "clang/Basic/TargetInfo.h" |
30 | #include "clang/CodeGen/CGFunctionInfo.h" |
31 | #include "clang/CodeGen/SwiftCallingConv.h" |
32 | #include "llvm/ADT/StringExtras.h" |
33 | #include "llvm/Analysis/ValueTracking.h" |
34 | #include "llvm/IR/Assumptions.h" |
35 | #include "llvm/IR/Attributes.h" |
36 | #include "llvm/IR/CallingConv.h" |
37 | #include "llvm/IR/DataLayout.h" |
38 | #include "llvm/IR/InlineAsm.h" |
39 | #include "llvm/IR/IntrinsicInst.h" |
40 | #include "llvm/IR/Intrinsics.h" |
41 | #include "llvm/Transforms/Utils/Local.h" |
42 | using namespace clang; |
43 | using namespace CodeGen; |
44 | |
45 | /***/ |
46 | |
47 | unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) { |
48 | switch (CC) { |
49 | default: return llvm::CallingConv::C; |
50 | case CC_X86StdCall: return llvm::CallingConv::X86_StdCall; |
51 | case CC_X86FastCall: return llvm::CallingConv::X86_FastCall; |
52 | case CC_X86RegCall: return llvm::CallingConv::X86_RegCall; |
53 | case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall; |
54 | case CC_Win64: return llvm::CallingConv::Win64; |
55 | case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV; |
56 | case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS; |
57 | case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; |
58 | case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI; |
59 | // TODO: Add support for __pascal to LLVM. |
60 | case CC_X86Pascal: return llvm::CallingConv::C; |
61 | // TODO: Add support for __vectorcall to LLVM. |
62 | case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall; |
63 | case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall; |
64 | case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC; |
65 | case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv(); |
66 | case CC_PreserveMost: return llvm::CallingConv::PreserveMost; |
67 | case CC_PreserveAll: return llvm::CallingConv::PreserveAll; |
68 | case CC_Swift: return llvm::CallingConv::Swift; |
69 | } |
70 | } |
71 | |
72 | /// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR |
73 | /// qualification. Either or both of RD and MD may be null. A null RD indicates |
74 | /// that there is no meaningful 'this' type, and a null MD can occur when |
75 | /// calling a method pointer. |
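/// For example, given 'struct S { void m() const; }', the type derived here
/// for S::m is simply 'S *': the const method qualifier is dropped, and only
/// an address-space qualifier on the method (if any) is re-applied before
/// taking the pointer type.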
76 | CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD, |
77 | const CXXMethodDecl *MD) { |
78 | QualType RecTy; |
79 | if (RD) |
80 | RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal(); |
81 | else |
82 | RecTy = Context.VoidTy; |
83 | |
84 | if (MD) |
85 | RecTy = Context.getAddrSpaceQualType(RecTy, MD->getMethodQualifiers().getAddressSpace()); |
86 | return Context.getPointerType(CanQualType::CreateUnsafe(RecTy)); |
87 | } |
88 | |
89 | /// Returns the canonical formal type of the given C++ method. |
90 | static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) { |
91 | return MD->getType()->getCanonicalTypeUnqualified() |
92 | .getAs<FunctionProtoType>(); |
93 | } |
94 | |
95 | /// Returns the "extra-canonicalized" return type, which discards |
96 | /// qualifiers on the return type. Codegen doesn't care about them, |
97 | /// and it makes ABI code a little easier to be able to assume that |
98 | /// all parameter and return types are top-level unqualified. |
99 | static CanQualType GetReturnType(QualType RetTy) { |
100 | return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType(); |
101 | } |
102 | |
103 | /// Arrange the argument and result information for a value of the given |
104 | /// unprototyped freestanding function type. |
105 | const CGFunctionInfo & |
106 | CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) { |
107 | // When translating an unprototyped function type, always use a |
108 | // variadic type. |
109 | return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(), |
110 | /*instanceMethod=*/false, |
111 | /*chainCall=*/false, None, |
112 | FTNP->getExtInfo(), {}, RequiredArgs(0)); |
113 | } |
114 | |
115 | static void addExtParameterInfosForCall( |
116 | llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> ¶mInfos, |
117 | const FunctionProtoType *proto, |
118 | unsigned prefixArgs, |
119 | unsigned totalArgs) { |
120 | assert(proto->hasExtParameterInfos()); |
121 | assert(paramInfos.size() <= prefixArgs); |
122 | assert(proto->getNumParams() + prefixArgs <= totalArgs); |
123 | |
124 | paramInfos.reserve(totalArgs); |
125 | |
126 | // Add default infos for any prefix args that don't already have infos. |
127 | paramInfos.resize(prefixArgs); |
128 | |
129 | // Add infos for the prototype. |
130 | for (const auto &ParamInfo : proto->getExtParameterInfos()) { |
131 | paramInfos.push_back(ParamInfo); |
132 | // pass_object_size params have no parameter info. |
133 | if (ParamInfo.hasPassObjectSize()) |
134 | paramInfos.emplace_back(); |
135 | } |
136 | |
  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
139 | // Add default infos for the variadic and/or suffix arguments. |
140 | paramInfos.resize(totalArgs); |
141 | } |
142 | |
143 | /// Adds the formal parameters in FPT to the given prefix. If any parameter in |
144 | /// FPT has pass_object_size attrs, then we'll add parameters for those, too. |
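/// For example, a parameter declared as
///   void *p __attribute__((pass_object_size(0)))
/// contributes two entries to the prefix (the pointer type followed by size_t
/// for the implicit object-size argument), and the extra argument receives a
/// default-constructed ExtParameterInfo of its own.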
145 | static void appendParameterTypes(const CodeGenTypes &CGT, |
146 | SmallVectorImpl<CanQualType> &prefix, |
147 | SmallVectorImpl<FunctionProtoType::ExtParameterInfo> ¶mInfos, |
148 | CanQual<FunctionProtoType> FPT) { |
149 | // Fast path: don't touch param info if we don't need to. |
150 | if (!FPT->hasExtParameterInfos()) { |
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
153 | prefix.append(FPT->param_type_begin(), FPT->param_type_end()); |
154 | return; |
155 | } |
156 | |
157 | unsigned PrefixSize = prefix.size(); |
158 | // In the vast majority of cases, we'll have precisely FPT->getNumParams() |
159 | // parameters; the only thing that can change this is the presence of |
160 | // pass_object_size. So, we preallocate for the common case. |
161 | prefix.reserve(prefix.size() + FPT->getNumParams()); |
162 | |
163 | auto ExtInfos = FPT->getExtParameterInfos(); |
164 | assert(ExtInfos.size() == FPT->getNumParams()); |
165 | for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) { |
166 | prefix.push_back(FPT->getParamType(I)); |
167 | if (ExtInfos[I].hasPassObjectSize()) |
168 | prefix.push_back(CGT.getContext().getSizeType()); |
169 | } |
170 | |
171 | addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize, |
172 | prefix.size()); |
173 | } |
174 | |
175 | /// Arrange the LLVM function layout for a value of the given function |
176 | /// type, on top of any implicit parameters already stored. |
177 | static const CGFunctionInfo & |
178 | arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, |
179 | SmallVectorImpl<CanQualType> &prefix, |
180 | CanQual<FunctionProtoType> FTP) { |
181 | SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; |
182 | RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size()); |
183 | // FIXME: Kill copy. |
184 | appendParameterTypes(CGT, prefix, paramInfos, FTP); |
185 | CanQualType resultType = FTP->getReturnType().getUnqualifiedType(); |
186 | |
187 | return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod, |
188 | /*chainCall=*/false, prefix, |
189 | FTP->getExtInfo(), paramInfos, |
190 | Required); |
191 | } |
192 | |
193 | /// Arrange the argument and result information for a value of the |
194 | /// given freestanding function type. |
195 | const CGFunctionInfo & |
196 | CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) { |
197 | SmallVector<CanQualType, 16> argTypes; |
198 | return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes, |
199 | FTP); |
200 | } |
201 | |
202 | static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D, |
203 | bool IsWindows) { |
204 | // Set the appropriate calling convention for the Function. |
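  // For example, ms_abi maps to CC_Win64 only when not targeting Windows, and
  // sysv_abi maps to CC_X86_64SysV only when targeting Windows; each attribute
  // is a no-op on the OS where it already names the default convention.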
205 | if (D->hasAttr<StdCallAttr>()) |
206 | return CC_X86StdCall; |
207 | |
208 | if (D->hasAttr<FastCallAttr>()) |
209 | return CC_X86FastCall; |
210 | |
211 | if (D->hasAttr<RegCallAttr>()) |
212 | return CC_X86RegCall; |
213 | |
214 | if (D->hasAttr<ThisCallAttr>()) |
215 | return CC_X86ThisCall; |
216 | |
217 | if (D->hasAttr<VectorCallAttr>()) |
218 | return CC_X86VectorCall; |
219 | |
220 | if (D->hasAttr<PascalAttr>()) |
221 | return CC_X86Pascal; |
222 | |
223 | if (PcsAttr *PCS = D->getAttr<PcsAttr>()) |
224 | return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP); |
225 | |
226 | if (D->hasAttr<AArch64VectorPcsAttr>()) |
227 | return CC_AArch64VectorCall; |
228 | |
229 | if (D->hasAttr<IntelOclBiccAttr>()) |
230 | return CC_IntelOclBicc; |
231 | |
232 | if (D->hasAttr<MSABIAttr>()) |
233 | return IsWindows ? CC_C : CC_Win64; |
234 | |
235 | if (D->hasAttr<SysVABIAttr>()) |
236 | return IsWindows ? CC_X86_64SysV : CC_C; |
237 | |
238 | if (D->hasAttr<PreserveMostAttr>()) |
239 | return CC_PreserveMost; |
240 | |
241 | if (D->hasAttr<PreserveAllAttr>()) |
242 | return CC_PreserveAll; |
243 | |
244 | return CC_C; |
245 | } |
246 | |
247 | /// Arrange the argument and result information for a call to an |
248 | /// unknown C++ non-static member function of the given abstract type. |
249 | /// (A null RD means we don't have any meaningful "this" argument type, |
250 | /// so fall back to a generic pointer type). |
251 | /// The member function must be an ordinary function, i.e. not a |
252 | /// constructor or destructor. |
253 | const CGFunctionInfo & |
254 | CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD, |
255 | const FunctionProtoType *FTP, |
256 | const CXXMethodDecl *MD) { |
257 | SmallVector<CanQualType, 16> argTypes; |
258 | |
259 | // Add the 'this' pointer. |
260 | argTypes.push_back(DeriveThisType(RD, MD)); |
261 | |
262 | return ::arrangeLLVMFunctionInfo( |
263 | *this, true, argTypes, |
264 | FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>()); |
265 | } |
266 | |
267 | /// Set calling convention for CUDA/HIP kernel. |
268 | static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, |
269 | const FunctionDecl *FD) { |
270 | if (FD->hasAttr<CUDAGlobalAttr>()) { |
271 | const FunctionType *FT = FTy->getAs<FunctionType>(); |
272 | CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT); |
273 | FTy = FT->getCanonicalTypeUnqualified(); |
274 | } |
275 | } |
276 | |
277 | /// Arrange the argument and result information for a declaration or |
278 | /// definition of the given C++ non-static member function. The |
279 | /// member function must be an ordinary function, i.e. not a |
280 | /// constructor or destructor. |
281 | const CGFunctionInfo & |
282 | CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) { |
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
285 | |
286 | CanQualType FT = GetFormalType(MD).getAs<Type>(); |
287 | setCUDAKernelCallingConvention(FT, CGM, MD); |
288 | auto prototype = FT.getAs<FunctionProtoType>(); |
289 | |
290 | if (MD->isInstance()) { |
291 | // The abstract case is perfectly fine. |
292 | const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD); |
293 | return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD); |
294 | } |
295 | |
296 | return arrangeFreeFunctionType(prototype); |
297 | } |
298 | |
299 | bool CodeGenTypes::inheritingCtorHasParams( |
300 | const InheritedConstructor &Inherited, CXXCtorType Type) { |
301 | // Parameters are unnecessary if we're constructing a base class subobject |
302 | // and the inherited constructor lives in a virtual base. |
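  // For example, given 'struct A { A(int); }; struct B : virtual A {
  // using A::A; };', the base-object variant of B's inheriting constructor
  // omits the 'int' parameter; the complete-object constructor is responsible
  // for constructing the virtual A (assuming the ABI provides distinct
  // constructor variants).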
303 | return Type == Ctor_Complete || |
304 | !Inherited.getShadowDecl()->constructsVirtualBase() || |
305 | !Target.getCXXABI().hasConstructorVariants(); |
306 | } |
307 | |
308 | const CGFunctionInfo & |
309 | CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) { |
310 | auto *MD = cast<CXXMethodDecl>(GD.getDecl()); |
311 | |
312 | SmallVector<CanQualType, 16> argTypes; |
313 | SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; |
314 | argTypes.push_back(DeriveThisType(MD->getParent(), MD)); |
315 | |
316 | bool PassParams = true; |
317 | |
318 | if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) { |
319 | // A base class inheriting constructor doesn't get forwarded arguments |
320 | // needed to construct a virtual base (or base class thereof). |
321 | if (auto Inherited = CD->getInheritedConstructor()) |
322 | PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType()); |
323 | } |
324 | |
325 | CanQual<FunctionProtoType> FTP = GetFormalType(MD); |
326 | |
327 | // Add the formal parameters. |
328 | if (PassParams) |
329 | appendParameterTypes(*this, argTypes, paramInfos, FTP); |
330 | |
331 | CGCXXABI::AddedStructorArgCounts AddedArgs = |
332 | TheCXXABI.buildStructorSignature(GD, argTypes); |
333 | if (!paramInfos.empty()) { |
334 | // Note: prefix implies after the first param. |
335 | if (AddedArgs.Prefix) |
336 | paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix, |
337 | FunctionProtoType::ExtParameterInfo{}); |
338 | if (AddedArgs.Suffix) |
339 | paramInfos.append(AddedArgs.Suffix, |
340 | FunctionProtoType::ExtParameterInfo{}); |
341 | } |
342 | |
343 | RequiredArgs required = |
344 | (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size()) |
345 | : RequiredArgs::All); |
346 | |
347 | FunctionType::ExtInfo extInfo = FTP->getExtInfo(); |
348 | CanQualType resultType = TheCXXABI.HasThisReturn(GD) |
349 | ? argTypes.front() |
350 | : TheCXXABI.hasMostDerivedReturn(GD) |
351 | ? CGM.getContext().VoidPtrTy |
352 | : Context.VoidTy; |
353 | return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true, |
354 | /*chainCall=*/false, argTypes, extInfo, |
355 | paramInfos, required); |
356 | } |
357 | |
358 | static SmallVector<CanQualType, 16> |
359 | getArgTypesForCall(ASTContext &ctx, const CallArgList &args) { |
360 | SmallVector<CanQualType, 16> argTypes; |
361 | for (auto &arg : args) |
362 | argTypes.push_back(ctx.getCanonicalParamType(arg.Ty)); |
363 | return argTypes; |
364 | } |
365 | |
366 | static SmallVector<CanQualType, 16> |
367 | getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) { |
368 | SmallVector<CanQualType, 16> argTypes; |
369 | for (auto &arg : args) |
370 | argTypes.push_back(ctx.getCanonicalParamType(arg->getType())); |
371 | return argTypes; |
372 | } |
373 | |
374 | static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> |
375 | getExtParameterInfosForCall(const FunctionProtoType *proto, |
376 | unsigned prefixArgs, unsigned totalArgs) { |
377 | llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result; |
378 | if (proto->hasExtParameterInfos()) { |
379 | addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs); |
380 | } |
381 | return result; |
382 | } |
383 | |
384 | /// Arrange a call to a C++ method, passing the given arguments. |
385 | /// |
386 | /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this` |
387 | /// parameter. |
388 | /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of |
389 | /// args. |
390 | /// PassProtoArgs indicates whether `args` has args for the parameters in the |
391 | /// given CXXConstructorDecl. |
392 | const CGFunctionInfo & |
393 | CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args, |
394 | const CXXConstructorDecl *D, |
395 | CXXCtorType CtorKind, |
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
398 | bool PassProtoArgs) { |
399 | // FIXME: Kill copy. |
400 | SmallVector<CanQualType, 16> ArgTypes; |
401 | for (const auto &Arg : args) |
402 | ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); |
403 | |
404 | // +1 for implicit this, which should always be args[0]. |
405 | unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs; |
406 | |
407 | CanQual<FunctionProtoType> FPT = GetFormalType(D); |
408 | RequiredArgs Required = PassProtoArgs |
409 | ? RequiredArgs::forPrototypePlus( |
410 | FPT, TotalPrefixArgs + ExtraSuffixArgs) |
411 | : RequiredArgs::All; |
412 | |
413 | GlobalDecl GD(D, CtorKind); |
414 | CanQualType ResultType = TheCXXABI.HasThisReturn(GD) |
415 | ? ArgTypes.front() |
416 | : TheCXXABI.hasMostDerivedReturn(GD) |
417 | ? CGM.getContext().VoidPtrTy |
418 | : Context.VoidTy; |
419 | |
420 | FunctionType::ExtInfo Info = FPT->getExtInfo(); |
421 | llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos; |
422 | // If the prototype args are elided, we should only have ABI-specific args, |
423 | // which never have param info. |
424 | if (PassProtoArgs && FPT->hasExtParameterInfos()) { |
425 | // ABI-specific suffix arguments are treated the same as variadic arguments. |
426 | addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs, |
427 | ArgTypes.size()); |
428 | } |
429 | return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true, |
430 | /*chainCall=*/false, ArgTypes, Info, |
431 | ParamInfos, Required); |
432 | } |
433 | |
434 | /// Arrange the argument and result information for the declaration or |
435 | /// definition of the given function. |
436 | const CGFunctionInfo & |
437 | CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) { |
438 | if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) |
439 | if (MD->isInstance()) |
440 | return arrangeCXXMethodDeclaration(MD); |
441 | |
442 | CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified(); |
443 | |
444 | assert(isa<FunctionType>(FTy)); |
445 | setCUDAKernelCallingConvention(FTy, CGM, FD); |
446 | |
447 | // When declaring a function without a prototype, always use a |
448 | // non-variadic type. |
449 | if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) { |
450 | return arrangeLLVMFunctionInfo( |
451 | noProto->getReturnType(), /*instanceMethod=*/false, |
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
453 | } |
454 | |
455 | return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>()); |
456 | } |
457 | |
458 | /// Arrange the argument and result information for the declaration or |
459 | /// definition of an Objective-C method. |
460 | const CGFunctionInfo & |
461 | CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) { |
462 | // It happens that this is the same as a call with no optional |
463 | // arguments, except also using the formal 'self' type. |
464 | return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType()); |
465 | } |
466 | |
467 | /// Arrange the argument and result information for the function type |
468 | /// through which to perform a send to the given Objective-C method, |
469 | /// using the given receiver type. The receiver type is not always |
470 | /// the 'self' type of the method or even an Objective-C pointer type. |
471 | /// This is *not* the right method for actually performing such a |
472 | /// message send, due to the possibility of optional arguments. |
473 | const CGFunctionInfo & |
474 | CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, |
475 | QualType receiverType) { |
476 | SmallVector<CanQualType, 16> argTys; |
477 | SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2); |
478 | argTys.push_back(Context.getCanonicalParamType(receiverType)); |
479 | argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType())); |
480 | // FIXME: Kill copy? |
481 | for (const auto *I : MD->parameters()) { |
482 | argTys.push_back(Context.getCanonicalParamType(I->getType())); |
483 | auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape( |
484 | I->hasAttr<NoEscapeAttr>()); |
485 | extParamInfos.push_back(extParamInfo); |
486 | } |
487 | |
488 | FunctionType::ExtInfo einfo; |
489 | bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows(); |
490 | einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows)); |
491 | |
492 | if (getContext().getLangOpts().ObjCAutoRefCount && |
493 | MD->hasAttr<NSReturnsRetainedAttr>()) |
494 | einfo = einfo.withProducesResult(true); |
495 | |
496 | RequiredArgs required = |
497 | (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All); |
498 | |
499 | return arrangeLLVMFunctionInfo( |
500 | GetReturnType(MD->getReturnType()), /*instanceMethod=*/false, |
501 | /*chainCall=*/false, argTys, einfo, extParamInfos, required); |
502 | } |
503 | |
504 | const CGFunctionInfo & |
505 | CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType, |
506 | const CallArgList &args) { |
507 | auto argTypes = getArgTypesForCall(Context, args); |
508 | FunctionType::ExtInfo einfo; |
509 | |
510 | return arrangeLLVMFunctionInfo( |
511 | GetReturnType(returnType), /*instanceMethod=*/false, |
512 | /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All); |
513 | } |
514 | |
515 | const CGFunctionInfo & |
516 | CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) { |
517 | // FIXME: Do we need to handle ObjCMethodDecl? |
518 | const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); |
519 | |
520 | if (isa<CXXConstructorDecl>(GD.getDecl()) || |
521 | isa<CXXDestructorDecl>(GD.getDecl())) |
522 | return arrangeCXXStructorDeclaration(GD); |
523 | |
524 | return arrangeFunctionDeclaration(FD); |
525 | } |
526 | |
527 | /// Arrange a thunk that takes 'this' as the first parameter followed by |
528 | /// varargs. Return a void pointer, regardless of the actual return type. |
529 | /// The body of the thunk will end in a musttail call to a function of the |
530 | /// correct type, and the caller will bitcast the function to the correct |
531 | /// prototype. |
532 | const CGFunctionInfo & |
533 | CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) { |
  assert(MD->isVirtual() && "only methods have thunks");
535 | CanQual<FunctionProtoType> FTP = GetFormalType(MD); |
536 | CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)}; |
537 | return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false, |
538 | /*chainCall=*/false, ArgTys, |
539 | FTP->getExtInfo(), {}, RequiredArgs(1)); |
540 | } |
541 | |
542 | const CGFunctionInfo & |
543 | CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD, |
544 | CXXCtorType CT) { |
545 | assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure); |
546 | |
547 | CanQual<FunctionProtoType> FTP = GetFormalType(CD); |
548 | SmallVector<CanQualType, 2> ArgTys; |
549 | const CXXRecordDecl *RD = CD->getParent(); |
550 | ArgTys.push_back(DeriveThisType(RD, CD)); |
551 | if (CT == Ctor_CopyingClosure) |
552 | ArgTys.push_back(*FTP->param_type_begin()); |
553 | if (RD->getNumVBases() > 0) |
554 | ArgTys.push_back(Context.IntTy); |
555 | CallingConv CC = Context.getDefaultCallingConvention( |
556 | /*IsVariadic=*/false, /*IsCXXMethod=*/true); |
557 | return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true, |
558 | /*chainCall=*/false, ArgTys, |
559 | FunctionType::ExtInfo(CC), {}, |
560 | RequiredArgs::All); |
561 | } |
562 | |
563 | /// Arrange a call as unto a free function, except possibly with an |
564 | /// additional number of formal parameters considered required. |
565 | static const CGFunctionInfo & |
566 | arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, |
567 | CodeGenModule &CGM, |
568 | const CallArgList &args, |
569 | const FunctionType *fnType, |
                            unsigned numExtraRequiredArgs,
571 | bool chainCall) { |
572 | assert(args.size() >= numExtraRequiredArgs); |
573 | |
574 | llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; |
575 | |
576 | // In most cases, there are no optional arguments. |
577 | RequiredArgs required = RequiredArgs::All; |
578 | |
579 | // If we have a variadic prototype, the required arguments are the |
580 | // extra prefix plus the arguments in the prototype. |
581 | if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) { |
582 | if (proto->isVariadic()) |
583 | required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs); |
584 | |
585 | if (proto->hasExtParameterInfos()) |
586 | addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs, |
587 | args.size()); |
588 | |
589 | // If we don't have a prototype at all, but we're supposed to |
590 | // explicitly use the variadic convention for unprototyped calls, |
591 | // treat all of the arguments as required but preserve the nominal |
592 | // possibility of variadics. |
593 | } else if (CGM.getTargetCodeGenInfo() |
594 | .isNoProtoCallVariadic(args, |
595 | cast<FunctionNoProtoType>(fnType))) { |
596 | required = RequiredArgs(args.size()); |
597 | } |
598 | |
599 | // FIXME: Kill copy. |
600 | SmallVector<CanQualType, 16> argTypes; |
601 | for (const auto &arg : args) |
602 | argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty)); |
603 | return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()), |
604 | /*instanceMethod=*/false, chainCall, |
605 | argTypes, fnType->getExtInfo(), paramInfos, |
606 | required); |
607 | } |
608 | |
609 | /// Figure out the rules for calling a function with the given formal |
610 | /// type using the given arguments. The arguments are necessary |
611 | /// because the function might be unprototyped, in which case it's |
612 | /// target-dependent in crazy ways. |
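/// For example, a call to 'int printf(const char *, ...)' passing three
/// arguments has one required argument (the prototype's single parameter);
/// the remaining two are treated as variadic arguments.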
613 | const CGFunctionInfo & |
614 | CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args, |
615 | const FunctionType *fnType, |
616 | bool chainCall) { |
617 | return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, |
618 | chainCall ? 1 : 0, chainCall); |
619 | } |
620 | |
621 | /// A block function is essentially a free function with an |
622 | /// extra implicit argument. |
623 | const CGFunctionInfo & |
624 | CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args, |
625 | const FunctionType *fnType) { |
626 | return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1, |
627 | /*chainCall=*/false); |
628 | } |
629 | |
630 | const CGFunctionInfo & |
631 | CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto, |
632 | const FunctionArgList ¶ms) { |
633 | auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size()); |
634 | auto argTypes = getArgTypesForDeclaration(Context, params); |
635 | |
636 | return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()), |
637 | /*instanceMethod*/ false, /*chainCall*/ false, |
638 | argTypes, proto->getExtInfo(), paramInfos, |
639 | RequiredArgs::forPrototypePlus(proto, 1)); |
640 | } |
641 | |
642 | const CGFunctionInfo & |
643 | CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType, |
644 | const CallArgList &args) { |
645 | // FIXME: Kill copy. |
646 | SmallVector<CanQualType, 16> argTypes; |
647 | for (const auto &Arg : args) |
648 | argTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); |
649 | return arrangeLLVMFunctionInfo( |
650 | GetReturnType(resultType), /*instanceMethod=*/false, |
651 | /*chainCall=*/false, argTypes, FunctionType::ExtInfo(), |
652 | /*paramInfos=*/ {}, RequiredArgs::All); |
653 | } |
654 | |
655 | const CGFunctionInfo & |
656 | CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType, |
657 | const FunctionArgList &args) { |
658 | auto argTypes = getArgTypesForDeclaration(Context, args); |
659 | |
660 | return arrangeLLVMFunctionInfo( |
661 | GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false, |
662 | argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All); |
663 | } |
664 | |
665 | const CGFunctionInfo & |
666 | CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType, |
667 | ArrayRef<CanQualType> argTypes) { |
668 | return arrangeLLVMFunctionInfo( |
669 | resultType, /*instanceMethod=*/false, /*chainCall=*/false, |
670 | argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All); |
671 | } |
672 | |
673 | /// Arrange a call to a C++ method, passing the given arguments. |
674 | /// |
675 | /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It |
676 | /// does not count `this`. |
677 | const CGFunctionInfo & |
678 | CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args, |
679 | const FunctionProtoType *proto, |
680 | RequiredArgs required, |
681 | unsigned numPrefixArgs) { |
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with fewer args than the required prefix?");
684 | // Add one to account for `this`. It's a bit awkward here, but we don't count |
685 | // `this` in similar places elsewhere. |
686 | auto paramInfos = |
687 | getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size()); |
688 | |
689 | // FIXME: Kill copy. |
690 | auto argTypes = getArgTypesForCall(Context, args); |
691 | |
692 | FunctionType::ExtInfo info = proto->getExtInfo(); |
693 | return arrangeLLVMFunctionInfo( |
694 | GetReturnType(proto->getReturnType()), /*instanceMethod=*/true, |
695 | /*chainCall=*/false, argTypes, info, paramInfos, required); |
696 | } |
697 | |
698 | const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() { |
699 | return arrangeLLVMFunctionInfo( |
700 | getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false, |
701 | None, FunctionType::ExtInfo(), {}, RequiredArgs::All); |
702 | } |
703 | |
704 | const CGFunctionInfo & |
705 | CodeGenTypes::arrangeCall(const CGFunctionInfo &signature, |
706 | const CallArgList &args) { |
707 | assert(signature.arg_size() <= args.size()); |
708 | if (signature.arg_size() == args.size()) |
709 | return signature; |
710 | |
711 | SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; |
712 | auto sigParamInfos = signature.getExtParameterInfos(); |
713 | if (!sigParamInfos.empty()) { |
714 | paramInfos.append(sigParamInfos.begin(), sigParamInfos.end()); |
715 | paramInfos.resize(args.size()); |
716 | } |
717 | |
718 | auto argTypes = getArgTypesForCall(Context, args); |
719 | |
720 | assert(signature.getRequiredArgs().allowsOptionalArgs()); |
721 | return arrangeLLVMFunctionInfo(signature.getReturnType(), |
722 | signature.isInstanceMethod(), |
723 | signature.isChainCall(), |
724 | argTypes, |
725 | signature.getExtInfo(), |
726 | paramInfos, |
727 | signature.getRequiredArgs()); |
728 | } |
729 | |
730 | namespace clang { |
731 | namespace CodeGen { |
732 | void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI); |
733 | } |
734 | } |
735 | |
736 | /// Arrange the argument and result information for an abstract value |
737 | /// of a given function type. This is the method which all of the |
738 | /// above functions ultimately defer to. |
739 | const CGFunctionInfo & |
740 | CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType, |
741 | bool instanceMethod, |
742 | bool chainCall, |
743 | ArrayRef<CanQualType> argTypes, |
744 | FunctionType::ExtInfo info, |
745 | ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos, |
746 | RequiredArgs required) { |
747 | assert(llvm::all_of(argTypes, |
748 | [](CanQualType T) { return T.isCanonicalAsParam(); })); |
749 | |
750 | // Lookup or create unique function info. |
751 | llvm::FoldingSetNodeID ID; |
752 | CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos, |
753 | required, resultType, argTypes); |
754 | |
755 | void *insertPos = nullptr; |
756 | CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos); |
757 | if (FI) |
758 | return *FI; |
759 | |
760 | unsigned CC = ClangCallConvToLLVMCallConv(info.getCC()); |
761 | |
762 | // Construct the function info. We co-allocate the ArgInfos. |
763 | FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info, |
764 | paramInfos, resultType, argTypes, required); |
765 | FunctionInfos.InsertNode(FI, insertPos); |
766 | |
767 | bool inserted = FunctionsBeingProcessed.insert(FI).second; |
768 | (void)inserted; |
  assert(inserted && "Recursively being processed?");
770 | |
771 | // Compute ABI information. |
772 | if (CC == llvm::CallingConv::SPIR_KERNEL) { |
773 | // Force target independent argument handling for the host visible |
774 | // kernel functions. |
775 | computeSPIRKernelABIInfo(CGM, *FI); |
776 | } else if (info.getCC() == CC_Swift) { |
777 | swiftcall::computeABIInfo(CGM, *FI); |
778 | } else { |
779 | getABIInfo().computeInfo(*FI); |
780 | } |
781 | |
782 | // Loop over all of the computed argument and return value info. If any of |
783 | // them are direct or extend without a specified coerce type, specify the |
784 | // default now. |
785 | ABIArgInfo &retInfo = FI->getReturnInfo(); |
786 | if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr) |
787 | retInfo.setCoerceToType(ConvertType(FI->getReturnType())); |
788 | |
789 | for (auto &I : FI->arguments()) |
790 | if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr) |
791 | I.info.setCoerceToType(ConvertType(I.type)); |
792 | |
793 | bool erased = FunctionsBeingProcessed.erase(FI); (void)erased; |
  assert(erased && "Not in set?");
795 | |
796 | return *FI; |
797 | } |
798 | |
799 | CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC, |
800 | bool instanceMethod, |
801 | bool chainCall, |
802 | const FunctionType::ExtInfo &info, |
803 | ArrayRef<ExtParameterInfo> paramInfos, |
804 | CanQualType resultType, |
805 | ArrayRef<CanQualType> argTypes, |
806 | RequiredArgs required) { |
807 | assert(paramInfos.empty() || paramInfos.size() == argTypes.size()); |
808 | assert(!required.allowsOptionalArgs() || |
809 | required.getNumRequiredArgs() <= argTypes.size()); |
810 | |
811 | void *buffer = |
812 | operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>( |
813 | argTypes.size() + 1, paramInfos.size())); |
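  // The trailing ArgInfo buffer holds argTypes.size() + 1 entries: slot 0 is
  // the return type/info and slots 1..N are the parameters, which is why the
  // result is written to getArgsBuffer()[0] and parameter i to index i + 1
  // below.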
814 | |
815 | CGFunctionInfo *FI = new(buffer) CGFunctionInfo(); |
816 | FI->CallingConvention = llvmCC; |
817 | FI->EffectiveCallingConvention = llvmCC; |
818 | FI->ASTCallingConvention = info.getCC(); |
819 | FI->InstanceMethod = instanceMethod; |
820 | FI->ChainCall = chainCall; |
821 | FI->CmseNSCall = info.getCmseNSCall(); |
822 | FI->NoReturn = info.getNoReturn(); |
823 | FI->ReturnsRetained = info.getProducesResult(); |
824 | FI->NoCallerSavedRegs = info.getNoCallerSavedRegs(); |
825 | FI->NoCfCheck = info.getNoCfCheck(); |
826 | FI->Required = required; |
827 | FI->HasRegParm = info.getHasRegParm(); |
828 | FI->RegParm = info.getRegParm(); |
829 | FI->ArgStruct = nullptr; |
830 | FI->ArgStructAlign = 0; |
831 | FI->NumArgs = argTypes.size(); |
832 | FI->HasExtParameterInfos = !paramInfos.empty(); |
833 | FI->getArgsBuffer()[0].type = resultType; |
834 | for (unsigned i = 0, e = argTypes.size(); i != e; ++i) |
835 | FI->getArgsBuffer()[i + 1].type = argTypes[i]; |
836 | for (unsigned i = 0, e = paramInfos.size(); i != e; ++i) |
837 | FI->getExtParameterInfosBuffer()[i] = paramInfos[i]; |
838 | return FI; |
839 | } |
840 | |
841 | /***/ |
842 | |
843 | namespace { |
844 | // ABIArgInfo::Expand implementation. |
845 | |
846 | // Specifies the way QualType passed as ABIArgInfo::Expand is expanded. |
847 | struct TypeExpansion { |
848 | enum TypeExpansionKind { |
849 | // Elements of constant arrays are expanded recursively. |
850 | TEK_ConstantArray, |
    // Record fields are expanded recursively (but if the record is a union,
    // only the field with the largest size is expanded).
853 | TEK_Record, |
854 | // For complex types, real and imaginary parts are expanded recursively. |
855 | TEK_Complex, |
856 | // All other types are not expandable. |
857 | TEK_None |
858 | }; |
859 | |
860 | const TypeExpansionKind Kind; |
861 | |
862 | TypeExpansion(TypeExpansionKind K) : Kind(K) {} |
863 | virtual ~TypeExpansion() {} |
864 | }; |
865 | |
866 | struct ConstantArrayExpansion : TypeExpansion { |
867 | QualType EltTy; |
868 | uint64_t NumElts; |
869 | |
870 | ConstantArrayExpansion(QualType EltTy, uint64_t NumElts) |
871 | : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {} |
872 | static bool classof(const TypeExpansion *TE) { |
873 | return TE->Kind == TEK_ConstantArray; |
874 | } |
875 | }; |
876 | |
877 | struct RecordExpansion : TypeExpansion { |
878 | SmallVector<const CXXBaseSpecifier *, 1> Bases; |
879 | |
880 | SmallVector<const FieldDecl *, 1> Fields; |
881 | |
882 | RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases, |
883 | SmallVector<const FieldDecl *, 1> &&Fields) |
884 | : TypeExpansion(TEK_Record), Bases(std::move(Bases)), |
885 | Fields(std::move(Fields)) {} |
886 | static bool classof(const TypeExpansion *TE) { |
887 | return TE->Kind == TEK_Record; |
888 | } |
889 | }; |
890 | |
891 | struct ComplexExpansion : TypeExpansion { |
892 | QualType EltTy; |
893 | |
894 | ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {} |
895 | static bool classof(const TypeExpansion *TE) { |
896 | return TE->Kind == TEK_Complex; |
897 | } |
898 | }; |
899 | |
900 | struct NoExpansion : TypeExpansion { |
901 | NoExpansion() : TypeExpansion(TEK_None) {} |
902 | static bool classof(const TypeExpansion *TE) { |
903 | return TE->Kind == TEK_None; |
904 | } |
905 | }; |
906 | } // namespace |
907 | |
908 | static std::unique_ptr<TypeExpansion> |
909 | getTypeExpansion(QualType Ty, const ASTContext &Context) { |
910 | if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { |
911 | return std::make_unique<ConstantArrayExpansion>( |
912 | AT->getElementType(), AT->getSize().getZExtValue()); |
913 | } |
914 | if (const RecordType *RT = Ty->getAs<RecordType>()) { |
915 | SmallVector<const CXXBaseSpecifier *, 1> Bases; |
916 | SmallVector<const FieldDecl *, 1> Fields; |
917 | const RecordDecl *RD = RT->getDecl(); |
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
920 | if (RD->isUnion()) { |
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
923 | const FieldDecl *LargestFD = nullptr; |
924 | CharUnits UnionSize = CharUnits::Zero(); |
925 | |
926 | for (const auto *FD : RD->fields()) { |
927 | if (FD->isZeroLengthBitField(Context)) |
928 | continue; |
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
931 | CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType()); |
932 | if (UnionSize < FieldSize) { |
933 | UnionSize = FieldSize; |
934 | LargestFD = FD; |
935 | } |
936 | } |
937 | if (LargestFD) |
938 | Fields.push_back(LargestFD); |
939 | } else { |
940 | if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { |
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
943 | for (const CXXBaseSpecifier &BS : CXXRD->bases()) |
944 | Bases.push_back(&BS); |
945 | } |
946 | |
947 | for (const auto *FD : RD->fields()) { |
948 | if (FD->isZeroLengthBitField(Context)) |
949 | continue; |
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
952 | Fields.push_back(FD); |
953 | } |
954 | } |
955 | return std::make_unique<RecordExpansion>(std::move(Bases), |
956 | std::move(Fields)); |
957 | } |
958 | if (const ComplexType *CT = Ty->getAs<ComplexType>()) { |
959 | return std::make_unique<ComplexExpansion>(CT->getElementType()); |
960 | } |
961 | return std::make_unique<NoExpansion>(); |
962 | } |
963 | |
964 | static int getExpansionSize(QualType Ty, const ASTContext &Context) { |
965 | auto Exp = getTypeExpansion(Ty, Context); |
966 | if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { |
967 | return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context); |
968 | } |
969 | if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { |
970 | int Res = 0; |
971 | for (auto BS : RExp->Bases) |
972 | Res += getExpansionSize(BS->getType(), Context); |
973 | for (auto FD : RExp->Fields) |
974 | Res += getExpansionSize(FD->getType(), Context); |
975 | return Res; |
976 | } |
977 | if (isa<ComplexExpansion>(Exp.get())) |
978 | return 2; |
979 | assert(isa<NoExpansion>(Exp.get())); |
980 | return 1; |
981 | } |
982 | |
983 | void |
984 | CodeGenTypes::getExpandedTypes(QualType Ty, |
985 | SmallVectorImpl<llvm::Type *>::iterator &TI) { |
986 | auto Exp = getTypeExpansion(Ty, Context); |
987 | if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { |
988 | for (int i = 0, n = CAExp->NumElts; i < n; i++) { |
989 | getExpandedTypes(CAExp->EltTy, TI); |
990 | } |
991 | } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { |
992 | for (auto BS : RExp->Bases) |
993 | getExpandedTypes(BS->getType(), TI); |
994 | for (auto FD : RExp->Fields) |
995 | getExpandedTypes(FD->getType(), TI); |
996 | } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) { |
997 | llvm::Type *EltTy = ConvertType(CExp->EltTy); |
998 | *TI++ = EltTy; |
999 | *TI++ = EltTy; |
1000 | } else { |
1001 | assert(isa<NoExpansion>(Exp.get())); |
1002 | *TI++ = ConvertType(Ty); |
1003 | } |
1004 | } |
1005 | |
1006 | static void forConstantArrayExpansion(CodeGenFunction &CGF, |
1007 | ConstantArrayExpansion *CAE, |
1008 | Address BaseAddr, |
1009 | llvm::function_ref<void(Address)> Fn) { |
1010 | CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy); |
1011 | CharUnits EltAlign = |
1012 | BaseAddr.getAlignment().alignmentOfArrayElement(EltSize); |
1013 | |
1014 | for (int i = 0, n = CAE->NumElts; i < n; i++) { |
1015 | llvm::Value *EltAddr = |
1016 | CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i); |
1017 | Fn(Address(EltAddr, EltAlign)); |
1018 | } |
1019 | } |
1020 | |
1021 | void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV, |
1022 | llvm::Function::arg_iterator &AI) { |
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
1025 | |
1026 | auto Exp = getTypeExpansion(Ty, getContext()); |
1027 | if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { |
1028 | forConstantArrayExpansion( |
1029 | *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) { |
1030 | LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy); |
1031 | ExpandTypeFromArgs(CAExp->EltTy, LV, AI); |
1032 | }); |
1033 | } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { |
1034 | Address This = LV.getAddress(*this); |
1035 | for (const CXXBaseSpecifier *BS : RExp->Bases) { |
1036 | // Perform a single step derived-to-base conversion. |
1037 | Address Base = |
1038 | GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1, |
1039 | /*NullCheckValue=*/false, SourceLocation()); |
1040 | LValue SubLV = MakeAddrLValue(Base, BS->getType()); |
1041 | |
1042 | // Recurse onto bases. |
1043 | ExpandTypeFromArgs(BS->getType(), SubLV, AI); |
1044 | } |
1045 | for (auto FD : RExp->Fields) { |
1046 | // FIXME: What are the right qualifiers here? |
1047 | LValue SubLV = EmitLValueForFieldInitialization(LV, FD); |
1048 | ExpandTypeFromArgs(FD->getType(), SubLV, AI); |
1049 | } |
1050 | } else if (isa<ComplexExpansion>(Exp.get())) { |
1051 | auto realValue = &*AI++; |
1052 | auto imagValue = &*AI++; |
1053 | EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true); |
1054 | } else { |
1055 | // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a |
1056 | // primitive store. |
1057 | assert(isa<NoExpansion>(Exp.get())); |
1058 | if (LV.isBitField()) |
1059 | EmitStoreThroughLValue(RValue::get(&*AI++), LV); |
1060 | else |
1061 | EmitStoreOfScalar(&*AI++, LV); |
1062 | } |
1063 | } |
1064 | |
1065 | void CodeGenFunction::ExpandTypeToArgs( |
1066 | QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy, |
1067 | SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) { |
1068 | auto Exp = getTypeExpansion(Ty, getContext()); |
1069 | if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { |
1070 | Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this) |
1071 | : Arg.getKnownRValue().getAggregateAddress(); |
1072 | forConstantArrayExpansion( |
1073 | *this, CAExp, Addr, [&](Address EltAddr) { |
1074 | CallArg EltArg = CallArg( |
1075 | convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()), |
1076 | CAExp->EltTy); |
1077 | ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs, |
1078 | IRCallArgPos); |
1079 | }); |
1080 | } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { |
1081 | Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this) |
1082 | : Arg.getKnownRValue().getAggregateAddress(); |
1083 | for (const CXXBaseSpecifier *BS : RExp->Bases) { |
1084 | // Perform a single step derived-to-base conversion. |
1085 | Address Base = |
1086 | GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1, |
1087 | /*NullCheckValue=*/false, SourceLocation()); |
1088 | CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType()); |
1089 | |
1090 | // Recurse onto bases. |
1091 | ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs, |
1092 | IRCallArgPos); |
1093 | } |
1094 | |
1095 | LValue LV = MakeAddrLValue(This, Ty); |
1096 | for (auto FD : RExp->Fields) { |
1097 | CallArg FldArg = |
1098 | CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType()); |
1099 | ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs, |
1100 | IRCallArgPos); |
1101 | } |
1102 | } else if (isa<ComplexExpansion>(Exp.get())) { |
1103 | ComplexPairTy CV = Arg.getKnownRValue().getComplexVal(); |
1104 | IRCallArgs[IRCallArgPos++] = CV.first; |
1105 | IRCallArgs[IRCallArgPos++] = CV.second; |
1106 | } else { |
1107 | assert(isa<NoExpansion>(Exp.get())); |
1108 | auto RV = Arg.getKnownRValue(); |
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");
1111 | |
1112 | // Insert a bitcast as needed. |
1113 | llvm::Value *V = RV.getScalarVal(); |
1114 | if (IRCallArgPos < IRFuncTy->getNumParams() && |
1115 | V->getType() != IRFuncTy->getParamType(IRCallArgPos)) |
1116 | V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos)); |
1117 | |
1118 | IRCallArgs[IRCallArgPos++] = V; |
1119 | } |
1120 | } |
1121 | |
1122 | /// Create a temporary allocation for the purposes of coercion. |
1123 | static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, |
1124 | CharUnits MinAlign, |
                                           const Twine &Name = "tmp") {
1126 | // Don't use an alignment that's worse than what LLVM would prefer. |
1127 | auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty); |
1128 | CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign)); |
1129 | |
  return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce");
1131 | } |
1132 | |
1133 | /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are |
1134 | /// accessing some number of bytes out of it, try to gep into the struct to get |
1135 | /// at its inner goodness. Dive as deep as possible without entering an element |
1136 | /// with an in-memory size smaller than DstSize. |
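/// For example, diving into '{ { i32, i32 }, float }' looking for 8 bytes
/// steps into the first element '{ i32, i32 }' (its 8-byte store size still
/// covers the access) and then stops, because the next candidate element,
/// i32, would only cover 4 of the requested 8 bytes.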
1137 | static Address |
1138 | EnterStructPointerForCoercedAccess(Address SrcPtr, |
1139 | llvm::StructType *SrcSTy, |
1140 | uint64_t DstSize, CodeGenFunction &CGF) { |
1141 | // We can't dive into a zero-element struct. |
1142 | if (SrcSTy->getNumElements() == 0) return SrcPtr; |
1143 | |
1144 | llvm::Type *FirstElt = SrcSTy->getElementType(0); |
1145 | |
1146 | // If the first elt is at least as large as what we're looking for, or if the |
1147 | // first element is the same size as the whole struct, we can enter it. The |
1148 | // comparison must be made on the store size and not the alloca size. Using |
1149 | // the alloca size may overstate the size of the load. |
1150 | uint64_t FirstEltSize = |
1151 | CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt); |
1152 | if (FirstEltSize < DstSize && |
1153 | FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy)) |
1154 | return SrcPtr; |
1155 | |
1156 | // GEP into the first element. |
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");
1158 | |
1159 | // If the first element is a struct, recurse. |
1160 | llvm::Type *SrcTy = SrcPtr.getElementType(); |
1161 | if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) |
1162 | return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF); |
1163 | |
1164 | return SrcPtr; |
1165 | } |
1166 | |
1167 | /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both |
1168 | /// are either integers or pointers. This does a truncation of the value if it |
1169 | /// is too large or a zero extension if it is too small. |
1170 | /// |
1171 | /// This behaves as if the value were coerced through memory, so on big-endian |
1172 | /// targets the high bits are preserved in a truncation, while little-endian |
1173 | /// targets preserve the low bits. |
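///
/// For example, coercing an i64 value to i32 keeps the low 32 bits on a
/// little-endian target (a plain truncation), while a big-endian target first
/// shifts the value right by 32 so that the high bits, the ones a store/load
/// round-trip through memory would keep, survive the truncation.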
1174 | static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, |
1175 | llvm::Type *Ty, |
1176 | CodeGenFunction &CGF) { |
1177 | if (Val->getType() == Ty) |
1178 | return Val; |
1179 | |
1180 | if (isa<llvm::PointerType>(Val->getType())) { |
1181 | // If this is Pointer->Pointer avoid conversion to and from int. |
1182 | if (isa<llvm::PointerType>(Ty)) |
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
1184 | |
1185 | // Convert the pointer to an integer so we can play with its width. |
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
1187 | } |
1188 | |
1189 | llvm::Type *DestIntTy = Ty; |
1190 | if (isa<llvm::PointerType>(DestIntTy)) |
1191 | DestIntTy = CGF.IntPtrTy; |
1192 | |
1193 | if (Val->getType() != DestIntTy) { |
1194 | const llvm::DataLayout &DL = CGF.CGM.getDataLayout(); |
1195 | if (DL.isBigEndian()) { |
1196 | // Preserve the high bits on big-endian targets. |
1197 | // That is what memory coercion does. |
1198 | uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType()); |
1199 | uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy); |
1200 | |
1201 | if (SrcSize > DstSize) { |
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
1207 | } |
1208 | } else { |
1209 | // Little-endian targets preserve the low bits. No shifts required. |
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
1211 | } |
1212 | } |
1213 | |
1214 | if (isa<llvm::PointerType>(Ty)) |
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
1216 | return Val; |
1217 | } |
1218 | |
1219 | |
1220 | |
1221 | /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as |
1222 | /// a pointer to an object of type \arg Ty, known to be aligned to |
1223 | /// \arg SrcAlign bytes. |
1224 | /// |
1225 | /// This safely handles the case when the src type is smaller than the |
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
1228 | static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty, |
1229 | CodeGenFunction &CGF) { |
1230 | llvm::Type *SrcTy = Src.getElementType(); |
1231 | |
1232 | // If SrcTy and Ty are the same, just do a load. |
1233 | if (SrcTy == Ty) |
1234 | return CGF.Builder.CreateLoad(Src); |
1235 | |
1236 | llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty); |
1237 | |
1238 | if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) { |
1239 | Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, |
1240 | DstSize.getFixedSize(), CGF); |
1241 | SrcTy = Src.getElementType(); |
1242 | } |
1243 | |
1244 | llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy); |
1245 | |
1246 | // If the source and destination are integer or pointer types, just do an |
1247 | // extension or truncation to the desired type. |
1248 | if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) && |
1249 | (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) { |
1250 | llvm::Value *Load = CGF.Builder.CreateLoad(Src); |
1251 | return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF); |
1252 | } |
1253 | |
1254 | // If load is legal, just bitcast the src pointer. |
1255 | if (!SrcSize.isScalable() && !DstSize.isScalable() && |
1256 | SrcSize.getFixedSize() >= DstSize.getFixedSize()) { |
1257 | // Generally SrcSize is never greater than DstSize, since this means we are |
1258 | // losing bits. However, this can happen in cases where the structure has |
1259 | // additional padding, for example due to a user specified alignment. |
1260 | // |
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
1263 | Src = CGF.Builder.CreateBitCast(Src, |
1264 | Ty->getPointerTo(Src.getAddressSpace())); |
1265 | return CGF.Builder.CreateLoad(Src); |
1266 | } |
1267 | |
1268 | // If coercing a fixed vector to a scalable vector for ABI compatibility, and |
1269 | // the types match, use the llvm.experimental.vector.insert intrinsic to |
1270 | // perform the conversion. |
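  // For example, a '<4 x i32>' temporary destined for a '<vscale x 4 x i32>'
  // ABI type is loaded and inserted at index 0 of an undef scalable vector
  // rather than being copied through memory.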
1271 | if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) { |
1272 | if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) { |
1273 | if (ScalableDst->getElementType() == FixedSrc->getElementType()) { |
1274 | auto *Load = CGF.Builder.CreateLoad(Src); |
1275 | auto *UndefVec = llvm::UndefValue::get(ScalableDst); |
1276 | auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty); |
        return CGF.Builder.CreateInsertVector(ScalableDst, UndefVec, Load, Zero,
                                              "castScalableSve");
1279 | } |
1280 | } |
1281 | } |
1282 | |
1283 | // Otherwise do coercion through memory. This is stupid, but simple. |
1284 | Address Tmp = |
1285 | CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName()); |
1286 | CGF.Builder.CreateMemCpy( |
1287 | Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(), |
1288 | Src.getAlignment().getAsAlign(), |
1289 | llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinSize())); |
1290 | return CGF.Builder.CreateLoad(Tmp); |
1291 | } |
1292 | |
1293 | // Function to store a first-class aggregate into memory. We prefer to |
1294 | // store the elements rather than the aggregate to be more friendly to |
1295 | // fast-isel. |
1296 | // FIXME: Do we need to recurse here? |
1297 | void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest, |
1298 | bool DestIsVolatile) { |
1299 | // Prefer scalar stores to first-class aggregate stores. |
1300 | if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) { |
1301 | for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { |
1302 | Address EltPtr = Builder.CreateStructGEP(Dest, i); |
1303 | llvm::Value *Elt = Builder.CreateExtractValue(Val, i); |
1304 | Builder.CreateStore(Elt, EltPtr, DestIsVolatile); |
1305 | } |
1306 | } else { |
1307 | Builder.CreateStore(Val, Dest, DestIsVolatile); |
1308 | } |
1309 | } |
1310 | |
1311 | /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src, |
1312 | /// where the source and destination may have different types. The |
1313 | /// destination is known to be aligned to \arg DstAlign bytes. |
1314 | /// |
1315 | /// This safely handles the case when the src type is larger than the |
1316 | /// destination type; the upper bits of the src will be lost. |
1317 | static void CreateCoercedStore(llvm::Value *Src, |
1318 | Address Dst, |
1319 | bool DstIsVolatile, |
1320 | CodeGenFunction &CGF) { |
1321 | llvm::Type *SrcTy = Src->getType(); |
1322 | llvm::Type *DstTy = Dst.getElementType(); |
1323 | if (SrcTy == DstTy) { |
1324 | CGF.Builder.CreateStore(Src, Dst, DstIsVolatile); |
1325 | return; |
1326 | } |
1327 | |
1328 | llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy); |
1329 | |
1330 | if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) { |
1331 | Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, |
1332 | SrcSize.getFixedSize(), CGF); |
1333 | DstTy = Dst.getElementType(); |
1334 | } |
1335 | |
1336 | llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy); |
1337 | llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy); |
1338 | if (SrcPtrTy && DstPtrTy && |
1339 | SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) { |
1340 | Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy); |
1341 | CGF.Builder.CreateStore(Src, Dst, DstIsVolatile); |
1342 | return; |
1343 | } |
1344 | |
1345 | // If the source and destination are integer or pointer types, just do an |
1346 | // extension or truncation to the desired type. |
1347 | if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) && |
1348 | (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) { |
1349 | Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF); |
1350 | CGF.Builder.CreateStore(Src, Dst, DstIsVolatile); |
1351 | return; |
1352 | } |
1353 | |
1354 | llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy); |
1355 | |
1356 | // If the store is legal, just bitcast the dst pointer and store through it. |
1357 | if (isa<llvm::ScalableVectorType>(SrcTy) || |
1358 | isa<llvm::ScalableVectorType>(DstTy) || |
1359 | SrcSize.getFixedSize() <= DstSize.getFixedSize()) { |
1360 | Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy); |
1361 | CGF.EmitAggregateStore(Src, Dst, DstIsVolatile); |
1362 | } else { |
1363 | // Otherwise do coercion through memory. This is stupid, but |
1364 | // simple. |
1365 | |
1366 | // Generally SrcSize is never greater than DstSize, since this means we are |
1367 | // losing bits. However, this can happen in cases where the structure has |
1368 | // additional padding, for example due to a user specified alignment. |
1369 | // |
1370 | // FIXME: Assert that we aren't truncating non-padding bits when we have |
1371 | // access to that information. |
1372 | Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment()); |
1373 | CGF.Builder.CreateStore(Src, Tmp); |
1374 | CGF.Builder.CreateMemCpy( |
1375 | Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(), |
1376 | Tmp.getAlignment().getAsAlign(), |
1377 | llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedSize())); |
1378 | } |
1379 | } |
1380 | |
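/// Adjust an address according to the direct offset recorded in the ABI info:
/// if getDirectOffset() is nonzero, step forward that many bytes and
/// reinterpret the result as a pointer to the coerced type; otherwise the
/// address is returned unchanged.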
1381 | static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, |
1382 | const ABIArgInfo &info) { |
1383 | if (unsigned offset = info.getDirectOffset()) { |
1384 | addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty); |
1385 | addr = CGF.Builder.CreateConstInBoundsByteGEP(addr, |
1386 | CharUnits::fromQuantity(offset)); |
1387 | addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType()); |
1388 | } |
1389 | return addr; |
1390 | } |
1391 | |
1392 | namespace { |
1393 | |
1394 | /// Encapsulates information about the way function arguments from |
1395 | /// CGFunctionInfo should be passed to the actual LLVM IR function. |
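/// A hypothetical example: for a function whose return value is passed
/// indirectly via sret and whose single Clang argument is flattened into two
/// IR scalars, totalIRArgs() is 3, getSRetArgNo() is 0, and getIRArgs(0)
/// yields {1, 2}.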
1396 | class ClangToLLVMArgMapping { |
1397 | static const unsigned InvalidIndex = ~0U; |
1398 | unsigned InallocaArgNo; |
1399 | unsigned SRetArgNo; |
1400 | unsigned TotalIRArgs; |
1401 | |
1402 | /// Arguments of the LLVM IR function corresponding to a single Clang argument. |
1403 | struct IRArgs { |
1404 | unsigned PaddingArgIndex; |
1405 | // Argument is expanded to IR arguments at positions |
1406 | // [FirstArgIndex, FirstArgIndex + NumberOfArgs). |
1407 | unsigned FirstArgIndex; |
1408 | unsigned NumberOfArgs; |
1409 | |
1410 | IRArgs() |
1411 | : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex), |
1412 | NumberOfArgs(0) {} |
1413 | }; |
1414 | |
1415 | SmallVector<IRArgs, 8> ArgInfo; |
1416 | |
1417 | public: |
1418 | ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI, |
1419 | bool OnlyRequiredArgs = false) |
1420 | : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0), |
1421 | ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) { |
1422 | construct(Context, FI, OnlyRequiredArgs); |
1423 | } |
1424 | |
1425 | bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; } |
1426 | unsigned getInallocaArgNo() const { |
1427 | assert(hasInallocaArg()); |
1428 | return InallocaArgNo; |
1429 | } |
1430 | |
1431 | bool hasSRetArg() const { return SRetArgNo != InvalidIndex; } |
1432 | unsigned getSRetArgNo() const { |
1433 | assert(hasSRetArg()); |
1434 | return SRetArgNo; |
1435 | } |
1436 | |
1437 | unsigned totalIRArgs() const { return TotalIRArgs; } |
1438 | |
1439 | bool hasPaddingArg(unsigned ArgNo) const { |
1440 | assert(ArgNo < ArgInfo.size()); |
1441 | return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex; |
1442 | } |
1443 | unsigned getPaddingArgNo(unsigned ArgNo) const { |
1444 | assert(hasPaddingArg(ArgNo)); |
1445 | return ArgInfo[ArgNo].PaddingArgIndex; |
1446 | } |
1447 | |
1448 | /// Returns the index of the first IR argument corresponding to ArgNo, and |
1449 | /// the number of such arguments. |
1450 | std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const { |
1451 | assert(ArgNo < ArgInfo.size()); |
1452 | return std::make_pair(ArgInfo[ArgNo].FirstArgIndex, |
1453 | ArgInfo[ArgNo].NumberOfArgs); |
1454 | } |
1455 | |
1456 | private: |
1457 | void construct(const ASTContext &Context, const CGFunctionInfo &FI, |
1458 | bool OnlyRequiredArgs); |
1459 | }; |
1460 | |
1461 | void ClangToLLVMArgMapping::construct(const ASTContext &Context, |
1462 | const CGFunctionInfo &FI, |
1463 | bool OnlyRequiredArgs) { |
1464 | unsigned IRArgNo = 0; |
1465 | bool SwapThisWithSRet = false; |
1466 | const ABIArgInfo &RetAI = FI.getReturnInfo(); |
1467 | |
1468 | if (RetAI.getKind() == ABIArgInfo::Indirect) { |
1469 | SwapThisWithSRet = RetAI.isSRetAfterThis(); |
1470 | SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++; |
1471 | } |
1472 | |
1473 | unsigned ArgNo = 0; |
1474 | unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size(); |
1475 | for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs; |
1476 | ++I, ++ArgNo) { |
1477 | assert(I != FI.arg_end()); |
1478 | QualType ArgType = I->type; |
1479 | const ABIArgInfo &AI = I->info; |
1480 | // Collect data about IR arguments corresponding to Clang argument ArgNo. |
1481 | auto &IRArgs = ArgInfo[ArgNo]; |
1482 | |
1483 | if (AI.getPaddingType()) |
1484 | IRArgs.PaddingArgIndex = IRArgNo++; |
1485 | |
1486 | switch (AI.getKind()) { |
1487 | case ABIArgInfo::Extend: |
1488 | case ABIArgInfo::Direct: { |
1489 | // FIXME: handle sseregparm someday... |
1490 | llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType()); |
1491 | if (AI.isDirect() && AI.getCanBeFlattened() && STy) { |
1492 | IRArgs.NumberOfArgs = STy->getNumElements(); |
1493 | } else { |
1494 | IRArgs.NumberOfArgs = 1; |
1495 | } |
1496 | break; |
1497 | } |
1498 | case ABIArgInfo::Indirect: |
1499 | case ABIArgInfo::IndirectAliased: |
1500 | IRArgs.NumberOfArgs = 1; |
1501 | break; |
1502 | case ABIArgInfo::Ignore: |
1503 | case ABIArgInfo::InAlloca: |
1504 | // ignore and inalloca don't have matching LLVM parameters. |
1505 | IRArgs.NumberOfArgs = 0; |
1506 | break; |
1507 | case ABIArgInfo::CoerceAndExpand: |
1508 | IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size(); |
1509 | break; |
1510 | case ABIArgInfo::Expand: |
1511 | IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context); |
1512 | break; |
1513 | } |
1514 | |
1515 | if (IRArgs.NumberOfArgs > 0) { |
1516 | IRArgs.FirstArgIndex = IRArgNo; |
1517 | IRArgNo += IRArgs.NumberOfArgs; |
1518 | } |
1519 | |
1520 | // Skip over the sret parameter when it comes second. We already handled it |
1521 | // above. |
1522 | if (IRArgNo == 1 && SwapThisWithSRet) |
1523 | IRArgNo++; |
1524 | } |
1525 | assert(ArgNo == ArgInfo.size()); |
1526 | |
1527 | if (FI.usesInAlloca()) |
1528 | InallocaArgNo = IRArgNo++; |
1529 | |
1530 | TotalIRArgs = IRArgNo; |
1531 | } |
1532 | } // namespace |
1533 | |
1534 | /***/ |
1535 | |
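/// Returns whether the return value described by FI is passed through a
/// hidden sret pointer, either as an indirect return or as an inalloca slot
/// flagged as carrying the sret pointer.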
1536 | bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) { |
1537 | const auto &RI = FI.getReturnInfo(); |
1538 | return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet()); |
1539 | } |
1540 | |
1541 | bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) { |
1542 | return ReturnTypeUsesSRet(FI) && |
1543 | getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs(); |
1544 | } |
1545 | |
1546 | bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) { |
1547 | if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) { |
1548 | switch (BT->getKind()) { |
1549 | default: |
1550 | return false; |
1551 | case BuiltinType::Float: |
1552 | return getTarget().useObjCFPRetForRealType(TargetInfo::Float); |
1553 | case BuiltinType::Double: |
1554 | return getTarget().useObjCFPRetForRealType(TargetInfo::Double); |
1555 | case BuiltinType::LongDouble: |
1556 | return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble); |
1557 | } |
1558 | } |
1559 | |
1560 | return false; |
1561 | } |
1562 | |
1563 | bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) { |
1564 | if (const ComplexType *CT = ResultType->getAs<ComplexType>()) { |
1565 | if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) { |
1566 | if (BT->getKind() == BuiltinType::LongDouble) |
1567 | return getTarget().useObjCFP2RetForComplexLongDouble(); |
1568 | } |
1569 | } |
1570 | |
1571 | return false; |
1572 | } |
1573 | |
1574 | llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) { |
1575 | const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD); |
1576 | return GetFunctionType(FI); |
1577 | } |
1578 | |
1579 | llvm::FunctionType * |
1580 | CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) { |
1581 | |
1582 | bool Inserted = FunctionsBeingProcessed.insert(&FI).second; |
1583 | (void)Inserted; |
1584 | assert(Inserted && "Recursively being processed?" ); |
1585 | |
1586 | llvm::Type *resultType = nullptr; |
1587 | const ABIArgInfo &retAI = FI.getReturnInfo(); |
1588 | switch (retAI.getKind()) { |
1589 | case ABIArgInfo::Expand: |
1590 | case ABIArgInfo::IndirectAliased: |
1591 | llvm_unreachable("Invalid ABI kind for return argument" ); |
1592 | |
1593 | case ABIArgInfo::Extend: |
1594 | case ABIArgInfo::Direct: |
1595 | resultType = retAI.getCoerceToType(); |
1596 | break; |
1597 | |
1598 | case ABIArgInfo::InAlloca: |
1599 | if (retAI.getInAllocaSRet()) { |
1600 | // sret things on win32 aren't void; they return the sret pointer. |
1601 | QualType ret = FI.getReturnType(); |
1602 | llvm::Type *ty = ConvertType(ret); |
1603 | unsigned addressSpace = Context.getTargetAddressSpace(ret); |
1604 | resultType = llvm::PointerType::get(ty, addressSpace); |
1605 | } else { |
1606 | resultType = llvm::Type::getVoidTy(getLLVMContext()); |
1607 | } |
1608 | break; |
1609 | |
1610 | case ABIArgInfo::Indirect: |
1611 | case ABIArgInfo::Ignore: |
1612 | resultType = llvm::Type::getVoidTy(getLLVMContext()); |
1613 | break; |
1614 | |
1615 | case ABIArgInfo::CoerceAndExpand: |
1616 | resultType = retAI.getUnpaddedCoerceAndExpandType(); |
1617 | break; |
1618 | } |
1619 | |
1620 | ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true); |
1621 | SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs()); |
1622 | |
1623 | // Add type for sret argument. |
1624 | if (IRFunctionArgs.hasSRetArg()) { |
1625 | QualType Ret = FI.getReturnType(); |
1626 | llvm::Type *Ty = ConvertType(Ret); |
1627 | unsigned AddressSpace = Context.getTargetAddressSpace(Ret); |
1628 | ArgTypes[IRFunctionArgs.getSRetArgNo()] = |
1629 | llvm::PointerType::get(Ty, AddressSpace); |
1630 | } |
1631 | |
1632 | // Add type for inalloca argument. |
1633 | if (IRFunctionArgs.hasInallocaArg()) { |
1634 | auto ArgStruct = FI.getArgStruct(); |
1635 | assert(ArgStruct); |
1636 | ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo(); |
1637 | } |
1638 | |
1639 | // Add in all of the required arguments. |
1640 | unsigned ArgNo = 0; |
1641 | CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), |
1642 | ie = it + FI.getNumRequiredArgs(); |
1643 | for (; it != ie; ++it, ++ArgNo) { |
1644 | const ABIArgInfo &ArgInfo = it->info; |
1645 | |
1646 | // Insert a padding type to ensure proper alignment. |
1647 | if (IRFunctionArgs.hasPaddingArg(ArgNo)) |
1648 | ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] = |
1649 | ArgInfo.getPaddingType(); |
1650 | |
1651 | unsigned FirstIRArg, NumIRArgs; |
1652 | std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); |
1653 | |
1654 | switch (ArgInfo.getKind()) { |
1655 | case ABIArgInfo::Ignore: |
1656 | case ABIArgInfo::InAlloca: |
1657 | assert(NumIRArgs == 0); |
1658 | break; |
1659 | |
1660 | case ABIArgInfo::Indirect: { |
1661 | assert(NumIRArgs == 1); |
1662 | // Indirect arguments are always on the stack, which is the alloca address space. |
1663 | llvm::Type *LTy = ConvertTypeForMem(it->type); |
1664 | ArgTypes[FirstIRArg] = LTy->getPointerTo( |
1665 | CGM.getDataLayout().getAllocaAddrSpace()); |
1666 | break; |
1667 | } |
1668 | case ABIArgInfo::IndirectAliased: { |
1669 | assert(NumIRArgs == 1); |
1670 | llvm::Type *LTy = ConvertTypeForMem(it->type); |
1671 | ArgTypes[FirstIRArg] = LTy->getPointerTo(ArgInfo.getIndirectAddrSpace()); |
1672 | break; |
1673 | } |
1674 | case ABIArgInfo::Extend: |
1675 | case ABIArgInfo::Direct: { |
1676 | // Fast-isel and the optimizer generally like scalar values better than |
1677 | // FCAs, so we flatten them if this is safe to do for this argument. |
1678 | llvm::Type *argType = ArgInfo.getCoerceToType(); |
1679 | llvm::StructType *st = dyn_cast<llvm::StructType>(argType); |
1680 | if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { |
1681 | assert(NumIRArgs == st->getNumElements()); |
1682 | for (unsigned i = 0, e = st->getNumElements(); i != e; ++i) |
1683 | ArgTypes[FirstIRArg + i] = st->getElementType(i); |
1684 | } else { |
1685 | assert(NumIRArgs == 1); |
1686 | ArgTypes[FirstIRArg] = argType; |
1687 | } |
1688 | break; |
1689 | } |
1690 | |
1691 | case ABIArgInfo::CoerceAndExpand: { |
1692 | auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; |
1693 | for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) { |
1694 | *ArgTypesIter++ = EltTy; |
1695 | } |
1696 | assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); |
1697 | break; |
1698 | } |
1699 | |
1700 | case ABIArgInfo::Expand: |
1701 | auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; |
1702 | getExpandedTypes(it->type, ArgTypesIter); |
1703 | assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); |
1704 | break; |
1705 | } |
1706 | } |
1707 | |
1708 | bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased; |
1709 | assert(Erased && "Not in set?" ); |
1710 | |
1711 | return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic()); |
1712 | } |
1713 | |
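/// Return the LLVM function type to use for a vtable slot. If the method's
/// prototype is not yet convertible (for example, it involves an incomplete
/// type), an opaque empty struct type is returned as a placeholder.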
1714 | llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { |
1715 | const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); |
1716 | const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); |
1717 | |
1718 | if (!isFuncTypeConvertible(FPT)) |
1719 | return llvm::StructType::get(getLLVMContext()); |
1720 | |
1721 | return GetFunctionType(GD); |
1722 | } |
1723 | |
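/// Add IR attributes implied by the function prototype itself; currently this
/// only adds nounwind for prototypes with a resolved non-throwing exception
/// specification.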
1724 | static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, |
1725 | llvm::AttrBuilder &FuncAttrs, |
1726 | const FunctionProtoType *FPT) { |
1727 | if (!FPT) |
1728 | return; |
1729 | |
1730 | if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) && |
1731 | FPT->isNothrow()) |
1732 | FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); |
1733 | } |
1734 | |
1735 | bool CodeGenModule::MayDropFunctionReturn(const ASTContext &Context, |
1736 | QualType ReturnType) { |
1737 | // We can't just discard the return value for a record type with a |
1738 | // complex destructor or a non-trivially copyable type. |
1739 | if (const RecordType *RT = |
1740 | ReturnType.getCanonicalType()->getAs<RecordType>()) { |
1741 | if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) |
1742 | return ClassDecl->hasTrivialDestructor(); |
1743 | } |
1744 | return ReturnType.isTriviallyCopyableType(Context); |
1745 | } |
1746 | |
1747 | void CodeGenModule::getDefaultFunctionAttributes(StringRef Name, |
1748 | bool HasOptnone, |
1749 | bool AttrOnCallSite, |
1750 | llvm::AttrBuilder &FuncAttrs) { |
1751 | // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed. |
1752 | if (!HasOptnone) { |
1753 | if (CodeGenOpts.OptimizeSize) |
1754 | FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize); |
1755 | if (CodeGenOpts.OptimizeSize == 2) |
1756 | FuncAttrs.addAttribute(llvm::Attribute::MinSize); |
1757 | } |
1758 | |
1759 | if (CodeGenOpts.DisableRedZone) |
1760 | FuncAttrs.addAttribute(llvm::Attribute::NoRedZone); |
1761 | if (CodeGenOpts.IndirectTlsSegRefs) |
1762 | FuncAttrs.addAttribute("indirect-tls-seg-refs" ); |
1763 | if (CodeGenOpts.NoImplicitFloat) |
1764 | FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat); |
1765 | |
1766 | if (AttrOnCallSite) { |
1767 | // Attributes that should go on the call site only. |
1768 | if (!CodeGenOpts.SimplifyLibCalls || LangOpts.isNoBuiltinFunc(Name)) |
1769 | FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin); |
1770 | if (!CodeGenOpts.TrapFuncName.empty()) |
1771 | FuncAttrs.addAttribute("trap-func-name" , CodeGenOpts.TrapFuncName); |
1772 | } else { |
1773 | StringRef FpKind; |
1774 | switch (CodeGenOpts.getFramePointer()) { |
1775 | case CodeGenOptions::FramePointerKind::None: |
1776 | FpKind = "none" ; |
1777 | break; |
1778 | case CodeGenOptions::FramePointerKind::NonLeaf: |
1779 | FpKind = "non-leaf" ; |
1780 | break; |
1781 | case CodeGenOptions::FramePointerKind::All: |
1782 | FpKind = "all" ; |
1783 | break; |
1784 | } |
1785 | FuncAttrs.addAttribute("frame-pointer" , FpKind); |
1786 | |
1787 | if (CodeGenOpts.LessPreciseFPMAD) |
1788 | FuncAttrs.addAttribute("less-precise-fpmad" , "true" ); |
1789 | |
1790 | if (CodeGenOpts.NullPointerIsValid) |
1791 | FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid); |
1792 | |
1793 | if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE()) |
1794 | FuncAttrs.addAttribute("denormal-fp-math" , |
1795 | CodeGenOpts.FPDenormalMode.str()); |
1796 | if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) { |
1797 | FuncAttrs.addAttribute( |
1798 | "denormal-fp-math-f32" , |
1799 | CodeGenOpts.FP32DenormalMode.str()); |
1800 | } |
1801 | |
1802 | if (LangOpts.getFPExceptionMode() == LangOptions::FPE_Ignore) |
1803 | FuncAttrs.addAttribute("no-trapping-math" , "true" ); |
1804 | |
1805 | // Strict (compliant) code is the default, so only add this attribute to |
1806 | // indicate that we are trying to work around a problem case. |
1807 | if (!CodeGenOpts.StrictFloatCastOverflow) |
1808 | FuncAttrs.addAttribute("strict-float-cast-overflow" , "false" ); |
1809 | |
1810 | // TODO: Are these all needed? |
1811 | // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags. |
1812 | if (LangOpts.NoHonorInfs) |
1813 | FuncAttrs.addAttribute("no-infs-fp-math" , "true" ); |
1814 | if (LangOpts.NoHonorNaNs) |
1815 | FuncAttrs.addAttribute("no-nans-fp-math" , "true" ); |
1816 | if (LangOpts.UnsafeFPMath) |
1817 | FuncAttrs.addAttribute("unsafe-fp-math" , "true" ); |
1818 | if (CodeGenOpts.SoftFloat) |
1819 | FuncAttrs.addAttribute("use-soft-float" , "true" ); |
1820 | FuncAttrs.addAttribute("stack-protector-buffer-size" , |
1821 | llvm::utostr(CodeGenOpts.SSPBufferSize)); |
1822 | if (LangOpts.NoSignedZero) |
1823 | FuncAttrs.addAttribute("no-signed-zeros-fp-math" , "true" ); |
1824 | |
1825 | // TODO: Reciprocal estimate codegen options should apply to instructions? |
1826 | const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals; |
1827 | if (!Recips.empty()) |
1828 | FuncAttrs.addAttribute("reciprocal-estimates" , |
1829 | llvm::join(Recips, "," )); |
1830 | |
1831 | if (!CodeGenOpts.PreferVectorWidth.empty() && |
1832 | CodeGenOpts.PreferVectorWidth != "none" ) |
1833 | FuncAttrs.addAttribute("prefer-vector-width" , |
1834 | CodeGenOpts.PreferVectorWidth); |
1835 | |
1836 | if (CodeGenOpts.StackRealignment) |
1837 | FuncAttrs.addAttribute("stackrealign" ); |
1838 | if (CodeGenOpts.Backchain) |
1839 | FuncAttrs.addAttribute("backchain" ); |
1840 | if (CodeGenOpts.EnableSegmentedStacks) |
1841 | FuncAttrs.addAttribute("split-stack" ); |
1842 | |
1843 | if (CodeGenOpts.SpeculativeLoadHardening) |
1844 | FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening); |
1845 | } |
1846 | |
1847 | if (getLangOpts().assumeFunctionsAreConvergent()) { |
1848 | // Conservatively, mark all functions and calls in CUDA and OpenCL as |
1849 | // convergent (meaning, they may call an intrinsically convergent op, such |
1850 | // as __syncthreads() / barrier(), and so can't have certain optimizations |
1851 | // applied around them). LLVM will remove this attribute where it safely |
1852 | // can. |
1853 | FuncAttrs.addAttribute(llvm::Attribute::Convergent); |
1854 | } |
1855 | |
1856 | if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) { |
1857 | // Exceptions aren't supported in CUDA device code. |
1858 | FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); |
1859 | } |
1860 | |
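// Each entry supplied on the command line is expected to be either a bare
// attribute name or a "name=value" pair; entries without '=' get an empty
// value.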
1861 | for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) { |
1862 | StringRef Var, Value; |
1863 | std::tie(Var, Value) = Attr.split('='); |
1864 | FuncAttrs.addAttribute(Var, Value); |
1865 | } |
1866 | } |
1867 | |
1868 | void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) { |
1869 | llvm::AttrBuilder FuncAttrs; |
1870 | getDefaultFunctionAttributes(F.getName(), F.hasOptNone(), |
1871 | /* AttrOnCallSite = */ false, FuncAttrs); |
1872 | // TODO: call GetCPUAndFeaturesAttributes? |
1873 | F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs); |
1874 | } |
1875 | |
1876 | void CodeGenModule::addDefaultFunctionDefinitionAttributes( |
1877 | llvm::AttrBuilder &attrs) { |
1878 | getDefaultFunctionAttributes(/*function name*/ "" , /*optnone*/ false, |
1879 | /*for call*/ false, attrs); |
1880 | GetCPUAndFeaturesAttributes(GlobalDecl(), attrs); |
1881 | } |
1882 | |
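/// Map the -fno-builtin family of options and the no_builtin attribute onto
/// the corresponding "no-builtins" / "no-builtin-<name>" IR attributes.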
1883 | static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs, |
1884 | const LangOptions &LangOpts, |
1885 | const NoBuiltinAttr *NBA = nullptr) { |
1886 | auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) { |
1887 | SmallString<32> AttributeName; |
1888 | AttributeName += "no-builtin-" ; |
1889 | AttributeName += BuiltinName; |
1890 | FuncAttrs.addAttribute(AttributeName); |
1891 | }; |
1892 | |
1893 | // First, handle the language options passed through -fno-builtin. |
1894 | if (LangOpts.NoBuiltin) { |
1895 | // -fno-builtin disables them all. |
1896 | FuncAttrs.addAttribute("no-builtins" ); |
1897 | return; |
1898 | } |
1899 | |
1900 | // Then, add attributes for builtins specified through -fno-builtin-<name>. |
1901 | llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr); |
1902 | |
1903 | // Now, let's check the __attribute__((no_builtin("..."))) attribute added to |
1904 | // the source. |
1905 | if (!NBA) |
1906 | return; |
1907 | |
1908 | // If there is a wildcard in the builtin names specified through the |
1909 | // attribute, disable them all. |
1910 | if (llvm::is_contained(NBA->builtinNames(), "*" )) { |
1911 | FuncAttrs.addAttribute("no-builtins" ); |
1912 | return; |
1913 | } |
1914 | |
1915 | // And last, add the rest of the builtin names. |
1916 | llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr); |
1917 | } |
1918 | |
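/// Decide whether a value of type QTy, lowered according to AI, can be marked
/// noundef. Indirect and sign/zero-extended values always qualify; other
/// values qualify only if their in-memory representation carries no padding
/// bits invisible at the LLVM IR level and coercion does not widen them, with
/// conservative exceptions such as member pointers and most struct types.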
1919 | static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types, |
1920 | const llvm::DataLayout &DL, const ABIArgInfo &AI, |
1921 | bool CheckCoerce = true) { |
1922 | llvm::Type *Ty = Types.ConvertTypeForMem(QTy); |
1923 | if (AI.getKind() == ABIArgInfo::Indirect) |
1924 | return true; |
1925 | if (AI.getKind() == ABIArgInfo::Extend) |
1926 | return true; |
1927 | if (!DL.typeSizeEqualsStoreSize(Ty)) |
1928 | // TODO: This will result in a modest number of values not marked noundef |
1929 | // when they could be. We care about values that *invisibly* contain undef |
1930 | // bits from the perspective of LLVM IR. |
1931 | return false; |
1932 | if (CheckCoerce && AI.canHaveCoerceToType()) { |
1933 | llvm::Type *CoerceTy = AI.getCoerceToType(); |
1934 | if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy), |
1935 | DL.getTypeSizeInBits(Ty))) |
1936 | // If we're coercing to a type with a greater size than the canonical one, |
1937 | // we're introducing new undef bits. |
1938 | // Coercing to a type of smaller or equal size is ok, as we know that |
1939 | // there's no internal padding (typeSizeEqualsStoreSize). |
1940 | return false; |
1941 | } |
1942 | if (QTy->isExtIntType()) |
1943 | return true; |
1944 | if (QTy->isReferenceType()) |
1945 | return true; |
1946 | if (QTy->isNullPtrType()) |
1947 | return false; |
1948 | if (QTy->isMemberPointerType()) |
1949 | // TODO: Some member pointers are `noundef`, but it depends on the ABI. For |
1950 | // now, never mark them. |
1951 | return false; |
1952 | if (QTy->isScalarType()) { |
1953 | if (const ComplexType *Complex = dyn_cast<ComplexType>(QTy)) |
1954 | return DetermineNoUndef(Complex->getElementType(), Types, DL, AI, false); |
1955 | return true; |
1956 | } |
1957 | if (const VectorType *Vector = dyn_cast<VectorType>(QTy)) |
1958 | return DetermineNoUndef(Vector->getElementType(), Types, DL, AI, false); |
1959 | if (const MatrixType *Matrix = dyn_cast<MatrixType>(QTy)) |
1960 | return DetermineNoUndef(Matrix->getElementType(), Types, DL, AI, false); |
1961 | if (const ArrayType *Array = dyn_cast<ArrayType>(QTy)) |
1962 | return DetermineNoUndef(Array->getElementType(), Types, DL, AI, false); |
1963 | |
1964 | // TODO: Some structs may be `noundef`, in specific situations. |
1965 | return false; |
1966 | } |
1967 | |
1968 | /// Construct the IR attribute list of a function or call. |
1969 | /// |
1970 | /// When adding an attribute, please consider where it should be handled: |
1971 | /// |
1972 | /// - getDefaultFunctionAttributes is for attributes that are essentially |
1973 | /// part of the global target configuration (but perhaps can be |
1974 | /// overridden on a per-function basis). Adding attributes there |
1975 | /// will cause them to also be set in frontends that build on Clang's |
1976 | /// target-configuration logic, as well as for code defined in library |
1977 | /// modules such as CUDA's libdevice. |
1978 | /// |
1979 | /// - ConstructAttributeList builds on top of getDefaultFunctionAttributes |
1980 | /// and adds declaration-specific, convention-specific, and |
1981 | /// frontend-specific logic. The last is of particular importance: |
1982 | /// attributes that restrict how the frontend generates code must be |
1983 | /// added here rather than getDefaultFunctionAttributes. |
1984 | /// |