//===- ARM.cpp ------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// ARM ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class ARMABIInfo : public ABIInfo {
  ARMABIKind Kind;
  bool IsFloatABISoftFP;

public:
  ARMABIInfo(CodeGenTypes &CGT, ARMABIKind Kind) : ABIInfo(CGT), Kind(Kind) {
    setCCs();
    IsFloatABISoftFP = CGT.getCodeGenOpts().FloatABI == "softfp" ||
                       CGT.getCodeGenOpts().FloatABI == ""; // default
  }

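  // True for AEABI-derived environments (including Android and OHOS). Below,
  // EABI targets get the AAPCS default calling convention and the 88-byte
  // AEABI unwind exception object.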
  bool isEABI() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::Android:
    case llvm::Triple::EABI:
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABI:
    case llvm::Triple::GNUEABIT64:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::GNUEABIHFT64:
    case llvm::Triple::MuslEABI:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return getTarget().getTriple().isOHOSFamily();
    }
  }

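  // True only for the hard-float EABI variants, which pass floating-point
  // arguments in VFP registers and default to the ARM_AAPCS_VFP calling
  // convention.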
  bool isEABIHF() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::GNUEABIHFT64:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return false;
    }
  }

  ARMABIKind getABIKind() const { return Kind; }

  bool allowBFloatArgsAndRet() const override {
    return !IsFloatABISoftFP && getTarget().hasBFloat16Type();
  }

private:
  ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic,
                                unsigned functionCallConv) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
                                  unsigned functionCallConv) const;
  ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base,
                                          uint64_t Members) const;
  ABIArgInfo coerceIllegalVector(QualType Ty) const;
  bool isIllegalVectorType(QualType Ty) const;
  bool containsAnyFP16Vectors(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;
  bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;

  bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;

  llvm::CallingConv::ID getLLVMDefaultCC() const;
  llvm::CallingConv::ID getABIDefaultCC() const;
  void setCCs();
};

class ARMSwiftABIInfo : public SwiftABIInfo {
public:
  explicit ARMSwiftABIInfo(CodeGenTypes &CGT)
      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}

  bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                         unsigned NumElts) const override;
};

class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIKind K)
      : TargetCodeGenInfo(std::make_unique<ARMABIInfo>(CGT, K)) {
    SwiftInfo = std::make_unique<ARMSwiftABIInfo>(CGT);
  }

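  // DWARF register 13 is the ARM stack pointer (sp/r13).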
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 13;
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

    // 0-15 are the 16 integer registers.
    AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
    return false;
  }

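  // The ARM EHABI unwinder expects an 88-byte _Unwind_Control_Block, larger
  // than the generic unwind exception object.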
  unsigned getSizeOfUnwindException() const override {
    if (getABIInfo<ARMABIInfo>().isEABI())
      return 88;
    return TargetCodeGenInfo::getSizeOfUnwindException();
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    if (GV->isDeclaration())
      return;
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;
    auto *Fn = cast<llvm::Function>(GV);

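    // Validate branch protection (BTI / return-address signing): prefer the
    // function's own target attribute when present, otherwise check the
    // module-level language options against the target architecture.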
    if (const auto *TA = FD->getAttr<TargetAttr>()) {
      ParsedTargetAttr Attr =
          CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
      if (!Attr.BranchProtection.empty()) {
        TargetInfo::BranchProtectionInfo BPI{};
        StringRef DiagMsg;
        StringRef Arch =
            Attr.CPU.empty() ? CGM.getTarget().getTargetOpts().CPU : Attr.CPU;
        if (!CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
                                                      Arch, BPI, DiagMsg)) {
          CGM.getDiags().Report(
              D->getLocation(),
              diag::warn_target_unsupported_branch_protection_attribute)
              << Arch;
        } else
          setBranchProtectionFnAttributes(BPI, (*Fn));
      } else if (CGM.getLangOpts().BranchTargetEnforcement ||
                 CGM.getLangOpts().hasSignReturnAddress()) {
        // If the Branch Protection attribute is missing, validate the target
        // Architecture attribute against Branch Protection command line
        // settings.
        if (!CGM.getTarget().isBranchProtectionSupportedArch(Attr.CPU))
          CGM.getDiags().Report(
              D->getLocation(),
              diag::warn_target_unsupported_branch_protection_attribute)
              << Attr.CPU;
      }
    } else if (CGM.getTarget().isBranchProtectionSupportedArch(
                   CGM.getTarget().getTargetOpts().CPU)) {
      TargetInfo::BranchProtectionInfo BPI(CGM.getLangOpts());
      setBranchProtectionFnAttributes(BPI, (*Fn));
    }

    const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case ARMInterruptAttr::Generic: Kind = ""; break;
    case ARMInterruptAttr::IRQ:     Kind = "IRQ"; break;
    case ARMInterruptAttr::FIQ:     Kind = "FIQ"; break;
    case ARMInterruptAttr::SWI:     Kind = "SWI"; break;
    case ARMInterruptAttr::ABORT:   Kind = "ABORT"; break;
    case ARMInterruptAttr::UNDEF:   Kind = "UNDEF"; break;
    }

    Fn->addFnAttr("interrupt", Kind);

    ARMABIKind ABI = getABIInfo<ARMABIInfo>().getABIKind();
    if (ABI == ARMABIKind::APCS)
      return;

    // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
    // however this is not necessarily true on taking any interrupt. Instruct
    // the backend to perform a realignment as part of the function prologue.
    llvm::AttrBuilder B(Fn->getContext());
    B.addStackAlignmentAttr(8);
    Fn->addFnAttrs(B);
  }
};

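// MSVC-targeting variant: emits /DEFAULTLIB and /FAILIFMISMATCH linker
// directives into the object file and adds Windows stack-probe attributes.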
class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
public:
  WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIKind K)
      : ARMTargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

void WindowsARMTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
} // namespace

void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!::classifyReturnType(getCXXABI(), FI, *this))
    FI.getReturnInfo() =
        classifyReturnType(FI.getReturnType(), FI.isVariadic(),
                           FI.getCallingConvention());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type, FI.isVariadic(),
                                  FI.getCallingConvention());

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  llvm::CallingConv::ID cc = getRuntimeCC();
  if (cc != llvm::CallingConv::C)
    FI.setEffectiveCallingConvention(cc);
}

/// Return the default calling convention that LLVM will use.
llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
  // The default calling convention that LLVM will infer.
  if (isEABIHF() || getTarget().getTriple().isWatchABI())
    return llvm::CallingConv::ARM_AAPCS_VFP;
  else if (isEABI())
    return llvm::CallingConv::ARM_AAPCS;
  else
    return llvm::CallingConv::ARM_APCS;
}

/// Return the calling convention that our ABI would like us to use
/// as the C calling convention.
llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
  switch (getABIKind()) {
  case ARMABIKind::APCS:
    return llvm::CallingConv::ARM_APCS;
  case ARMABIKind::AAPCS:
    return llvm::CallingConv::ARM_AAPCS;
  case ARMABIKind::AAPCS_VFP:
    return llvm::CallingConv::ARM_AAPCS_VFP;
  case ARMABIKind::AAPCS16_VFP:
    return llvm::CallingConv::ARM_AAPCS_VFP;
  }
  llvm_unreachable("bad ABI kind");
}

void ARMABIInfo::setCCs() {
  assert(getRuntimeCC() == llvm::CallingConv::C);

  // Don't muddy up the IR with a ton of explicit annotations if
  // they'd just match what LLVM will infer from the triple.
  llvm::CallingConv::ID abiCC = getABIDefaultCC();
  if (abiCC != getLLVMDefaultCC())
    RuntimeCC = abiCC;
}

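// Coerce an illegal vector to a legal representation: 32 bits or fewer become
// a single i32, 64- and 128-bit vectors become <N x i32>, and anything else
// is passed indirectly.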
ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const {
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size <= 32) {
    llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 64 || Size == 128) {
    auto *ResType = llvm::FixedVectorType::get(
        llvm::Type::getInt32Ty(getVMContext()), Size / 32);
    return ABIArgInfo::getDirect(ResType);
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

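// For example, 'struct S { float x, y, z; };' is a homogeneous aggregate with
// Base = float and Members = 3; under AAPCS-VFP it is a CPRC passed in
// s0-s2.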
ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
                                                    const Type *Base,
                                                    uint64_t Members) const {
  assert(Base && "Base class should be set for homogeneous aggregate");
  // Base can be a floating-point or a vector.
  if (const VectorType *VT = Base->getAs<VectorType>()) {
    // FP16 vectors should be converted to integer vectors
    if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) {
      uint64_t Size = getContext().getTypeSize(VT);
      auto *NewVecTy = llvm::FixedVectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), Size / 32);
      llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
      return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
    }
  }
  unsigned Align = 0;
  if (getABIKind() == ARMABIKind::AAPCS ||
      getABIKind() == ARMABIKind::AAPCS_VFP) {
    // For alignment adjusted HFAs, cap the argument alignment to 8, leave it
    // default otherwise.
    Align = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    unsigned BaseAlign = getContext().getTypeAlignInChars(Base).getQuantity();
    Align = (Align > BaseAlign && Align >= 8) ? 8 : 0;
  }
  return ABIArgInfo::getDirect(nullptr, 0, nullptr, false, Align);
}

ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
                                            unsigned functionCallConv) const {
  // 6.1.2.1 The following argument types are VFP CPRCs:
  //   A single-precision floating-point type (including promoted
  //   half-precision types); A double-precision floating-point type;
  //   A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
  //   with a Base Type of a single- or double-precision floating-point type,
  //   64-bit containerized vectors or 128-bit containerized vectors with one
  //   to four Elements.
  // Variadic functions should always marshal to the base standard.
  bool IsAAPCS_VFP =
      !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ false);

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty);

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
      Ty = EnumTy->getDecl()->getIntegerType();
    }

    if (const auto *EIT = Ty->getAs<BitIntType>())
      if (EIT->getNumBits() > 64)
        return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

    return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                              : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  if (IsAAPCS_VFP) {
    // Homogeneous Aggregates need to be expanded when we can fit the aggregate
    // into VFP registers.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members))
      return classifyHomogeneousAggregate(Ty, Base, Members);
  } else if (getABIKind() == ARMABIKind::AAPCS16_VFP) {
    // WatchOS does have homogeneous aggregates. Note that we intentionally use
    // this convention even for a variadic function: the backend will use GPRs
    // if needed.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members)) {
      assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
      llvm::Type *Ty =
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
      return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
    }
  }

  if (getABIKind() == ARMABIKind::AAPCS16_VFP &&
      getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
    // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
    // bigger than 128-bits, they get placed in space allocated by the caller,
    // and a pointer is passed.
    return ABIArgInfo::getIndirect(
        CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
  }

  // Support byval for ARM.
  // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
  // most 8-byte. We realign the indirect argument if type alignment is bigger
  // than ABI alignment.
  uint64_t ABIAlign = 4;
  uint64_t TyAlign;
  if (getABIKind() == ARMABIKind::AAPCS_VFP ||
      getABIKind() == ARMABIKind::AAPCS) {
    TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    ABIAlign = std::clamp(TyAlign, (uint64_t)4, (uint64_t)8);
  } else {
    TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
  }
  if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
    assert(getABIKind() != ARMABIKind::AAPCS16_VFP && "unexpected byval");
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
                                   /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  // Otherwise, pass by coercing to a structure of the appropriate size.
  llvm::Type *ElemTy;
  unsigned SizeRegs;
  // FIXME: Try to match the types of the arguments more accurately where
  // we can.
  if (TyAlign <= 4) {
    ElemTy = llvm::Type::getInt32Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  } else {
    ElemTy = llvm::Type::getInt64Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
  }

  return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
}

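// For example, 'union { int i; char c; }' is integer-like and is returned in
// r0 under APCS, while 'struct { short a, b; }' is not, because 'b' sits at a
// nonzero offset.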
static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
  // is called integer-like if its size is less than or equal to one word, and
  // the offset of each of its addressable sub-fields is zero.

  uint64_t Size = Context.getTypeSize(Ty);

  // Check that the type fits in a word.
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
    return true;

  // Small complex integer types are "integer like".
  if (const ComplexType *CT = Ty->getAs<ComplexType>())
    return isIntegerLikeType(CT->getElementType(), Context, VMContext);

  // Single element and zero sized arrays should be allowed, by the definition
  // above, but they are not.

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Ignore records with flexible arrays.
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const FieldDecl *FD = *i;

    // Bit-fields are not addressable, we only need to verify they are "integer
    // like". We still have to disallow a subsequent non-bitfield, for example:
    //   struct { int : 0; int x; }
    // is non-integer like according to gcc.
    if (FD->isBitField()) {
      if (!RD->isUnion())
        HadField = true;

      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
        return false;

      continue;
    }

    // Check if this field is at offset 0.
    if (Layout.getFieldOffset(idx) != 0)
      return false;

    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure. This doesn't match the
    // wording above, but follows gcc in situations with a field following an
    // empty structure.
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}

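// Classify how a value of type RetTy is returned: in VFP registers for
// homogeneous aggregates under AAPCS-VFP, in r0 for small integer-like
// composites, otherwise indirectly through a caller-allocated buffer.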
ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
                                          unsigned functionCallConv) const {

  // Variadic functions should always marshal to the base standard.
  bool IsAAPCS_VFP =
      !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ true);

  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // Large vector types should be returned via memory.
    if (getContext().getTypeSize(RetTy) > 128)
      return getNaturalAlignIndirect(RetTy);
    // TODO: FP16/BF16 vectors should be converted to integer vectors
    // This check is similar to isIllegalVectorType - refactor?
    if ((!getTarget().hasLegalHalfType() &&
         (VT->getElementType()->isFloat16Type() ||
          VT->getElementType()->isHalfType())) ||
        (IsFloatABISoftFP &&
         VT->getElementType()->isBFloat16Type()))
      return coerceIllegalVector(RetTy);
  }

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = RetTy->getAs<BitIntType>())
      if (EIT->getNumBits() > 64)
        return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);

    return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                                : ABIArgInfo::getDirect();
  }

  // Are we following APCS?
  if (getABIKind() == ARMABIKind::APCS) {
    if (isEmptyRecord(getContext(), RetTy, false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(
          getVMContext(), getContext().getTypeSize(RetTy)));

    // Integer like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    return getNaturalAlignIndirect(RetTy);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Check for homogeneous aggregates with AAPCS-VFP.
  if (IsAAPCS_VFP) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(RetTy, Base, Members))
      return classifyHomogeneousAggregate(RetTy, Base, Members);
  }

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    if (getDataLayout().isBigEndian())
      // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4)
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  } else if (Size <= 128 && getABIKind() == ARMABIKind::AAPCS16_VFP) {
    llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
    llvm::Type *CoerceTy =
        llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
    return ABIArgInfo::getDirect(CoerceTy);
  }

  return getNaturalAlignIndirect(RetTy);
}

/// isIllegalVector - check whether Ty is an illegal vector type.
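/// For example, <3 x i32> (a non-power-of-2 element count) and <2 x i8>
/// (only 16 bits wide) are illegal everywhere except Android, which keeps
/// the legacy Clang 3.1 vector ABI.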
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On targets that don't support half, fp16 or bfloat, they are expanded
    // into float, and we don't want the ABI to depend on whether or not they
    // are supported in hardware. Thus return false to coerce vectors of these
    // types into integer vectors.
    // We do not depend on hasLegalHalfType for bfloat as it is a
    // separate IR type.
    if ((!getTarget().hasLegalHalfType() &&
         (VT->getElementType()->isFloat16Type() ||
          VT->getElementType()->isHalfType())) ||
        (IsFloatABISoftFP &&
         VT->getElementType()->isBFloat16Type()))
      return true;
    if (isAndroid()) {
      // Android shipped using Clang 3.1, which supported a slightly different
      // vector ABI. The primary differences were that 3-element vector types
      // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path
      // accepts that legacy behavior for Android only.
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      // NumElements should be power of 2 or equal to 3.
      if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
        return true;
    } else {
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      uint64_t Size = getContext().getTypeSize(VT);
      // NumElements should be power of 2.
      if (!llvm::isPowerOf2_32(NumElements))
        return true;
      // Size should be greater than 32 bits.
      return Size <= 32;
    }
  }
  return false;
}

/// Return true if a type contains any 16-bit floating point vectors
bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElements = AT->getZExtSize();
    if (NElements == 0)
      return false;
    return containsAnyFP16Vectors(AT->getElementType());
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {