//===--- SwiftCallingConv.cpp - Lowering for the Swift calling convention -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of the abstract lowering for the Swift calling convention.
//
//===----------------------------------------------------------------------===//

#include "clang/CodeGen/SwiftCallingConv.h"
#include "ABIInfo.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/Basic/TargetInfo.h"
#include <optional>

using namespace clang;
using namespace CodeGen;
using namespace swiftcall;

static const SwiftABIInfo &getSwiftABIInfo(CodeGenModule &CGM) {
  return CGM.getTargetCodeGenInfo().getSwiftABIInfo();
}

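/// Return true if \p n is a power of 2 (or zero): in two's complement,
/// n & -n isolates the lowest set bit, so the identity below holds exactly
/// when at most one bit of \p n is set.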
static bool isPowerOf2(unsigned n) {
  return n == (n & -n);
}

/// Given two types with the same size, try to find a common type.
static llvm::Type *getCommonType(llvm::Type *first, llvm::Type *second) {
  assert(first != second);

  // Allow pointers to merge with integers, but prefer the integer type.
  if (first->isIntegerTy()) {
    if (second->isPointerTy()) return first;
  } else if (first->isPointerTy()) {
    if (second->isIntegerTy()) return second;
    if (second->isPointerTy()) return first;

  // Allow two vectors to be merged (given that they have the same size).
  // This assumes that we never have two different vector register sets.
  } else if (auto firstVecTy = dyn_cast<llvm::VectorType>(first)) {
    if (auto secondVecTy = dyn_cast<llvm::VectorType>(second)) {
      if (auto commonTy = getCommonType(firstVecTy->getElementType(),
                                        secondVecTy->getElementType())) {
        return (commonTy == firstVecTy->getElementType() ? first : second);
      }
    }
  }

  return nullptr;
}

static CharUnits getTypeStoreSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeStoreSize(type));
}

static CharUnits getTypeAllocSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeAllocSize(type));
}

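/// Add typed data for the given Clang type at the given offset, decomposing
/// records, constant arrays, complex and atomic types into their components;
/// member pointers are recorded as opaque data and everything else as a
/// single scalar entry.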
void SwiftAggLowering::addTypedData(QualType type, CharUnits begin) {
  // Deal with various aggregate types as special cases:

  // Record types.
  if (auto recType = type->getAs<RecordType>()) {
    addTypedData(recType->getDecl(), begin);

  // Array types.
  } else if (type->isArrayType()) {
    // Incomplete array types (flexible array members?) don't provide
    // data to lay out, and the other cases shouldn't be possible.
    auto arrayType = CGM.getContext().getAsConstantArrayType(type);
    if (!arrayType) return;

    QualType eltType = arrayType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    for (uint64_t i = 0, e = arrayType->getZExtSize(); i != e; ++i) {
      addTypedData(eltType, begin + i * eltSize);
    }

  // Complex types.
  } else if (auto complexType = type->getAs<ComplexType>()) {
    auto eltType = complexType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    auto eltLLVMType = CGM.getTypes().ConvertType(eltType);
    addTypedData(eltLLVMType, begin, begin + eltSize);
    addTypedData(eltLLVMType, begin + eltSize, begin + 2 * eltSize);

  // Member pointer types.
  } else if (type->getAs<MemberPointerType>()) {
    // Just add it all as opaque.
    addOpaqueData(begin, begin + CGM.getContext().getTypeSizeInChars(type));

  // Atomic types.
  } else if (const auto *atomicType = type->getAs<AtomicType>()) {
    auto valueType = atomicType->getValueType();
    auto atomicSize = CGM.getContext().getTypeSizeInChars(atomicType);
    auto valueSize = CGM.getContext().getTypeSizeInChars(valueType);

    addTypedData(atomicType->getValueType(), begin);

    // Add atomic padding.
    auto atomicPadding = atomicSize - valueSize;
    if (atomicPadding > CharUnits::Zero())
      addOpaqueData(begin + valueSize, begin + atomicSize);

  // Everything else is scalar and should not convert as an LLVM aggregate.
  } else {
    // We intentionally convert as !ForMem because we want to preserve
    // that a type was an i1.
    auto *llvmType = CGM.getTypes().ConvertType(type);
    addTypedData(llvmType, begin);
  }
}

void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin) {
  addTypedData(record, begin, CGM.getContext().getASTRecordLayout(record));
}

void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin,
                                    const ASTRecordLayout &layout) {
  // Unions are a special case.
  if (record->isUnion()) {
    for (auto *field : record->fields()) {
      if (field->isBitField()) {
        addBitFieldData(field, begin, 0);
      } else {
        addTypedData(field->getType(), begin);
      }
    }
    return;
  }

  // Note that correctness does not rely on us adding things in
  // their actual order of layout; it's just somewhat more efficient
  // for the builder.

  // With that in mind, add "early" C++ data.
  auto cxxRecord = dyn_cast<CXXRecordDecl>(record);
  if (cxxRecord) {
    //   - a v-table pointer, if the class adds its own
    if (layout.hasOwnVFPtr()) {
      addTypedData(CGM.Int8PtrTy, begin);
    }

    //   - non-virtual bases
    for (auto &baseSpecifier : cxxRecord->bases()) {
      if (baseSpecifier.isVirtual()) continue;

      auto baseRecord = baseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getBaseClassOffset(baseRecord));
    }

    //   - a vbptr if the class adds its own
    if (layout.hasOwnVBPtr()) {
      addTypedData(CGM.Int8PtrTy, begin + layout.getVBPtrOffset());
    }
  }

  // Add fields.
  for (auto *field : record->fields()) {
    auto fieldOffsetInBits = layout.getFieldOffset(field->getFieldIndex());
    if (field->isBitField()) {
      addBitFieldData(field, begin, fieldOffsetInBits);
    } else {
      addTypedData(field->getType(),
                   begin + CGM.getContext().toCharUnitsFromBits(fieldOffsetInBits));
    }
  }

  // Add "late" C++ data:
  if (cxxRecord) {
    //   - virtual bases
    for (auto &vbaseSpecifier : cxxRecord->vbases()) {
      auto baseRecord = vbaseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getVBaseClassOffset(baseRecord));
    }
  }
}

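/// Record the storage occupied by a bit-field as opaque data, given the byte
/// offset of its enclosing record and the bit-field's bit offset within that
/// record.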
void SwiftAggLowering::addBitFieldData(const FieldDecl *bitfield,
                                       CharUnits recordBegin,
                                       uint64_t bitfieldBitBegin) {
  assert(bitfield->isBitField());
  auto &ctx = CGM.getContext();
  auto width = bitfield->getBitWidthValue(ctx);

  // We can ignore zero-width bit-fields.
  if (width == 0) return;

  // toCharUnitsFromBits rounds down.
  CharUnits bitfieldByteBegin = ctx.toCharUnitsFromBits(bitfieldBitBegin);

  // Find the offset of the last byte that is partially occupied by the
  // bit-field; since we otherwise expect exclusive ends, the end is the
  // next byte.
  uint64_t bitfieldBitLast = bitfieldBitBegin + width - 1;
  CharUnits bitfieldByteEnd =
      ctx.toCharUnitsFromBits(bitfieldBitLast) + CharUnits::One();
  addOpaqueData(recordBegin + bitfieldByteBegin,
                recordBegin + bitfieldByteEnd);
}

void SwiftAggLowering::addTypedData(llvm::Type *type, CharUnits begin) {
  assert(type && "didn't provide type for typed data");
  addTypedData(type, begin, begin + getTypeStoreSize(CGM, type));
}

void SwiftAggLowering::addTypedData(llvm::Type *type,
                                    CharUnits begin, CharUnits end) {
  assert(type && "didn't provide type for typed data");
  assert(getTypeStoreSize(CGM, type) == end - begin);

  // Legalize vector types.
  if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
    SmallVector<llvm::Type*, 4> componentTys;
    legalizeVectorType(CGM, end - begin, vecTy, componentTys);
    assert(componentTys.size() >= 1);

    // Walk the initial components.
    for (size_t i = 0, e = componentTys.size(); i != e - 1; ++i) {
      llvm::Type *componentTy = componentTys[i];
      auto componentSize = getTypeStoreSize(CGM, componentTy);
      assert(componentSize < end - begin);
      addLegalTypedData(componentTy, begin, begin + componentSize);
      begin += componentSize;
    }

    return addLegalTypedData(componentTys.back(), begin, end);
  }

  // Legalize integer types.
  if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
    if (!isLegalIntegerType(CGM, intTy))
      return addOpaqueData(begin, end);
  }

  // All other types should be legal.
  return addLegalTypedData(type, begin, end);
}

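/// Add typed data of a type that is already legal for the target over the
/// given range; if the start of the range is not naturally aligned for the
/// type, vectors are split into their elements and anything else is recorded
/// as opaque data.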
void SwiftAggLowering::addLegalTypedData(llvm::Type *type,
                                         CharUnits begin, CharUnits end) {
  // Require the type to be naturally aligned.
  if (!begin.isZero() && !begin.isMultipleOf(getNaturalAlignment(CGM, type))) {

    // Try splitting vector types.
    if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
      auto split = splitLegalVectorType(CGM, end - begin, vecTy);
      auto eltTy = split.first;
      auto numElts = split.second;

      auto eltSize = (end - begin) / numElts;
      assert(eltSize == getTypeStoreSize(CGM, eltTy));
      for (size_t i = 0, e = numElts; i != e; ++i) {
        addLegalTypedData(eltTy, begin, begin + eltSize);
        begin += eltSize;
      }
      assert(begin == end);
      return;
    }

    return addOpaqueData(begin, end);
  }

  addEntry(type, begin, end);
}

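/// Add an entry of non-aggregate type over the given range, resolving any
/// overlap with existing entries by merging compatible types, splitting
/// vectors, or degrading the affected entries to opaque data.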
void SwiftAggLowering::addEntry(llvm::Type *type,
                                CharUnits begin, CharUnits end) {
  assert((!type ||
          (!isa<llvm::StructType>(type) && !isa<llvm::ArrayType>(type))) &&
         "cannot add aggregate-typed data");
  assert(!type || begin.isMultipleOf(getNaturalAlignment(CGM, type)));

  // Fast path: we can just add entries to the end.
  if (Entries.empty() || Entries.back().End <= begin) {
    Entries.push_back({begin, end, type});
    return;
  }

  // Find the first existing entry that ends after the start of the new data.
  // TODO: do a binary search if Entries is big enough for it to matter.
  size_t index = Entries.size() - 1;
  while (index != 0) {
    if (Entries[index - 1].End <= begin) break;
    --index;
  }

  // The entry ends after the start of the new data.
  // If the entry starts after the end of the new data, there's no conflict.
  if (Entries[index].Begin >= end) {
    // This insertion is potentially O(n), but the way we generally build
    // these layouts makes that unlikely to matter: we'd need a union of
    // several very large types.
    Entries.insert(Entries.begin() + index, {begin, end, type});
    return;
  }

  // Otherwise, the ranges overlap. The new range might also overlap
  // with later ranges.
restartAfterSplit:

  // Simplest case: an exact overlap.
  if (Entries[index].Begin == begin && Entries[index].End == end) {
    // If the types match exactly, great.
    if (Entries[index].Type == type) return;

    // If either type is opaque, make the entry opaque and return.
    if (Entries[index].Type == nullptr) {
      return;
    } else if (type == nullptr) {
      Entries[index].Type = nullptr;
      return;
    }

    // If they disagree in an ABI-agnostic way, just resolve the conflict
    // arbitrarily.
    if (auto entryType = getCommonType(Entries[index].Type, type)) {
      Entries[index].Type = entryType;
      return;
    }

    // Otherwise, make the entry opaque.
    Entries[index].Type = nullptr;
    return;
  }

  // Okay, we have an overlapping conflict of some sort.

  // If we have a vector type, split it.
  if (auto vecTy = dyn_cast_or_null<llvm::VectorType>(type)) {
    auto eltTy = vecTy->getElementType();
    CharUnits eltSize =
        (end - begin) / cast<llvm::FixedVectorType>(vecTy)->getNumElements();
    assert(eltSize == getTypeStoreSize(CGM, eltTy));
    for (unsigned i = 0,
                  e = cast<llvm::FixedVectorType>(vecTy)->getNumElements();
         i != e; ++i) {
      addEntry(eltTy, begin, begin + eltSize);
      begin += eltSize;
    }
    assert(begin == end);
    return;
  }

  // If the entry is a vector type, split it and try again.
  if (Entries[index].Type && Entries[index].Type->isVectorTy()) {
    splitVectorEntry(index);
    goto restartAfterSplit;
  }

  // Okay, we have no choice but to make the existing entry opaque.

  Entries[index].Type = nullptr;

  // Stretch the start of the entry to the beginning of the range.
  if (begin < Entries[index].Begin) {
    Entries[index].Begin = begin;
    assert(index == 0 || begin >= Entries[index - 1].End);
  }

  // Stretch the end of the entry to the end of the range; but if we run
  // into the start of the next entry, just leave the range there and repeat.
  while (end > Entries[index].End) {
    assert(Entries[index].Type == nullptr);

    // If the range doesn't overlap the next entry, we're done.
    if (index == Entries.size() - 1 || end <= Entries[index + 1].Begin) {
      Entries[index].End = end;
      break;
    }

    // Otherwise, stretch to the start of the next entry.
    Entries[index].End = Entries[index + 1].Begin;

    // Continue with the next entry.
    index++;

    // This entry needs to be made opaque if it is not already.
    if (Entries[index].Type == nullptr)
      continue;

    // Split vector entries unless we completely subsume them.
    if (Entries[index].Type->isVectorTy() &&
        end < Entries[index].End) {
      splitVectorEntry(index);
    }

    // Make the entry opaque.
    Entries[index].Type = nullptr;
  }
}

/// Replace the entry of vector type at offset 'index' with a sequence
/// of its component vectors.
void SwiftAggLowering::splitVectorEntry(unsigned index) {
  auto vecTy = cast<llvm::VectorType>(Entries[index].Type);
  auto split = splitLegalVectorType(CGM, Entries[index].getWidth(), vecTy);

  auto eltTy = split.first;
  CharUnits eltSize = getTypeStoreSize(CGM, eltTy);
  auto numElts = split.second;
  Entries.insert(Entries.begin() + index + 1, numElts - 1, StorageEntry());

  CharUnits begin = Entries[index].Begin;
  for (unsigned i = 0; i != numElts; ++i) {
    unsigned idx = index + i;
    Entries[idx].Type = eltTy;
    Entries[idx].Begin = begin;
    Entries[idx].End = begin + eltSize;
    begin += eltSize;
  }
}

/// Given a power-of-two unit size, return the offset of the aligned unit
/// of that size which contains the given offset.
///
/// In other words, round down to the nearest multiple of the unit size.
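///
/// For example, with a unit size of 8, offsets 8 through 15 all map to 8.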
static CharUnits getOffsetAtStartOfUnit(CharUnits offset, CharUnits unitSize) {
  assert(isPowerOf2(unitSize.getQuantity()));
  auto unitMask = ~(unitSize.getQuantity() - 1);
  return CharUnits::fromQuantity(offset.getQuantity() & unitMask);
}

static bool areBytesInSameUnit(CharUnits first, CharUnits second,
                               CharUnits chunkSize) {
  return getOffsetAtStartOfUnit(first, chunkSize)
      == getOffsetAtStartOfUnit(second, chunkSize);
}

static bool isMergeableEntryType(llvm::Type *type) {
  // Opaquely-typed memory is always mergeable.
  if (type == nullptr) return true;

  // Pointers and integers are always mergeable. In theory we should not
  // merge pointers, but (1) it doesn't currently matter in practice because
  // the chunk size is never greater than the size of a pointer and (2)
  // Swift IRGen uses integer types for a lot of things that are "really"
  // just storing pointers (like std::optional<SomePointer>). If we ever have a
  // target that would otherwise combine pointers, we should put some effort
  // into fixing those cases in Swift IRGen and then call out pointer types
  // here.

  // Floating-point and vector types should never be merged.
  // Most such types are too large and highly-aligned to ever trigger merging
  // in practice, but it's important for the rule to cover at least 'half'
  // and 'float', as well as things like small vectors of 'i1' or 'i8'.
  return (!type->isFloatingPointTy() && !type->isVectorTy());
}

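/// Should two adjacent entries be merged into a single opaque range when the
/// lowering is finished?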
bool SwiftAggLowering::shouldMergeEntries(const StorageEntry &first,
                                          const StorageEntry &second,
                                          CharUnits chunkSize) {
  // Only merge entries that overlap the same chunk. We test this first
  // despite being a bit more expensive because this is the condition that
  // tends to prevent merging.
  if (!areBytesInSameUnit(first.End - CharUnits::One(), second.Begin,
                          chunkSize))
    return false;

  return (isMergeableEntryType(first.Type) &&
          isMergeableEntryType(second.Type));
}

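/// Finish the lowering: merge abutting mergeable entries that share a chunk
/// into opaque ranges, then re-emit each opaque range as integer entries, one
/// per intersected chunk, sized to the smallest aligned power-of-2 unit that
/// covers the intersection.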
void SwiftAggLowering::finish() {
  if (Entries.empty()) {
    Finished = true;
    return;
  }

  // We logically split the layout down into a series of chunks of this size,
  // which is generally the size of a pointer.
  const CharUnits chunkSize = getMaximumVoluntaryIntegerSize(CGM);

  // First pass: if two entries should be merged, make them both opaque
  // and stretch one to meet the next.
  // Also, remember if there are any opaque entries.
  bool hasOpaqueEntries = (Entries[0].Type == nullptr);
  for (size_t i = 1, e = Entries.size(); i != e; ++i) {
    if (shouldMergeEntries(Entries[i - 1], Entries[i], chunkSize)) {
      Entries[i - 1].Type = nullptr;
      Entries[i].Type = nullptr;
      Entries[i - 1].End = Entries[i].Begin;
      hasOpaqueEntries = true;

    } else if (Entries[i].Type == nullptr) {
      hasOpaqueEntries = true;
    }
  }

  // The rest of the algorithm leaves non-opaque entries alone, so if we
  // have no opaque entries, we're done.
  if (!hasOpaqueEntries) {
    Finished = true;
    return;
  }

  // Okay, move the entries to a temporary and rebuild Entries.
  auto orig = std::move(Entries);
  assert(Entries.empty());

  for (size_t i = 0, e = orig.size(); i != e; ++i) {
    // Just copy over non-opaque entries.
    if (orig[i].Type != nullptr) {
      Entries.push_back(orig[i]);
      continue;
    }

    // Scan forward to determine the full extent of the next opaque range.
    // We know from the first pass that only contiguous ranges will overlap
    // the same aligned chunk.
    auto begin = orig[i].Begin;
    auto end = orig[i].End;
    while (i + 1 != e &&
           orig[i + 1].Type == nullptr &&
           end == orig[i + 1].Begin) {
      end = orig[i + 1].End;
      i++;
    }

    // Add an entry per intersected chunk.
    do {
      // Find the smallest aligned storage unit in the maximal aligned
      // storage unit containing 'begin' that contains all the bytes in
      // the intersection between the range and this chunk.
      CharUnits localBegin = begin;
      CharUnits chunkBegin = getOffsetAtStartOfUnit(localBegin, chunkSize);
      CharUnits chunkEnd = chunkBegin + chunkSize;
      CharUnits localEnd = std::min(end, chunkEnd);

      // Just do a simple loop over ever-increasing unit sizes.
      CharUnits unitSize = CharUnits::One();
      CharUnits unitBegin, unitEnd;
      for (; ; unitSize *= 2) {
        assert(unitSize <= chunkSize);
        unitBegin = getOffsetAtStartOfUnit(localBegin, unitSize);
        unitEnd = unitBegin + unitSize;
        if (unitEnd >= localEnd) break;
      }

      // Add an entry for this unit.
      auto entryTy =
          llvm::IntegerType::get(CGM.getLLVMContext(),
                                 CGM.getContext().toBits(unitSize));
      Entries.push_back({unitBegin, unitEnd, entryTy});

      // The next chunk starts where this chunk left off.
      begin = localEnd;
    } while (begin != end);
  }

  // Okay, finally finished.
  Finished = true;
}

void SwiftAggLowering::enumerateComponents(EnumerationCallback callback) const {
  assert(Finished && "haven't yet finished lowering");

  for (auto &entry : Entries) {
    callback(entry.Begin, entry.End, entry.Type);
  }
}

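/// Return the types used to pass the lowered value coerce-and-expand style:
/// a (possibly packed) struct type that spells out the padding between
/// entries, and the corresponding type with the padding elements omitted.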
std::pair<llvm::StructType*, llvm::Type*>
SwiftAggLowering::getCoerceAndExpandTypes() const {
  assert(Finished && "haven't yet finished lowering");

  auto &ctx = CGM.getLLVMContext();

  if (Entries.empty()) {
    auto type = llvm::StructType::get(ctx);
    return { type, type };
  }

  SmallVector<llvm::Type*, 8> elts;
  CharUnits lastEnd = CharUnits::Zero();
  bool hasPadding = false;
  bool packed = false;
  for (auto &entry : Entries) {
    if (entry.Begin != lastEnd) {
      auto paddingSize = entry.Begin - lastEnd;
      assert(!paddingSize.isNegative());

      auto padding = llvm::ArrayType::get(llvm::Type::getInt8Ty(ctx),
                                          paddingSize.getQuantity());
      elts.push_back(padding);
      hasPadding = true;
    }

    if (!packed && !entry.Begin.isMultipleOf(CharUnits::fromQuantity(
            CGM.getDataLayout().getABITypeAlign(entry.Type))))
      packed = true;

    elts.push_back(entry.Type);

    lastEnd = entry.Begin + getTypeAllocSize(CGM, entry.Type);
    assert(entry.End <= lastEnd);
  }

  // We don't need to adjust 'packed' to deal with possible tail padding
  // because we never do that kind of access through the coercion type.
  auto coercionType = llvm::StructType::get(ctx, elts, packed);

  llvm::Type *unpaddedType = coercionType;
  if (hasPadding) {
    elts.clear();
    for (auto &entry : Entries) {
      elts.push_back(entry.Type);
    }
    if (elts.size() == 1) {
      unpaddedType = elts[0];
    } else {
      unpaddedType = llvm::StructType::get(ctx, elts, /*packed*/ false);
    }
  } else if (Entries.size() == 1) {
    unpaddedType = Entries[0].Type;
  }

  return { coercionType, unpaddedType };
}

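/// Should the lowered value be passed (or returned) indirectly, i.e. by
/// address, according to the target's Swift ABI?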
bool SwiftAggLowering::shouldPassIndirectly(bool asReturnValue) const {
  assert(Finished && "haven't yet finished lowering");

  // Empty types don't need to be passed indirectly.
  if (Entries.empty()) return false;

  // Avoid copying the array of types when there's just a single element.
  if (Entries.size() == 1) {
    return getSwiftABIInfo(CGM).shouldPassIndirectly(Entries.back().Type,
                                                     asReturnValue);
  }

  SmallVector<llvm::Type*, 8> componentTys;
  componentTys.reserve(Entries.size());
  for (auto &entry : Entries) {
    componentTys.push_back(entry.Type);
  }
  return getSwiftABIInfo(CGM).shouldPassIndirectly(componentTys, asReturnValue);
}

bool swiftcall::shouldPassIndirectly(CodeGenModule &CGM,
                                     ArrayRef<llvm::Type*> componentTys,
                                     bool asReturnValue) {
  return getSwiftABIInfo(CGM).shouldPassIndirectly(componentTys, asReturnValue);
}

CharUnits swiftcall::getMaximumVoluntaryIntegerSize(CodeGenModule &CGM) {
  // Currently always the size of an ordinary pointer.
  return CGM.getContext().toCharUnitsFromBits(
      CGM.getContext().getTargetInfo().getPointerWidth(LangAS::Default));
}

CharUnits swiftcall::getNaturalAlignment(CodeGenModule &CGM, llvm::Type *type) {
  // For Swift's purposes, this is always just the store size of the type
  // rounded up to a power of 2.
  auto size = (unsigned long long) getTypeStoreSize(CGM, type).getQuantity();
  size = llvm::bit_ceil(size);
  assert(CGM.getDataLayout().getABITypeAlign(type) <= size);
  return CharUnits::fromQuantity(size);
}

bool swiftcall::isLegalIntegerType(CodeGenModule &CGM,
                                   llvm::IntegerType *intTy) {
  auto size = intTy->getBitWidth();
  switch (size) {
  case 1:
  case 8:
  case 16:
  case 32:
  case 64:
    // Just assume that the above are always legal.
    return true;

  case 128:
    return CGM.getContext().getTargetInfo().hasInt128Type();

  default:
    return false;
  }
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::VectorType *vectorTy) {
  return isLegalVectorType(
      CGM, vectorSize, vectorTy->getElementType(),
      cast<llvm::FixedVectorType>(vectorTy)->getNumElements());
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::Type *eltTy, unsigned numElts) {
  assert(numElts > 1 && "illegal vector length");
  return getSwiftABIInfo(CGM).isLegalVectorType(vectorSize, eltTy, numElts);
}

std::pair<llvm::Type*, unsigned>
swiftcall::splitLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                llvm::VectorType *vectorTy) {
  auto numElts = cast<llvm::FixedVectorType>(vectorTy)->getNumElements();
  auto eltTy = vectorTy->getElementType();

  // Try to split the vector type in half.
  if (numElts >= 4 && isPowerOf2(numElts)) {
    if (isLegalVectorType(CGM, vectorSize / 2, eltTy, numElts / 2))
      return {llvm::FixedVectorType::get(eltTy, numElts / 2), 2};
  }

  return {eltTy, numElts};
}

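/// Break the given vector type down into a sequence of legal component vector
/// types, possibly followed by a run of individual scalar elements. For
/// example, on a target where <4 x float> is legal but <5 x float> is not,
/// a <5 x float> is legalized to { <4 x float>, float }.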
void swiftcall::legalizeVectorType(CodeGenModule &CGM, CharUnits origVectorSize,
                                   llvm::VectorType *origVectorTy,
                                   llvm::SmallVectorImpl<llvm::Type*> &components) {
  // If it's already a legal vector type, use it.
  if (isLegalVectorType(CGM, origVectorSize, origVectorTy)) {
    components.push_back(origVectorTy);
    return;
  }

  // Try to split the vector into legal subvectors.
  auto numElts = cast<llvm::FixedVectorType>(origVectorTy)->getNumElements();
  auto eltTy = origVectorTy->getElementType();
  assert(numElts != 1);

  // The largest size that we're still considering making subvectors of.
  // Always a power of 2.
  unsigned logCandidateNumElts = llvm::Log2_32(numElts);
  unsigned candidateNumElts = 1U << logCandidateNumElts;
  assert(candidateNumElts <= numElts && candidateNumElts * 2 > numElts);

  // Minor optimization: don't check the legality of this exact size twice.
  if (candidateNumElts == numElts) {
    logCandidateNumElts--;
    candidateNumElts >>= 1;
  }

  CharUnits eltSize = (origVectorSize / numElts);
  CharUnits candidateSize = eltSize * candidateNumElts;

  // The sensibility of this algorithm relies on the fact that we never
  // have a legal non-power-of-2 vector size without having the power of 2
  // also be legal.
  while (logCandidateNumElts > 0) {
    assert(candidateNumElts == 1U << logCandidateNumElts);
    assert(candidateNumElts <= numElts);
    assert(candidateSize == eltSize * candidateNumElts);

    // Skip illegal vector sizes.
    if (!isLegalVectorType(CGM, candidateSize, eltTy, candidateNumElts)) {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
      continue;
    }

    // Add the right number of vectors of this size.
    auto numVecs = numElts >> logCandidateNumElts;
    components.append(numVecs,
                      llvm::FixedVectorType::get(eltTy, candidateNumElts));
    numElts -= (numVecs << logCandidateNumElts);

    if (numElts == 0) return;

    // It's possible that the number of elements remaining will be legal.
    // This can happen with e.g. <7 x float> when <3 x float> is legal.
    // This only needs to be separately checked if it's not a power of 2.
    if (numElts > 2 && !isPowerOf2(numElts) &&
        isLegalVectorType(CGM, eltSize * numElts, eltTy, numElts)) {
      components.push_back(llvm::FixedVectorType::get(eltTy, numElts));
      return;
    }

    // Bring candidateNumElts down to something no larger than numElts.
    do {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
    } while (candidateNumElts > numElts);
  }

  // Otherwise, just append a bunch of individual elements.
  components.append(numElts, eltTy);
}

bool swiftcall::mustPassRecordIndirectly(CodeGenModule &CGM,
                                         const RecordDecl *record) {
  // FIXME: should we not rely on the standard computation in Sema, just in
  // case we want to diverge from the platform ABI (e.g. on targets where
  // that uses the MSVC rule)?
  return !record->canPassInRegisters();
}

static ABIArgInfo classifyExpandedType(SwiftAggLowering &lowering,
                                       bool forReturn,
                                       CharUnits alignmentForIndirect) {
  if (lowering.empty()) {
    return ABIArgInfo::getIgnore();
  } else if (lowering.shouldPassIndirectly(forReturn)) {
    return ABIArgInfo::getIndirect(alignmentForIndirect, /*byval*/ false);
  } else {
    auto types = lowering.getCoerceAndExpandTypes();
    return ABIArgInfo::getCoerceAndExpand(types.first, types.second);
  }
}

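/// Classify how a single Swift argument or return value of the given type
/// should be passed: records and vectors are lowered through
/// SwiftAggLowering, while complex, void, and other scalar types are
/// expanded, ignored, or passed directly.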
static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type,
                               bool forReturn) {
  if (auto recordType = dyn_cast<RecordType>(type)) {
    auto record = recordType->getDecl();
    auto &layout = CGM.getContext().getASTRecordLayout(record);

    if (mustPassRecordIndirectly(CGM, record))
      return ABIArgInfo::getIndirect(layout.getAlignment(), /*byval*/ false);

    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(recordType->getDecl(), CharUnits::Zero(), layout);
    lowering.finish();

    return classifyExpandedType(lowering, forReturn, layout.getAlignment());
  }

  // Just assume that all of our target ABIs can support returning at least
  // two integer or floating-point values.
  if (isa<ComplexType>(type)) {
    return (forReturn ? ABIArgInfo::getDirect() : ABIArgInfo::getExpand());
  }

  // Vector types may need to be legalized.
  if (isa<VectorType>(type)) {
    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(type, CharUnits::Zero());
    lowering.finish();

    CharUnits alignment = CGM.getContext().getTypeAlignInChars(type);
    return classifyExpandedType(lowering, forReturn, alignment);
  }

  // Member pointer types need to be expanded, but it's a simple form of
  // expansion that 'Direct' can handle. Note that CanBeFlattened should be
  // true for this to work.

  // 'void' needs to be ignored.
  if (type->isVoidType()) {
    return ABIArgInfo::getIgnore();
  }

  // Everything else can be passed directly.
  return ABIArgInfo::getDirect();
}

ABIArgInfo swiftcall::classifyReturnType(CodeGenModule &CGM, CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ true);
}

ABIArgInfo swiftcall::classifyArgumentType(CodeGenModule &CGM,
                                           CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ false);
}

void swiftcall::computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  auto &retInfo = FI.getReturnInfo();
  retInfo = classifyReturnType(CGM, FI.getReturnType());

  for (unsigned i = 0, e = FI.arg_size(); i != e; ++i) {
    auto &argInfo = FI.arg_begin()[i];
    argInfo.info = classifyArgumentType(CGM, argInfo.type);
  }
}

// Is swifterror lowered to a register by the target ABI?
bool swiftcall::isSwiftErrorLoweredInRegister(CodeGenModule &CGM) {
  return getSwiftABIInfo(CGM).isSwiftErrorInRegister();
}