#include "CIRGenTypes.h"

#include "CIRGenFunctionInfo.h"
#include "CIRGenModule.h"

#include "clang/AST/ASTContext.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Type.h"
#include "clang/Basic/TargetInfo.h"

#include <cassert>

using namespace clang;
using namespace clang::CIRGen;

CIRGenTypes::CIRGenTypes(CIRGenModule &genModule)
    : cgm(genModule), astContext(genModule.getASTContext()),
      builder(cgm.getBuilder()), theCXXABI(cgm.getCXXABI()),
      theABIInfo(cgm.getTargetCIRGenInfo().getABIInfo()) {}

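// Free the CIRGenFunctionInfo objects stored in the functionInfos folding set.
// The iterator is advanced before each delete so it stays valid.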
CIRGenTypes::~CIRGenTypes() {
  for (auto i = functionInfos.begin(), e = functionInfos.end(); i != e;)
    delete &*i++;
}

mlir::MLIRContext &CIRGenTypes::getMLIRContext() const {
  return *builder.getContext();
}

/// Return true if the specified type in a function parameter or result
/// position can be converted to a CIR type at this point. This boils down to
/// whether the type is complete, as well as whether we've temporarily deferred
/// expanding it because we're in a recursive context.
bool CIRGenTypes::isFuncParamTypeConvertible(clang::QualType type) {
  // Some ABIs cannot have their member pointers represented in LLVM IR unless
  // certain circumstances have been reached.
  assert(!type->getAs<MemberPointerType>() && "NYI");

  // If this isn't a tag type, we can convert it.
  const TagType *tagType = type->getAs<TagType>();
  if (!tagType)
    return true;

  // Function types involving incomplete class types are problematic in MLIR.
  return !tagType->isIncompleteType();
}

/// Code to verify a given function type is complete, i.e. the return type and
/// all of the parameter types are complete. Also check to see if we are in a
/// RS_StructPointer context, and if so whether any struct types have been
/// pended. If so, we don't want to ask the ABI lowering code to handle a type
/// that cannot be converted to a CIR type.
bool CIRGenTypes::isFuncTypeConvertible(const FunctionType *ft) {
  if (!isFuncParamTypeConvertible(ft->getReturnType()))
    return false;

  if (const auto *fpt = dyn_cast<FunctionProtoType>(ft))
    for (unsigned i = 0, e = fpt->getNumParams(); i != e; i++)
      if (!isFuncParamTypeConvertible(fpt->getParamType(i)))
        return false;

  return true;
}

mlir::Type CIRGenTypes::convertFunctionTypeInternal(QualType qft) {
  assert(qft.isCanonical());
  const FunctionType *ft = cast<FunctionType>(qft.getTypePtr());
  // First, check whether we can build the full function type. If the function
  // type depends on an incomplete type (e.g. a struct or enum), we cannot
  // lower the function type.
  if (!isFuncTypeConvertible(ft)) {
    cgm.errorNYI(SourceLocation(),
                 "function type involving an incomplete type", qft);
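    // Return a trivial placeholder: a function type taking no arguments and
    // returning void.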
    return cir::FuncType::get(SmallVector<mlir::Type, 1>{}, cgm.VoidTy);
  }

  const CIRGenFunctionInfo *fi;
  if (const auto *fpt = dyn_cast<FunctionProtoType>(ft)) {
    fi = &arrangeFreeFunctionType(
        CanQual<FunctionProtoType>::CreateUnsafe(QualType(fpt, 0)));
  } else {
    const FunctionNoProtoType *fnpt = cast<FunctionNoProtoType>(ft);
    fi = &arrangeFreeFunctionType(
        CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(fnpt, 0)));
  }

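  // Build the CIR function type from the arranged function info.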
  mlir::Type resultType = getFunctionType(*fi);

  return resultType;
}

// This is CIR's version of CodeGenTypes::addRecordTypeName. It isn't shareable
// because CIR has different uniquing requirements.
std::string CIRGenTypes::getRecordTypeName(const clang::RecordDecl *recordDecl,
                                           StringRef suffix) {
  llvm::SmallString<256> typeName;
  llvm::raw_svector_ostream outStream(typeName);

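  // Configure the printing policy: print inline namespaces, always include
  // types for template arguments, print canonical types, and drop the tag
  // keyword.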
  PrintingPolicy policy = recordDecl->getASTContext().getPrintingPolicy();
  policy.SuppressInlineNamespace = false;
  policy.AlwaysIncludeTypeForTemplateArgument = true;
  policy.PrintAsCanonical = true;
  policy.SuppressTagKeyword = true;

  if (recordDecl->getIdentifier())
    astContext.getRecordType(recordDecl).print(outStream, policy);
  else if (auto *typedefNameDecl = recordDecl->getTypedefNameForAnonDecl())
    typedefNameDecl->printQualifiedName(outStream, policy);
  else
    outStream << builder.getUniqueAnonRecordName();

  if (!suffix.empty())
    outStream << suffix;

  return builder.getUniqueRecordName(std::string(typeName));
}

/// Return true if the specified type is already completely laid out.
bool CIRGenTypes::isRecordLayoutComplete(const Type *ty) const {
  const auto it = recordDeclTypes.find(ty);
  return it != recordDeclTypes.end() && it->second.isComplete();
}

// We have multiple forms of this function that call each other, so we need to
// declare one in advance.
static bool
isSafeToConvert(QualType qt, CIRGenTypes &cgt,
                llvm::SmallPtrSetImpl<const RecordDecl *> &alreadyChecked);

/// Return true if it is safe to convert the specified record decl to CIR and
/// lay it out, false if doing so would cause us to get into a recursive
/// compilation mess.
static bool
isSafeToConvert(const RecordDecl *rd, CIRGenTypes &cgt,
                llvm::SmallPtrSetImpl<const RecordDecl *> &alreadyChecked) {
  // If we have already checked this type (maybe the same type is used by-value
  // multiple times in multiple record fields), don't check again.
  if (!alreadyChecked.insert(rd).second)
    return true;

  const Type *key = cgt.getASTContext().getTagDeclType(rd).getTypePtr();

  // If this type is already laid out, converting it is a noop.
  if (cgt.isRecordLayoutComplete(key))
    return true;

  // If this type is currently being laid out, we can't recursively compile it.
  if (cgt.isRecordBeingLaidOut(key))
    return false;

  // If this type would require laying out bases that are currently being laid
  // out, don't do it. This includes virtual base classes which get laid out
  // when a class is translated, even though they aren't embedded by-value into
  // the class.
  if (auto *crd = dyn_cast<CXXRecordDecl>(rd)) {
    if (crd->getNumBases() > 0) {
      assert(!cir::MissingFeatures::cxxSupport());
      cgt.getCGModule().errorNYI(rd->getSourceRange(),
                                 "isSafeToConvert: CXXRecordDecl with bases");
      return false;
    }
  }

  // If this type would require laying out members that are currently being
  // laid out, don't do it.
  for (const FieldDecl *field : rd->fields())
    if (!isSafeToConvert(field->getType(), cgt, alreadyChecked))
      return false;

  // If there are no problems, let's do it.
  return true;
}

/// Return true if it is safe to convert this field type, which requires the
/// record elements contained by-value to all be recursively safe to convert.
static bool
isSafeToConvert(QualType qt, CIRGenTypes &cgt,
                llvm::SmallPtrSetImpl<const RecordDecl *> &alreadyChecked) {
  // Strip off atomic type sugar.
  if (const auto *at = qt->getAs<AtomicType>())
    qt = at->getValueType();

  // If this is a record, check it.
  if (const auto *rt = qt->getAs<RecordType>())
    return isSafeToConvert(rt->getDecl(), cgt, alreadyChecked);

  // If this is an array, check the elements, which are embedded inline.
  if (const auto *at = cgt.getASTContext().getAsArrayType(qt))
    return isSafeToConvert(at->getElementType(), cgt, alreadyChecked);

  // Otherwise, there is no concern about transforming this. We only care about
  // things that are contained by-value in a record that can have another
  // record as a member.
  return true;
}

// Return true if it is safe to convert the specified record decl to CIR and
// lay it out, false if doing so would cause us to get into a recursive
// compilation mess.
static bool isSafeToConvert(const RecordDecl *rd, CIRGenTypes &cgt) {
  // If no records are being laid out, we can certainly do this one.
  if (cgt.noRecordsBeingLaidOut())
    return true;

  llvm::SmallPtrSet<const RecordDecl *, 16> alreadyChecked;
  return isSafeToConvert(rd, cgt, alreadyChecked);
}

/// Lay out a tagged decl type like struct or union.
mlir::Type CIRGenTypes::convertRecordDeclType(const clang::RecordDecl *rd) {
  // TagDecls are not necessarily unique; instead use the (clang) type
  // connected to the decl.
  const Type *key = astContext.getTagDeclType(rd).getTypePtr();
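  // Note that operator[] default-constructs a null RecordType if no entry
  // exists for this key yet; the check below then creates the incomplete type.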
  cir::RecordType entry = recordDeclTypes[key];

  // If we don't have an entry for this record yet, create one.
  // We create an incomplete type initially. If `rd` is complete, we will
  // add the members below.
  if (!entry) {
    auto name = getRecordTypeName(rd, "");
    entry = builder.getIncompleteRecordTy(name, rd);
    recordDeclTypes[key] = entry;
  }

  rd = rd->getDefinition();
  if (!rd || !rd->isCompleteDefinition() || entry.isComplete())
    return entry;

  // If converting this type would cause us to infinitely loop, don't do it!
  if (!isSafeToConvert(rd, *this)) {
    deferredRecords.push_back(rd);
    return entry;
  }

  // Okay, this is a definition of a type. Compile the implementation now.
  bool insertResult = recordsBeingLaidOut.insert(key).second;
  (void)insertResult;
  assert(insertResult && "isSafeToConvert() should have caught this.");

  // Force conversion of non-virtual base classes recursively.
  if (const auto *cxxRecordDecl = dyn_cast<CXXRecordDecl>(rd)) {
    for (const auto &base : cxxRecordDecl->bases()) {
      if (base.isVirtual())
        continue;
      convertRecordDeclType(base.getType()->castAs<RecordType>()->getDecl());
    }
  }

  // Layout fields.
  std::unique_ptr<CIRGenRecordLayout> layout = computeRecordLayout(rd, &entry);
  recordDeclTypes[key] = entry;
  cirGenRecordLayouts[key] = std::move(layout);

  // We're done laying out this record.
  bool eraseResult = recordsBeingLaidOut.erase(key);
  (void)eraseResult;
  assert(eraseResult && "record not in RecordsBeingLaidOut set?");

  // If this record blocked a FunctionType conversion, then recompute whatever
  // was derived from that.
  assert(!cir::MissingFeatures::skippedLayout());

  // If we're done converting the outer-most record, then convert any deferred
  // records as well.
  if (recordsBeingLaidOut.empty())
    while (!deferredRecords.empty())
      convertRecordDeclType(deferredRecords.pop_back_val());

  return entry;
}

mlir::Type CIRGenTypes::convertType(QualType type) {
  type = astContext.getCanonicalType(type);
  const Type *ty = type.getTypePtr();

  // Process record types before the type cache lookup.
  if (const auto *recordType = dyn_cast<RecordType>(type))
    return convertRecordDeclType(recordType->getDecl());

  // Has the type already been processed?
  TypeCacheTy::iterator tci = typeCache.find(ty);
  if (tci != typeCache.end())
    return tci->second;

  // For types that haven't been implemented yet or are otherwise unsupported,
  // report an error and return 'int'.

  mlir::Type resultType = nullptr;
  switch (ty->getTypeClass()) {
  case Type::Record:
    llvm_unreachable("Should have been handled above");

  case Type::Builtin: {
    switch (cast<BuiltinType>(ty)->getKind()) {
    // void
    case BuiltinType::Void:
      resultType = cgm.VoidTy;
      break;

    // bool
    case BuiltinType::Bool:
      resultType = cir::BoolType::get(&getMLIRContext());
      break;

    // Signed integral types.
    case BuiltinType::Char_S:
    case BuiltinType::Int:
    case BuiltinType::Int128:
    case BuiltinType::Long:
    case BuiltinType::LongLong:
    case BuiltinType::SChar:
    case BuiltinType::Short:
    case BuiltinType::WChar_S:
      resultType =
          cir::IntType::get(&getMLIRContext(), astContext.getTypeSize(ty),
                            /*isSigned=*/true);
      break;
    // Unsigned integral types.
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
    case BuiltinType::Char_U:
    case BuiltinType::UChar:
    case BuiltinType::UInt:
    case BuiltinType::UInt128:
    case BuiltinType::ULong:
    case BuiltinType::ULongLong:
    case BuiltinType::UShort:
    case BuiltinType::WChar_U:
      resultType =
          cir::IntType::get(&getMLIRContext(), astContext.getTypeSize(ty),
                            /*isSigned=*/false);
      break;

    // Floating-point types
    case BuiltinType::Float16:
      resultType = cgm.FP16Ty;
      break;
    case BuiltinType::Half:
      if (astContext.getLangOpts().NativeHalfType ||
          !astContext.getTargetInfo().useFP16ConversionIntrinsics()) {
        resultType = cgm.FP16Ty;
      } else {
        cgm.errorNYI(SourceLocation(), "processing of built-in type", type);
        resultType = cgm.SInt32Ty;
      }
      break;
    case BuiltinType::BFloat16:
      resultType = cgm.BFloat16Ty;
      break;
    case BuiltinType::Float:
      assert(&astContext.getFloatTypeSemantics(type) ==
                 &llvm::APFloat::IEEEsingle() &&
             "ClangIR NYI: 'float' in a format other than IEEE 32-bit");
      resultType = cgm.FloatTy;
      break;
    case BuiltinType::Double:
      assert(&astContext.getFloatTypeSemantics(type) ==
                 &llvm::APFloat::IEEEdouble() &&
             "ClangIR NYI: 'double' in a format other than IEEE 64-bit");
      resultType = cgm.DoubleTy;
      break;
    case BuiltinType::LongDouble:
      resultType =
          builder.getLongDoubleTy(astContext.getFloatTypeSemantics(type));
      break;
    case BuiltinType::Float128:
      resultType = cgm.FP128Ty;
      break;
    case BuiltinType::Ibm128:
      cgm.errorNYI(SourceLocation(), "processing of built-in type", type);
      resultType = cgm.SInt32Ty;
      break;

    case BuiltinType::NullPtr:
      // Should we add a dedicated CIR type for this? That looks mostly useful
      // for Sema-related things (like overloads accepting void). For now,
      // given that `sizeof(std::nullptr_t)` is equal to `sizeof(void *)`,
      // model std::nullptr_t as !cir.ptr<!void>.
      resultType = builder.getVoidPtrTy();
      break;

    default:
      cgm.errorNYI(SourceLocation(), "processing of built-in type", type);
      resultType = cgm.SInt32Ty;
      break;
    }
    break;
  }

  case Type::Complex: {
    const auto *ct = cast<clang::ComplexType>(ty);
    mlir::Type elementTy = convertType(ct->getElementType());
    resultType = cir::ComplexType::get(elementTy);
    break;
  }

  case Type::LValueReference:
  case Type::RValueReference: {
    const ReferenceType *refTy = cast<ReferenceType>(ty);
    QualType elemTy = refTy->getPointeeType();
    auto pointeeType = convertTypeForMem(elemTy);
    resultType = builder.getPointerTo(pointeeType);
    assert(resultType && "Cannot get pointer type?");
    break;
  }

  case Type::Pointer: {
    const PointerType *ptrTy = cast<PointerType>(ty);
    QualType elemTy = ptrTy->getPointeeType();
    assert(!elemTy->isConstantMatrixType() && "not implemented");

    mlir::Type pointeeType = convertType(elemTy);

    resultType = builder.getPointerTo(pointeeType);
    break;
  }

  case Type::ConstantArray: {
    const ConstantArrayType *arrTy = cast<ConstantArrayType>(ty);
    mlir::Type elemTy = convertTypeForMem(arrTy->getElementType());
    resultType = cir::ArrayType::get(elemTy, arrTy->getSize().getZExtValue());
    break;
  }

  case Type::ExtVector:
  case Type::Vector: {
    const VectorType *vec = cast<VectorType>(ty);
    const mlir::Type elemTy = convertType(vec->getElementType());
    resultType = cir::VectorType::get(elemTy, vec->getNumElements());
    break;
  }

  case Type::Enum: {
    const EnumDecl *ED = cast<EnumType>(ty)->getDecl();
    if (auto integerType = ED->getIntegerType(); !integerType.isNull())
      return convertType(integerType);
    // Return a placeholder 'i32' type. This can be changed later when the
    // type is defined (see UpdateCompletedType), but is likely to be the
    // "right" answer.
    resultType = cgm.UInt32Ty;
    break;
  }

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    resultType = convertFunctionTypeInternal(type);
    break;

  case Type::BitInt: {
    const auto *bitIntTy = cast<BitIntType>(type);
    if (bitIntTy->getNumBits() > cir::IntType::maxBitwidth()) {
      cgm.errorNYI(SourceLocation(), "large _BitInt type", type);
      resultType = cgm.SInt32Ty;
    } else {
      resultType = cir::IntType::get(&getMLIRContext(), bitIntTy->getNumBits(),
                                     bitIntTy->isSigned());
    }
    break;
  }

  default:
    cgm.errorNYI(SourceLocation(), "processing of type",
                 type->getTypeClassName());
    resultType = cgm.SInt32Ty;
    break;
  }

  assert(resultType && "Type conversion not yet implemented");

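  // Cache the converted type so subsequent conversions of the same clang type
  // are simple lookups.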
  typeCache[ty] = resultType;
  return resultType;
}

mlir::Type CIRGenTypes::convertTypeForMem(clang::QualType qualType,
                                          bool forBitField) {
  assert(!qualType->isConstantMatrixType() && "Matrix types NYI");

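  // For now, the in-memory representation is the same as the scalar type
  // produced by convertType.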
  mlir::Type convertedType = convertType(qualType);

  assert(!forBitField && "Bit fields NYI");

  // If this is a bit-precise integer type in a bitfield representation, map
  // this integer to the target-specified size.
  if (forBitField && qualType->isBitIntType())
    assert(!qualType->isBitIntType() && "Bit field with type _BitInt NYI");

  return convertedType;
}

/// Return record layout info for the given record decl.
const CIRGenRecordLayout &
CIRGenTypes::getCIRGenRecordLayout(const RecordDecl *rd) {
  const auto *key = astContext.getTagDeclType(rd).getTypePtr();

  // If we have already computed the layout, return it.
  auto it = cirGenRecordLayouts.find(key);
  if (it != cirGenRecordLayouts.end())
    return *it->second;

  // Compute the type information.
  convertRecordDeclType(rd);

  // Now try again.
  it = cirGenRecordLayouts.find(key);

  assert(it != cirGenRecordLayouts.end() &&
         "Unable to find record layout information for type");
  return *it->second;
}

bool CIRGenTypes::isZeroInitializable(clang::QualType t) {
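  // A pointer is zero-initializable only if the target represents the null
  // pointer value as all-zero bits.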
  if (t->getAs<PointerType>())
    return astContext.getTargetNullPointerValue(t) == 0;

  if (const auto *at = astContext.getAsArrayType(t)) {
    if (isa<IncompleteArrayType>(at))
      return true;

    if (const auto *cat = dyn_cast<ConstantArrayType>(at))
      if (astContext.getConstantArrayElementCount(cat) == 0)
        return true;
  }

  if (const RecordType *rt = t->getAs<RecordType>()) {
    const RecordDecl *rd = rt->getDecl();
    return isZeroInitializable(rd);
  }

  if (t->getAs<MemberPointerType>()) {
    cgm.errorNYI(SourceLocation(), "isZeroInitializable for MemberPointerType",
                 t);
    return false;
  }

  return true;
}

bool CIRGenTypes::isZeroInitializable(const RecordDecl *rd) {
  return getCIRGenRecordLayout(rd).isZeroInitializable();
}

const CIRGenFunctionInfo &
CIRGenTypes::arrangeCIRFunctionInfo(CanQualType returnType,
                                    llvm::ArrayRef<CanQualType> argTypes,
                                    RequiredArgs required) {
  assert(llvm::all_of(argTypes,
                      [](CanQualType t) { return t.isCanonicalAsParam(); }));
  // Lookup or create unique function info.
  llvm::FoldingSetNodeID id;
  CIRGenFunctionInfo::Profile(id, required, returnType, argTypes);

  void *insertPos = nullptr;
  CIRGenFunctionInfo *fi = functionInfos.FindNodeOrInsertPos(id, insertPos);
  if (fi) {
    // We found a matching function info based on id. These asserts verify that
    // it really is a match.
    assert(
        fi->getReturnType() == returnType &&
        std::equal(fi->argTypesBegin(), fi->argTypesEnd(), argTypes.begin()) &&
        "Bad match based on CIRGenFunctionInfo folding set id");
    return *fi;
  }

  assert(!cir::MissingFeatures::opCallCallConv());

  // Construct the function info. We co-allocate the ArgInfos.
  fi = CIRGenFunctionInfo::create(returnType, argTypes, required);
  functionInfos.InsertNode(fi, insertPos);

  return *fi;
}

const CIRGenFunctionInfo &CIRGenTypes::arrangeGlobalDeclaration(GlobalDecl gd) {
  assert(!dyn_cast<ObjCMethodDecl>(gd.getDecl()) &&
         "This is reported as a FIXME in LLVM codegen");
  const auto *fd = cast<FunctionDecl>(gd.getDecl());

  if (isa<CXXConstructorDecl>(gd.getDecl()) ||
      isa<CXXDestructorDecl>(gd.getDecl())) {
    cgm.errorNYI(SourceLocation(),
                 "arrangeGlobalDeclaration for C++ constructor or destructor");
  }

  return arrangeFunctionDeclaration(fd);
}

// When we find the full definition for a TagDecl, replace the 'opaque' type we
// previously made for it if applicable.
void CIRGenTypes::updateCompletedType(const TagDecl *td) {
  // If this is an enum being completed, then we flush all non-struct types
  // from the cache. This allows function types and other things that may be
  // derived from the enum to be recomputed.
  if (const auto *ed = dyn_cast<EnumDecl>(td)) {
    // Classic codegen clears the type cache if it contains an entry for this
    // enum type that doesn't use i32 as the underlying type, but I can't find
    // a test case that meets that condition. C++ doesn't allow forward
    // declaration of enums, and C doesn't allow an incomplete forward
    // declaration with a non-default type.
    assert(
        !typeCache.count(ed->getTypeForDecl()) ||
        (convertType(ed->getIntegerType()) == typeCache[ed->getTypeForDecl()]));
    // If necessary, provide the full definition of a type only used with a
    // declaration so far.
    assert(!cir::MissingFeatures::generateDebugInfo());
    return;
  }

  // If we completed a RecordDecl that we previously used and converted to an
  // anonymous type, then go ahead and complete it now.
  const auto *rd = cast<RecordDecl>(td);
  if (rd->isDependentType())
    return;

  // Only complete if we converted it already. If we haven't converted it yet,
  // we'll just do it lazily.
  if (recordDeclTypes.count(astContext.getTagDeclType(rd).getTypePtr()))
    convertRecordDeclType(rd);

  // If necessary, provide the full definition of a type only used with a
  // declaration so far.
  assert(!cir::MissingFeatures::generateDebugInfo());
}