//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as CIR code.
//
//===----------------------------------------------------------------------===//

#include "Address.h"
#include "CIRGenConstantEmitter.h"
#include "CIRGenFunction.h"
#include "CIRGenModule.h"
#include "CIRGenValue.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/Value.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/CIR/Dialect/IR/CIRDialect.h"
#include "clang/CIR/MissingFeatures.h"
#include <optional>

using namespace clang;
using namespace clang::CIRGen;
using namespace cir;
/// Get the address of a field within a record. The resulting address doesn't
/// necessarily have the right type.
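///
/// As a rough illustration (schematic CIR, not verified output), for
///
///   struct S { int a; long b; };
///
/// an access to the storage of `b` emits something along the lines of
///
///   %b_addr = cir.get_member %s_addr[1] {name = "b"}
///       : !cir.ptr<!rec_S> -> !cir.ptr<!s64i>
///
/// with the result alignment derived from the record layout's element offset.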
Address CIRGenFunction::emitAddrOfFieldStorage(Address base,
                                               const FieldDecl *field,
                                               llvm::StringRef fieldName,
                                               unsigned fieldIndex) {
  if (field->isZeroSize(getContext())) {
    cgm.errorNYI(field->getSourceRange(),
                 "emitAddrOfFieldStorage: zero-sized field");
    return Address::invalid();
  }

  mlir::Location loc = getLoc(field->getLocation());

  mlir::Type fieldType = convertType(field->getType());
  auto fieldPtr = cir::PointerType::get(fieldType);
  // In most cases fieldName is the same as field->getName(), but lambda
  // captures do not currently carry the name, so for them it can be passed
  // down from the CaptureStmt.
  cir::GetMemberOp memberAddr = builder.createGetMember(
      loc, fieldPtr, base.getPointer(), fieldName, fieldIndex);

  // Retrieve layout information, compute alignment and return the final
  // address.
  const RecordDecl *rec = field->getParent();
  const CIRGenRecordLayout &layout = cgm.getTypes().getCIRGenRecordLayout(rec);
  unsigned idx = layout.getCIRFieldNo(field);
  CharUnits offset = CharUnits::fromQuantity(
      layout.getCIRType().getElementOffset(cgm.getDataLayout().layout, idx));
  return Address(memberAddr, base.getAlignment().alignmentAtOffset(offset));
}

/// Given an expression of pointer type, try to
/// derive a more accurate bound on the alignment of the pointer.
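///
/// For example (illustrative): through a `struct alignas(16) S *p`, the
/// pointee type lets us assume 16-byte alignment, while the same value seen
/// as a `char *` would only justify an alignment of 1; walking casts and
/// address-of expressions can recover the stronger bound.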
Address CIRGenFunction::emitPointerWithAlignment(const Expr *expr,
                                                 LValueBaseInfo *baseInfo) {
  // We allow this with ObjC object pointers because of fragile ABIs.
  assert(expr->getType()->isPointerType() ||
         expr->getType()->isObjCObjectPointerType());
  expr = expr->IgnoreParens();

  // Casts:
  if (auto const *ce = dyn_cast<CastExpr>(expr)) {
    if (isa<ExplicitCastExpr>(ce)) {
      cgm.errorNYI(expr->getSourceRange(),
                   "emitPointerWithAlignment: explicit cast");
      return Address::invalid();
    }

    switch (ce->getCastKind()) {
    // Non-converting casts (but not C's implicit conversion from void*).
    case CK_BitCast:
    case CK_NoOp:
    case CK_AddressSpaceConversion: {
      cgm.errorNYI(expr->getSourceRange(),
                   "emitPointerWithAlignment: noop cast");
      return Address::invalid();
    }

    // Array-to-pointer decay. TODO(cir): BaseInfo and TBAAInfo.
    case CK_ArrayToPointerDecay: {
      cgm.errorNYI(expr->getSourceRange(),
                   "emitPointerWithAlignment: array-to-pointer decay");
      return Address::invalid();
    }

    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      assert(!cir::MissingFeatures::opTBAA());
      assert(!cir::MissingFeatures::addressIsKnownNonNull());
      Address addr = emitPointerWithAlignment(ce->getSubExpr(), baseInfo);
      const CXXRecordDecl *derived =
          ce->getSubExpr()->getType()->getPointeeCXXRecordDecl();
      return getAddressOfBaseClass(addr, derived, ce->path(),
                                   shouldNullCheckClassCastValue(ce),
                                   ce->getExprLoc());
    }

    case CK_AnyPointerToBlockPointerCast:
    case CK_BaseToDerived:
    case CK_BaseToDerivedMemberPointer:
    case CK_BlockPointerToObjCPointerCast:
    case CK_BuiltinFnToFnPtr:
    case CK_CPointerToObjCPointerCast:
    case CK_DerivedToBaseMemberPointer:
    case CK_Dynamic:
    case CK_FunctionToPointerDecay:
    case CK_IntegralToPointer:
    case CK_LValueToRValue:
    case CK_LValueToRValueBitCast:
    case CK_NullToMemberPointer:
    case CK_NullToPointer:
    case CK_ReinterpretMemberPointer:
      // Common pointer conversions, nothing to do here.
      // TODO: Is there any reason to treat base-to-derived conversions
      // specially?
      break;

    case CK_ARCConsumeObject:
    case CK_ARCExtendBlockObject:
    case CK_ARCProduceObject:
    case CK_ARCReclaimReturnedObject:
    case CK_AtomicToNonAtomic:
    case CK_BooleanToSignedIntegral:
    case CK_ConstructorConversion:
    case CK_CopyAndAutoreleaseBlockObject:
    case CK_Dependent:
    case CK_FixedPointCast:
    case CK_FixedPointToBoolean:
    case CK_FixedPointToFloating:
    case CK_FixedPointToIntegral:
    case CK_FloatingCast:
    case CK_FloatingComplexCast:
    case CK_FloatingComplexToBoolean:
    case CK_FloatingComplexToIntegralComplex:
    case CK_FloatingComplexToReal:
    case CK_FloatingRealToComplex:
    case CK_FloatingToBoolean:
    case CK_FloatingToFixedPoint:
    case CK_FloatingToIntegral:
    case CK_HLSLAggregateSplatCast:
    case CK_HLSLArrayRValue:
    case CK_HLSLElementwiseCast:
    case CK_HLSLVectorTruncation:
    case CK_IntToOCLSampler:
    case CK_IntegralCast:
    case CK_IntegralComplexCast:
    case CK_IntegralComplexToBoolean:
    case CK_IntegralComplexToFloatingComplex:
    case CK_IntegralComplexToReal:
    case CK_IntegralRealToComplex:
    case CK_IntegralToBoolean:
    case CK_IntegralToFixedPoint:
    case CK_IntegralToFloating:
    case CK_LValueBitCast:
    case CK_MatrixCast:
    case CK_MemberPointerToBoolean:
    case CK_NonAtomicToAtomic:
    case CK_ObjCObjectLValueCast:
    case CK_PointerToBoolean:
    case CK_PointerToIntegral:
    case CK_ToUnion:
    case CK_ToVoid:
    case CK_UserDefinedConversion:
    case CK_VectorSplat:
    case CK_ZeroToOCLOpaqueType:
      llvm_unreachable("unexpected cast for emitPointerWithAlignment");
    }
  }

  // Unary &
  if (const UnaryOperator *uo = dyn_cast<UnaryOperator>(expr)) {
    // TODO(cir): maybe we should use cir.unary for pointers here instead.
    if (uo->getOpcode() == UO_AddrOf) {
      cgm.errorNYI(expr->getSourceRange(), "emitPointerWithAlignment: unary &");
      return Address::invalid();
    }
  }

  // std::addressof and variants.
  if (auto const *call = dyn_cast<CallExpr>(expr)) {
    switch (call->getBuiltinCallee()) {
    default:
      break;
    case Builtin::BIaddressof:
    case Builtin::BI__addressof:
    case Builtin::BI__builtin_addressof: {
      cgm.errorNYI(expr->getSourceRange(),
                   "emitPointerWithAlignment: builtin addressof");
      return Address::invalid();
    }
    }
  }

  // Otherwise, use the alignment of the type.
  return makeNaturalAddressForPointer(
      emitScalarExpr(expr), expr->getType()->getPointeeType(), CharUnits(),
      /*forPointeeType=*/true, baseInfo);
}

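// A sketch of the vector-element path in emitStoreThroughLValue below
// (schematic CIR, assuming `v[i] = x` where `v` is a vector lvalue): the
// vector is loaded, the element is inserted, and the vector is stored back:
//
//   %vec = cir.load %v_addr
//   %new = cir.vec.insert %x, %vec[%i]
//   cir.store %new, %v_addr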
void CIRGenFunction::emitStoreThroughLValue(RValue src, LValue dst,
                                            bool isInit) {
  if (!dst.isSimple()) {
    if (dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      const mlir::Location loc = dst.getVectorPointer().getLoc();
      const mlir::Value vector =
          builder.createLoad(loc, dst.getVectorAddress());
      const mlir::Value newVector = builder.create<cir::VecInsertOp>(
          loc, vector, src.getScalarVal(), dst.getVectorIdx());
      builder.createStore(loc, newVector, dst.getVectorAddress());
      return;
    }

    cgm.errorNYI(dst.getPointer().getLoc(),
                 "emitStoreThroughLValue: non-simple lvalue");
    return;
  }

  assert(!cir::MissingFeatures::opLoadStoreObjC());

  assert(src.isScalar() && "Can't emit an aggregate store with this method");
  emitStoreOfScalar(src.getScalarVal(), dst, isInit);
}

static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, const Expr *e,
                                      const VarDecl *vd) {
  QualType t = e->getType();

  // If it's thread_local, emit a call to its wrapper function instead.
  assert(!cir::MissingFeatures::opGlobalThreadLocal());
  if (vd->getTLSKind() == VarDecl::TLS_Dynamic)
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "emitGlobalVarDeclLValue: thread_local variable");

  // Check if the variable is marked as declare target with link clause in
  // device codegen.
  if (cgf.getLangOpts().OpenMP)
    cgf.cgm.errorNYI(e->getSourceRange(), "emitGlobalVarDeclLValue: OpenMP");

  // Traditional LLVM codegen handles thread-local variables separately; CIR
  // handles them as part of getAddrOfGlobalVar.
  mlir::Value v = cgf.cgm.getAddrOfGlobalVar(vd);

  assert(!cir::MissingFeatures::addressSpace());
  mlir::Type realVarTy = cgf.convertTypeForMem(vd->getType());
  cir::PointerType realPtrTy = cgf.getBuilder().getPointerTo(realVarTy);
  if (realPtrTy != v.getType())
    v = cgf.getBuilder().createBitcast(v.getLoc(), v, realPtrTy);

  CharUnits alignment = cgf.getContext().getDeclAlign(vd);
  Address addr(v, realVarTy, alignment);
  LValue lv;
  if (vd->getType()->isReferenceType())
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "emitGlobalVarDeclLValue: reference type");
  else
    lv = cgf.makeAddrLValue(addr, t, AlignmentSource::Decl);
  assert(!cir::MissingFeatures::setObjCGCLValueClass());
  return lv;
}

void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr,
                                       bool isVolatile, QualType ty,
                                       bool isInit, bool isNontemporal) {
  assert(!cir::MissingFeatures::opLoadStoreThreadLocal());

  if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
    // Boolean vectors use `iN` as storage type.
    if (clangVecTy->isExtVectorBoolType())
      cgm.errorNYI(addr.getPointer().getLoc(),
                   "emitStoreOfScalar ExtVectorBoolType");

    // Handle vectors of size 3 like size 4 for better performance.
    const mlir::Type elementType = addr.getElementType();
    const auto vecTy = cast<cir::VectorType>(elementType);

    // TODO(CIR): Use `ABIInfo::getOptimalVectorMemoryType` once it's
    // upstreamed.
    if (vecTy.getSize() == 3 && !getLangOpts().PreserveVec3Type)
      cgm.errorNYI(addr.getPointer().getLoc(),
                   "emitStoreOfScalar Vec3 & PreserveVec3Type disabled");
  }

  value = emitToMemory(value, ty);

  assert(!cir::MissingFeatures::opLoadStoreAtomic());

  // Update the alloca with more info on initialization.
  assert(addr.getPointer() && "expected pointer to exist");
  auto srcAlloca =
      dyn_cast_or_null<cir::AllocaOp>(addr.getPointer().getDefiningOp());
  if (currVarDecl && srcAlloca) {
    const VarDecl *vd = currVarDecl;
    assert(vd && "VarDecl expected");
    if (vd->hasInit())
      srcAlloca.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
  }

  assert(currSrcLoc && "must pass in source location");
  builder.createStore(*currSrcLoc, value, addr /*, isVolatile*/);

  if (isNontemporal) {
    cgm.errorNYI(addr.getPointer().getLoc(), "emitStoreOfScalar nontemporal");
    return;
  }

  assert(!cir::MissingFeatures::opTBAA());
}

mlir::Value CIRGenFunction::emitStoreThroughBitfieldLValue(RValue src,
                                                           LValue dst) {
  assert(!cir::MissingFeatures::bitfields());
  cgm.errorNYI("bitfields");
  return {};
}

LValue CIRGenFunction::emitLValueForField(LValue base, const FieldDecl *field) {
  LValueBaseInfo baseInfo = base.getBaseInfo();

  if (field->isBitField()) {
    cgm.errorNYI(field->getSourceRange(), "emitLValueForField: bitfield");
    return LValue();
  }

  QualType fieldType = field->getType();
  const RecordDecl *rec = field->getParent();
  AlignmentSource baseAlignSource = baseInfo.getAlignmentSource();
  LValueBaseInfo fieldBaseInfo(getFieldAlignmentSource(baseAlignSource));
  assert(!cir::MissingFeatures::opTBAA());

  Address addr = base.getAddress();
  if (auto *classDecl = dyn_cast<CXXRecordDecl>(rec)) {
    if (cgm.getCodeGenOpts().StrictVTablePointers &&
        classDecl->isDynamicClass()) {
      cgm.errorNYI(field->getSourceRange(),
                   "emitLValueForField: strict vtable for dynamic class");
    }
  }

  unsigned recordCVR = base.getVRQualifiers();

  llvm::StringRef fieldName = field->getName();
  unsigned fieldIndex;
  assert(!cir::MissingFeatures::lambdaFieldToName());

  if (rec->isUnion())
    fieldIndex = field->getFieldIndex();
  else {
    const CIRGenRecordLayout &layout =
        cgm.getTypes().getCIRGenRecordLayout(field->getParent());
    fieldIndex = layout.getCIRFieldNo(field);
  }

  addr = emitAddrOfFieldStorage(addr, field, fieldName, fieldIndex);
  assert(!cir::MissingFeatures::preservedAccessIndexRegion());

  // If this is a reference field, load the reference right now.
  if (fieldType->isReferenceType()) {
    cgm.errorNYI(field->getSourceRange(), "emitLValueForField: reference type");
    return LValue();
  }

  if (field->hasAttr<AnnotateAttr>()) {
    cgm.errorNYI(field->getSourceRange(), "emitLValueForField: AnnotateAttr");
    return LValue();
  }

  LValue lv = makeAddrLValue(addr, fieldType, fieldBaseInfo);
  lv.getQuals().addCVRQualifiers(recordCVR);

  // __weak attribute on a field is ignored.
  if (lv.getQuals().getObjCGCAttr() == Qualifiers::Weak) {
    cgm.errorNYI(field->getSourceRange(),
                 "emitLValueForField: __weak attribute");
    return LValue();
  }

  return lv;
}

mlir::Value CIRGenFunction::emitToMemory(mlir::Value value, QualType ty) {
  // Bool has a different representation in memory than in registers,
  // but in ClangIR, it is simply represented as a cir.bool value.
  // This function is here as a placeholder for possible future changes.
  return value;
}

void CIRGenFunction::emitStoreOfScalar(mlir::Value value, LValue lvalue,
                                       bool isInit) {
  if (lvalue.getType()->isConstantMatrixType()) {
    assert(0 && "NYI: emitStoreOfScalar constant matrix type");
    return;
  }

  emitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
                    lvalue.getType(), isInit, /*isNontemporal=*/false);
}

mlir::Value CIRGenFunction::emitLoadOfScalar(LValue lvalue,
                                             SourceLocation loc) {
  assert(!cir::MissingFeatures::opLoadStoreThreadLocal());
  assert(!cir::MissingFeatures::opLoadEmitScalarRangeCheck());
  assert(!cir::MissingFeatures::opLoadBooleanRepresentation());

  Address addr = lvalue.getAddress();
  mlir::Type eltTy = addr.getElementType();

  if (mlir::isa<cir::VoidType>(eltTy))
    cgm.errorNYI(loc, "emitLoadOfScalar: void type");

  mlir::Value loadOp = builder.createLoad(getLoc(loc), addr);

  return loadOp;
}

/// Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CIRGenFunction::emitLoadOfLValue(LValue lv, SourceLocation loc) {
  assert(!lv.getType()->isFunctionType());
  assert(!(lv.getType()->isConstantMatrixType()) && "not implemented");

  if (lv.isSimple())
    return RValue::get(emitLoadOfScalar(lv, loc));

  if (lv.isVectorElt()) {
    const mlir::Value load =
        builder.createLoad(getLoc(loc), lv.getVectorAddress());
    return RValue::get(builder.create<cir::VecExtractOp>(getLoc(loc), load,
                                                         lv.getVectorIdx()));
  }

  cgm.errorNYI(loc, "emitLoadOfLValue");
  return RValue::get(nullptr);
}

LValue CIRGenFunction::emitDeclRefLValue(const DeclRefExpr *e) {
  const NamedDecl *nd = e->getDecl();
  QualType ty = e->getType();

  assert(e->isNonOdrUse() != NOUR_Unevaluated &&
         "should not emit an unevaluated operand");

  if (const auto *vd = dyn_cast<VarDecl>(nd)) {
    // Checks for omitted feature handling.
    assert(!cir::MissingFeatures::opAllocaStaticLocal());
    assert(!cir::MissingFeatures::opAllocaNonGC());
    assert(!cir::MissingFeatures::opAllocaImpreciseLifetime());
    assert(!cir::MissingFeatures::opAllocaTLS());
    assert(!cir::MissingFeatures::opAllocaOpenMPThreadPrivate());
    assert(!cir::MissingFeatures::opAllocaEscapeByReference());

    // Check if this is a global variable.
    if (vd->hasLinkage() || vd->isStaticDataMember())
      return emitGlobalVarDeclLValue(*this, e, vd);

    Address addr = Address::invalid();

    // The variable should generally be present in the local decl map.
    auto iter = localDeclMap.find(vd);
    if (iter != localDeclMap.end()) {
      addr = iter->second;
    } else {
      // Otherwise, it might be a static local we haven't emitted yet for some
      // reason; most likely, because it's in an outer function.
      cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: static local");
    }

    // Drill into reference types.
    LValue lv =
        vd->getType()->isReferenceType()
            ? emitLoadOfReferenceLValue(addr, getLoc(e->getSourceRange()),
                                        vd->getType(), AlignmentSource::Decl)
            : makeAddrLValue(addr, ty, AlignmentSource::Decl);
    return lv;
  }

  cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: unhandled decl type");
  return LValue();
}

mlir::Value CIRGenFunction::evaluateExprAsBool(const Expr *e) {
  QualType boolTy = getContext().BoolTy;
  SourceLocation loc = e->getExprLoc();

  assert(!cir::MissingFeatures::pgoUse());
  if (e->getType()->getAs<MemberPointerType>()) {
    cgm.errorNYI(e->getSourceRange(),
                 "evaluateExprAsBool: member pointer type");
    return createDummyValue(getLoc(loc), boolTy);
  }

  assert(!cir::MissingFeatures::cgFPOptionsRAII());
  if (!e->getType()->isAnyComplexType())
    return emitScalarConversion(emitScalarExpr(e), e->getType(), boolTy, loc);

  cgm.errorNYI(e->getSourceRange(), "evaluateExprAsBool: complex type");
  return createDummyValue(getLoc(loc), boolTy);
}

LValue CIRGenFunction::emitUnaryOpLValue(const UnaryOperator *e) {
  UnaryOperatorKind op = e->getOpcode();

  // __extension__ doesn't affect lvalue-ness.
  if (op == UO_Extension)
    return emitLValue(e->getSubExpr());

  switch (op) {
  case UO_Deref: {
    QualType t = e->getSubExpr()->getType()->getPointeeType();
    assert(!t.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

    assert(!cir::MissingFeatures::opTBAA());
    LValueBaseInfo baseInfo;
    Address addr = emitPointerWithAlignment(e->getSubExpr(), &baseInfo);

    // Tag 'load' with deref attribute.
    // FIXME: This misses some dereference cases and has problematic
    // interactions with other operators.
    if (auto loadOp =
            dyn_cast<cir::LoadOp>(addr.getPointer().getDefiningOp())) {
      loadOp.setIsDerefAttr(mlir::UnitAttr::get(&getMLIRContext()));
    }

    LValue lv = makeAddrLValue(addr, t, baseInfo);
    assert(!cir::MissingFeatures::addressSpace());
    assert(!cir::MissingFeatures::setNonGC());
    return lv;
  }
  case UO_Real:
  case UO_Imag: {
    cgm.errorNYI(e->getSourceRange(), "UnaryOp real/imag");
    return LValue();
  }
  case UO_PreInc:
  case UO_PreDec: {
    bool isInc = e->isIncrementOp();
    LValue lv = emitLValue(e->getSubExpr());

    assert(e->isPrefix() && "Prefix operator in unexpected state!");

    if (e->getType()->isAnyComplexType()) {
      cgm.errorNYI(e->getSourceRange(), "UnaryOp complex inc/dec");
      lv = LValue();
    } else {
      emitScalarPrePostIncDec(e, lv, isInc, /*isPre=*/true);
    }

    return lv;
  }
  case UO_Extension:
    llvm_unreachable("UnaryOperator extension should be handled above!");
  case UO_Plus:
  case UO_Minus:
  case UO_Not:
  case UO_LNot:
  case UO_AddrOf:
  case UO_PostInc:
  case UO_PostDec:
  case UO_Coawait:
    llvm_unreachable("UnaryOperator of non-lvalue kind!");
  }
  llvm_unreachable("Unknown unary operator kind!");
}

/// If the specified expr is a simple decay from an array to pointer,
/// return the array subexpression.
/// FIXME: this could be abstracted into a common AST helper.
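///
/// For example (illustrative): with `int a[4];`, the expression `a[1]` wraps
/// `a` in an implicit array-to-pointer decay cast, and this returns the
/// subexpression for `a`; with `int *p;`, `p[1]` has no such decay and this
/// returns nullptr.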
static const Expr *getSimpleArrayDecayOperand(const Expr *e) {
  // If this isn't just an array->pointer decay, bail out.
  const auto *castExpr = dyn_cast<CastExpr>(e);
  if (!castExpr || castExpr->getCastKind() != CK_ArrayToPointerDecay)
    return nullptr;

  // If this is a decay from a variable-width array, bail out.
  const Expr *subExpr = castExpr->getSubExpr();
  if (subExpr->getType()->isVariableArrayType())
    return nullptr;

  return subExpr;
}

static cir::IntAttr getConstantIndexOrNull(mlir::Value idx) {
  // TODO(cir): should we consider using MLIR's IndexType instead of
  // IntegerAttr?
  if (auto constantOp = dyn_cast<cir::ConstantOp>(idx.getDefiningOp()))
    return mlir::dyn_cast<cir::IntAttr>(constantOp.getValue());
  return {};
}

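// A worked example for the helper below (illustrative): for a 16-byte-aligned
// array of 8-byte elements, a constant index of 2 puts the element at offset
// 16, so it is 16-byte aligned; with a non-constant index only the worst case
// over all elements can be assumed (offset 8 is merely 8-byte aligned), so
// the result is 8.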
static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx,
                                      CharUnits eltSize) {
  // If we have a constant index, we can use the exact offset of the
  // element we're accessing.
  const cir::IntAttr constantIdx = getConstantIndexOrNull(idx);
  if (constantIdx) {
    const CharUnits offset = constantIdx.getValue().getZExtValue() * eltSize;
    return arrayAlign.alignmentAtOffset(offset);
  }
  // Otherwise, use the worst-case alignment for any element.
  return arrayAlign.alignmentOfArrayElement(eltSize);
}

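// For example (illustrative): given `int vla[n][m];`, the element type of the
// outer VLA is `int[m]`, itself variably sized, so the loop below peels again
// and returns `int`; given `int vla[n][4];` it stops at the fixed-size type
// `int[4]`.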
static QualType getFixedSizeElementType(const ASTContext &astContext,
                                        const VariableArrayType *vla) {
  QualType eltType;
  do {
    eltType = vla->getElementType();
  } while ((vla = astContext.getAsVariableArrayType(eltType)));
  return eltType;
}

static mlir::Value emitArraySubscriptPtr(CIRGenFunction &cgf,
                                         mlir::Location beginLoc,
                                         mlir::Location endLoc, mlir::Value ptr,
                                         mlir::Type eltTy, mlir::Value idx,
                                         bool shouldDecay) {
  CIRGenModule &cgm = cgf.getCIRGenModule();
  // TODO(cir): LLVM codegen emits an in-bounds GEP check here; is there
  // anything that would enhance tracking this later in CIR?
  assert(!cir::MissingFeatures::emitCheckedInBoundsGEP());
  return cgm.getBuilder().getArrayElement(beginLoc, endLoc, ptr, eltTy, idx,
                                          shouldDecay);
}

static Address emitArraySubscriptPtr(CIRGenFunction &cgf,
                                     mlir::Location beginLoc,
                                     mlir::Location endLoc, Address addr,
                                     QualType eltType, mlir::Value idx,
                                     mlir::Location loc, bool shouldDecay) {

  // Determine the element size of the statically-sized base. This is
  // the thing that the indices are expressed in terms of.
  if (const VariableArrayType *vla =
          cgf.getContext().getAsVariableArrayType(eltType)) {
    eltType = getFixedSizeElementType(cgf.getContext(), vla);
  }

  // We can use that to compute the best alignment of the element.
  const CharUnits eltSize = cgf.getContext().getTypeSizeInChars(eltType);
  const CharUnits eltAlign =
      getArrayElementAlign(addr.getAlignment(), idx, eltSize);

  assert(!cir::MissingFeatures::preservedAccessIndexRegion());
  const mlir::Value eltPtr =
      emitArraySubscriptPtr(cgf, beginLoc, endLoc, addr.getPointer(),
                            addr.getElementType(), idx, shouldDecay);
  const mlir::Type elementType = cgf.convertTypeForMem(eltType);
  return Address(eltPtr, elementType, eltAlign);
}

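// A sketch of the two main paths in emitArraySubscriptExpr below (schematic,
// not verified output): for `int a[4]; a[i]` the decayed array base is
// emitted as an lvalue and the element address is computed from the array
// storage, while for `int *p; p[i]` the base is first emitted as a pointer
// rvalue and then offset, roughly
//
//   %elt = cir.ptr_stride(%base : !cir.ptr<!s32i>, %i : !s64i)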
LValue
CIRGenFunction::emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e) {
  if (isa<ExtVectorElementExpr>(e->getBase())) {
    cgm.errorNYI(e->getSourceRange(),
                 "emitArraySubscriptExpr: ExtVectorElementExpr");
    return LValue::makeAddr(Address::invalid(), e->getType(),
                            LValueBaseInfo());
  }

  if (getContext().getAsVariableArrayType(e->getType())) {
    cgm.errorNYI(e->getSourceRange(),
                 "emitArraySubscriptExpr: VariableArrayType");
    return LValue::makeAddr(Address::invalid(), e->getType(),
                            LValueBaseInfo());
  }

  if (e->getType()->getAs<ObjCObjectType>()) {
    cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjCObjectType");
    return LValue::makeAddr(Address::invalid(), e->getType(),
                            LValueBaseInfo());
  }

  // The index must always be an integer, which is not an aggregate. Emit it
  // in lexical order (this complexity is, sadly, required by C++17).
  assert((e->getIdx() == e->getLHS() || e->getIdx() == e->getRHS()) &&
         "index was neither LHS nor RHS");

  auto emitIdxAfterBase = [&](bool promote) -> mlir::Value {
    const mlir::Value idx = emitScalarExpr(e->getIdx());

    // Extend or truncate the index type to 32 or 64-bits.
    auto ptrTy = mlir::dyn_cast<cir::PointerType>(idx.getType());
    if (promote && ptrTy && ptrTy.isPtrTo<cir::IntType>())
      cgm.errorNYI(e->getSourceRange(),
                   "emitArraySubscriptExpr: index type cast");
    return idx;
  };

  // If the base is a vector type, then we are forming a vector element
  // with this subscript.
  if (e->getBase()->getType()->isVectorType() &&
      !isa<ExtVectorElementExpr>(e->getBase())) {
    const mlir::Value idx = emitIdxAfterBase(/*promote=*/false);
    const LValue lhs = emitLValue(e->getBase());
    return LValue::makeVectorElt(lhs.getAddress(), idx,
                                 e->getBase()->getType(), lhs.getBaseInfo());
  }

  const mlir::Value idx = emitIdxAfterBase(/*promote=*/true);
  if (const Expr *array = getSimpleArrayDecayOperand(e->getBase())) {
    LValue arrayLV;
    if (const auto *ase = dyn_cast<ArraySubscriptExpr>(array))
      arrayLV = emitArraySubscriptExpr(ase);
    else
      arrayLV = emitLValue(array);

    // Propagate the alignment from the array itself to the result.
    const Address addr = emitArraySubscriptPtr(
        *this, cgm.getLoc(array->getBeginLoc()), cgm.getLoc(array->getEndLoc()),
        arrayLV.getAddress(), e->getType(), idx, cgm.getLoc(e->getExprLoc()),
        /*shouldDecay=*/true);

    const LValue lv = LValue::makeAddr(addr, e->getType(), LValueBaseInfo());

    if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
      cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
    }

    return lv;
  }

  // The base must be a pointer; emit it with an estimate of its alignment.
  assert(e->getBase()->getType()->isPointerType() &&
         "The base must be a pointer");

  LValueBaseInfo eltBaseInfo;
  const Address ptrAddr = emitPointerWithAlignment(e->getBase(), &eltBaseInfo);
  // Propagate the alignment from the pointer to the result.
  const Address addr = emitArraySubscriptPtr(
      *this, cgm.getLoc(e->getBeginLoc()), cgm.getLoc(e->getEndLoc()), ptrAddr,
      e->getType(), idx, cgm.getLoc(e->getExprLoc()),
      /*shouldDecay=*/false);

  const LValue lv = LValue::makeAddr(addr, e->getType(), eltBaseInfo);

  if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
    cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
  }

  return lv;
}

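// A sketch for emitStringLiteralLValue below (schematic, not verified
// output): the literal's characters live in a file-scope cir.global created
// by getGlobalForStringLiteral, and the lvalue is that global's address,
// roughly
//
//   cir.global ... @".str" = #cir.const_array<"hi\00"> : !cir.array<!s8i x 3>
//   %addr = cir.get_global @".str" : !cir.ptr<!cir.array<!s8i x 3>>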
LValue CIRGenFunction::emitStringLiteralLValue(const StringLiteral *e) {
  cir::GlobalOp globalOp = cgm.getGlobalForStringLiteral(e);
  assert(globalOp.getAlignment() && "expected alignment for string literal");
  unsigned align = *(globalOp.getAlignment());
  mlir::Value addr =
      builder.createGetGlobal(getLoc(e->getSourceRange()), globalOp);
  return makeAddrLValue(
      Address(addr, globalOp.getSymType(), CharUnits::fromQuantity(align)),
      e->getType(), AlignmentSource::Decl);
}

/// Casts are never lvalues unless that cast is to a reference type. If the
/// cast is to a reference, we can have the usual lvalue result, otherwise if
/// a cast is needed by the code generator in an lvalue context, then it must
/// mean that we need the address of an aggregate in order to access one of
/// its members. This can happen for all the reasons that casts are permitted
/// with aggregate result, including noop aggregate casts, and cast from
/// scalar to union.
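///
/// For example (illustrative): in
///
///   struct Base { int x; };
///   struct Derived : Base {};
///   int f(Derived &d) { return ((Base &)d).x; }
///
/// the cast to `Base &` is a CK_DerivedToBase cast emitted as an lvalue whose
/// address is the base-class subobject within `d`.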
LValue CIRGenFunction::emitCastLValue(const CastExpr *e) {
  switch (e->getCastKind()) {
  case CK_ToVoid:
  case CK_BitCast:
  case CK_LValueToRValueBitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToMemberPointer:
  case CK_NullToPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_DerivedToBaseMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_IntToOCLSampler:
  case CK_FloatingToFixedPoint:
  case CK_FixedPointToFloating:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
  case CK_MatrixCast:
  case CK_HLSLVectorTruncation:
  case CK_HLSLArrayRValue:
  case CK_HLSLElementwiseCast:
  case CK_HLSLAggregateSplatCast:
    llvm_unreachable("unexpected cast lvalue");

  case CK_Dependent:
    llvm_unreachable("dependent cast kind in IR gen!");

  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  // These are never l-values; just use the aggregate emission code.
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:
  case CK_Dynamic:
  case CK_ToUnion:
  case CK_BaseToDerived:
  case CK_LValueBitCast:
  case CK_AddressSpaceConversion:
  case CK_ObjCObjectLValueCast:
  case CK_VectorSplat:
  case CK_ConstructorConversion:
  case CK_UserDefinedConversion:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_LValueToRValue: {
    cgm.errorNYI(e->getSourceRange(),
                 std::string("emitCastLValue for unhandled cast kind: ") +
                     e->getCastKindName());

    return {};
  }

  case CK_NoOp: {
    // CK_NoOp can model a qualification conversion, which can remove an array
    // bound and change the IR type.
    LValue lv = emitLValue(e->getSubExpr());
    // Propagate the volatile qualifier to the LValue, if it exists in e.
    if (e->changesVolatileQualification())
      cgm.errorNYI(e->getSourceRange(),
                   "emitCastLValue: NoOp changes volatile qual");
    if (lv.isSimple()) {
      Address v = lv.getAddress();
      if (v.isValid()) {
        mlir::Type ty = convertTypeForMem(e->getType());
        if (v.getElementType() != ty)
          cgm.errorNYI(e->getSourceRange(),
                       "emitCastLValue: NoOp needs bitcast");
      }
    }
    return lv;
  }

  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    const auto *derivedClassTy =
        e->getSubExpr()->getType()->castAs<clang::RecordType>();
    auto *derivedClassDecl = cast<CXXRecordDecl>(derivedClassTy->getDecl());

    LValue lv = emitLValue(e->getSubExpr());
    Address thisAddr = lv.getAddress();

    // Perform the derived-to-base conversion.
    Address baseAddr =
        getAddressOfBaseClass(thisAddr, derivedClassDecl, e->path(),
                              /*NullCheckValue=*/false, e->getExprLoc());

    // TODO: Support accesses to members of base classes in TBAA. For now, we
    // conservatively pretend that the complete object is of the base class
    // type.
    assert(!cir::MissingFeatures::opTBAA());
    return makeAddrLValue(baseAddr, e->getType(), lv.getBaseInfo());
  }

  case CK_ZeroToOCLOpaqueType:
    llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
  }

  llvm_unreachable("Invalid cast kind");
}

LValue CIRGenFunction::emitMemberExpr(const MemberExpr *e) {
  if (isa<VarDecl>(e->getMemberDecl())) {
    cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: VarDecl");
    return LValue();
  }

  Expr *baseExpr = e->getBase();
  // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
  LValue baseLV;
  if (e->isArrow()) {
    LValueBaseInfo baseInfo;
    assert(!cir::MissingFeatures::opTBAA());
    Address addr = emitPointerWithAlignment(baseExpr, &baseInfo);
    QualType ptrTy = baseExpr->getType()->getPointeeType();
    assert(!cir::MissingFeatures::typeChecks());
    baseLV = makeAddrLValue(addr, ptrTy, baseInfo);
  } else {
    assert(!cir::MissingFeatures::typeChecks());
    baseLV = emitLValue(baseExpr);
  }

  const NamedDecl *nd = e->getMemberDecl();
  if (auto *field = dyn_cast<FieldDecl>(nd)) {
    LValue lv = emitLValueForField(baseLV, field);
    assert(!cir::MissingFeatures::setObjCGCLValueClass());
    if (getLangOpts().OpenMP) {
      // If the member was explicitly marked as nontemporal, mark it as
      // nontemporal. If the base lvalue is marked as nontemporal, mark access
      // to children as nontemporal too.
      cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: OpenMP");
    }
    return lv;
  }

  if (isa<FunctionDecl>(nd)) {
    cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: FunctionDecl");
    return LValue();
  }

  llvm_unreachable("Unhandled member declaration!");
}

LValue CIRGenFunction::emitCallExprLValue(const CallExpr *e) {
  RValue rv = emitCallExpr(e);

  if (!rv.isScalar()) {
    cgm.errorNYI(e->getSourceRange(), "emitCallExprLValue: non-scalar return");
    return {};
  }

  assert(e->getCallReturnType(getContext())->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return makeNaturalAlignPointeeAddrLValue(rv.getScalarVal(), e->getType());
}

LValue CIRGenFunction::emitBinaryOperatorLValue(const BinaryOperator *e) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (e->getOpcode() == BO_Comma) {
    emitIgnoredExpr(e->getLHS());
    return emitLValue(e->getRHS());
  }

  if (e->getOpcode() == BO_PtrMemD || e->getOpcode() == BO_PtrMemI) {
    cgm.errorNYI(e->getSourceRange(), "member pointers");
    return {};
  }

  assert(e->getOpcode() == BO_Assign && "unexpected binary l-value");

  // Note that in all of these cases, __block variables need the RHS
  // evaluated first just in case the variable gets moved by the RHS.

  switch (CIRGenFunction::getEvaluationKind(e->getType())) {
  case cir::TEK_Scalar: {
    assert(!cir::MissingFeatures::objCLifetime());
    if (e->getLHS()->getType().getObjCLifetime() !=
        clang::Qualifiers::ObjCLifetime::OCL_None) {
      cgm.errorNYI(e->getSourceRange(), "objc lifetimes");
      return {};
    }

    RValue rv = emitAnyExpr(e->getRHS());
    LValue lv = emitLValue(e->getLHS());

    SourceLocRAIIObject loc{*this, getLoc(e->getSourceRange())};
    if (lv.isBitField()) {
      cgm.errorNYI(e->getSourceRange(), "bitfields");
      return {};
    }
    emitStoreThroughLValue(rv, lv);

    if (getLangOpts().OpenMP) {
      cgm.errorNYI(e->getSourceRange(), "openmp");
      return {};
    }

    return lv;
  }

  case cir::TEK_Complex: {
    assert(!cir::MissingFeatures::complexType());
    cgm.errorNYI(e->getSourceRange(), "complex l-values");
    return {};
  }
  case cir::TEK_Aggregate:
    cgm.errorNYI(e->getSourceRange(), "aggregate lvalues");
    return {};
  }
  llvm_unreachable("bad evaluation kind");
}

/// Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
RValue CIRGenFunction::emitAnyExpr(const Expr *e) {
  switch (CIRGenFunction::getEvaluationKind(e->getType())) {
  case cir::TEK_Scalar:
    return RValue::get(emitScalarExpr(e));
  case cir::TEK_Complex:
    cgm.errorNYI(e->getSourceRange(), "emitAnyExpr: complex type");
    return RValue::get(nullptr);
  case cir::TEK_Aggregate:
    cgm.errorNYI(e->getSourceRange(), "emitAnyExpr: aggregate type");
    return RValue::get(nullptr);
  }
  llvm_unreachable("bad evaluation kind");
}

static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd) {
  assert(!cir::MissingFeatures::weakRefReference());
  return cgm.getAddrOfFunction(gd);
}

// Detect the unusual situation where an inline version is shadowed by a
// non-inline version. In that case we should pick the external one
// everywhere. That's GCC behavior too.
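//
// For example (illustrative): a libc header may provide an inline builtin
// definition of memcpy; if the translation unit also sees a plain external
// declaration of memcpy, the declaration chain is no longer "inline builtin
// only", and the external version should be used everywhere.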
static bool onlyHasInlineBuiltinDeclaration(const FunctionDecl *fd) {
  for (const FunctionDecl *pd = fd; pd; pd = pd->getPreviousDecl())
    if (!pd->isInlineBuiltinDeclaration())
      return false;
  return true;
}

CIRGenCallee CIRGenFunction::emitDirectCallee(const GlobalDecl &gd) {
  const auto *fd = cast<FunctionDecl>(gd.getDecl());

  if (unsigned builtinID = fd->getBuiltinID()) {
    if (fd->getAttr<AsmLabelAttr>()) {
      cgm.errorNYI("AsmLabelAttr");
    }

    StringRef ident = fd->getName();
    std::string fdInlineName = (ident + ".inline").str();

    bool isPredefinedLibFunction =
        cgm.getASTContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
    bool hasAttributeNoBuiltin = false;
    assert(!cir::MissingFeatures::attributeNoBuiltin());

    // When directly calling an inline builtin, call it through its mangled
    // name to make it clear it's not the actual builtin.
    auto fn = cast<cir::FuncOp>(curFn);
    if (fn.getName() != fdInlineName && onlyHasInlineBuiltinDeclaration(fd)) {
      cgm.errorNYI("Inline only builtin function calls");
    }

    // Replaceable builtins provide their own implementation of a builtin. If
    // we are in an inline builtin implementation, avoid trivial infinite
    // recursion. Honor __attribute__((no_builtin("foo"))) or
    // __attribute__((no_builtin)) on the current function unless foo is not a
    // predefined library function, which means we must generate the builtin
    // no matter what.
    else if (!isPredefinedLibFunction || !hasAttributeNoBuiltin)
      return CIRGenCallee::forBuiltin(builtinID, fd);
  }

  cir::FuncOp callee = emitFunctionDeclPointer(cgm, gd);

  assert(!cir::MissingFeatures::hip());

  return CIRGenCallee::forDirect(callee, gd);
}

RValue CIRGenFunction::getUndefRValue(QualType ty) {
  if (ty->isVoidType())
    return RValue::get(nullptr);

  cgm.errorNYI("unsupported type for undef rvalue");
  return RValue::get(nullptr);
}

RValue CIRGenFunction::emitCall(clang::QualType calleeTy,
                                const CIRGenCallee &callee,
                                const clang::CallExpr *e,
                                ReturnValueSlot returnValue) {
  // Get the actual function type. The callee type will always be a pointer to
  // function type or a block pointer type.
  assert(calleeTy->isFunctionPointerType() &&
         "Callee must have function pointer type!");

  calleeTy = getContext().getCanonicalType(calleeTy);
  auto pointeeTy = cast<PointerType>(calleeTy)->getPointeeType();

  if (getLangOpts().CPlusPlus)
    assert(!cir::MissingFeatures::sanitizers());

  const auto *fnType = cast<FunctionType>(pointeeTy);

  assert(!cir::MissingFeatures::sanitizers());

  CallArgList args;
  assert(!cir::MissingFeatures::opCallArgEvaluationOrder());

  emitCallArgs(args, dyn_cast<FunctionProtoType>(fnType), e->arguments(),
               e->getDirectCallee());

  const CIRGenFunctionInfo &funcInfo =
      cgm.getTypes().arrangeFreeFunctionCall(args, fnType);

  assert(!cir::MissingFeatures::opCallNoPrototypeFunc());
  assert(!cir::MissingFeatures::opCallFnInfoOpts());
  assert(!cir::MissingFeatures::hip());
  assert(!cir::MissingFeatures::opCallMustTail());

  cir::CIRCallOpInterface callOp;
  RValue callResult = emitCall(funcInfo, callee, returnValue, args, &callOp,
                               getLoc(e->getExprLoc()));

  assert(!cir::MissingFeatures::generateDebugInfo());

  return callResult;
}

CIRGenCallee CIRGenFunction::emitCallee(const clang::Expr *e) {
  e = e->IgnoreParens();

  // Look through function-to-pointer decay.
  if (const auto *implicitCast = dyn_cast<ImplicitCastExpr>(e)) {
    if (implicitCast->getCastKind() == CK_FunctionToPointerDecay ||
        implicitCast->getCastKind() == CK_BuiltinFnToFnPtr) {
      return emitCallee(implicitCast->getSubExpr());
    }
    // When performing an indirect call through a function pointer lvalue, the
    // function pointer lvalue is implicitly converted to an rvalue through an
    // lvalue-to-rvalue conversion.
    assert(implicitCast->getCastKind() == CK_LValueToRValue &&
           "unexpected implicit cast on function pointers");
  } else if (const auto *declRef = dyn_cast<DeclRefExpr>(e)) {
    // Resolve direct calls.
    const auto *funcDecl = cast<FunctionDecl>(declRef->getDecl());
    return emitDirectCallee(funcDecl);
  } else if (isa<MemberExpr>(e)) {
    cgm.errorNYI(e->getSourceRange(),
                 "emitCallee: call to member function is NYI");
    return {};
  }

  assert(!cir::MissingFeatures::opCallPseudoDtor());

  // Otherwise, we have an indirect reference.
  mlir::Value calleePtr;
  QualType functionType;
  if (const auto *ptrType = e->getType()->getAs<clang::PointerType>()) {
    calleePtr = emitScalarExpr(e);
    functionType = ptrType->getPointeeType();
  } else {
    functionType = e->getType();
    calleePtr = emitLValue(e).getPointer();
  }
  assert(functionType->isFunctionType());

  GlobalDecl gd;
  if (const auto *vd =
          dyn_cast_or_null<VarDecl>(e->getReferencedDeclOfCallee()))
    gd = GlobalDecl(vd);

  CIRGenCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), gd);
  CIRGenCallee callee(calleeInfo, calleePtr.getDefiningOp());
  return callee;
}

RValue CIRGenFunction::emitCallExpr(const clang::CallExpr *e,
                                    ReturnValueSlot returnValue) {
  assert(!cir::MissingFeatures::objCBlocks());

  if (const auto *ce = dyn_cast<CXXMemberCallExpr>(e))
    return emitCXXMemberCallExpr(ce, returnValue);

  if (isa<CUDAKernelCallExpr>(e)) {
    cgm.errorNYI(e->getSourceRange(), "call to CUDA kernel");
    return RValue::get(nullptr);
  }

  if (const auto *operatorCall = dyn_cast<CXXOperatorCallExpr>(e)) {
    // If the callee decl is a CXXMethodDecl, we need to emit this as a C++
    // operator member call.
    if (const CXXMethodDecl *md =
            dyn_cast_or_null<CXXMethodDecl>(operatorCall->getCalleeDecl()))
      return emitCXXOperatorMemberCallExpr(operatorCall, md, returnValue);
    // A CXXOperatorCallExpr is created even for explicit object methods, but
    // these should be treated like static function calls. Fall through to do
    // that.
  }

  CIRGenCallee callee = emitCallee(e->getCallee());

  if (callee.isBuiltin())
    return emitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), e,
                           returnValue);

  if (isa<CXXPseudoDestructorExpr>(e->getCallee())) {
    cgm.errorNYI(e->getSourceRange(), "call to pseudo destructor");
  }
  assert(!cir::MissingFeatures::opCallPseudoDtor());

  return emitCall(e->getCallee()->getType(), callee, e, returnValue);
}

/// Emit code to compute the specified expression, ignoring the result.
void CIRGenFunction::emitIgnoredExpr(const Expr *e) {
  if (e->isPRValue()) {
    assert(!cir::MissingFeatures::aggValueSlot());
    emitAnyExpr(e);
    return;
  }

  // Just emit it as an l-value and drop the result.
  emitLValue(e);
}

Address CIRGenFunction::emitArrayToPointerDecay(const Expr *e) {
  assert(e->getType()->isArrayType() &&
         "Array to pointer decay must have array source type!");

  // Expressions of array type can't be bitfields or vector elements.
  LValue lv = emitLValue(e);
  Address addr = lv.getAddress();

  // If the array type was an incomplete type, we need to make sure
  // the decay ends up being the right type.
  auto lvalueAddrTy = mlir::cast<cir::PointerType>(addr.getPointer().getType());

  if (e->getType()->isVariableArrayType())
    return addr;

  auto pointeeTy = mlir::cast<cir::ArrayType>(lvalueAddrTy.getPointee());

  mlir::Type arrayTy = convertType(e->getType());
  assert(mlir::isa<cir::ArrayType>(arrayTy) && "expected array");
  assert(pointeeTy == arrayTy);

  // The result of this decay conversion points to an array element within the
  // base lvalue. However, since TBAA currently does not support representing
  // accesses to elements of member arrays, we conservatively represent
  // accesses to the pointee object as if it had no base lvalue specified.
  // TODO: Support TBAA for member arrays.
  QualType eltType = e->getType()->castAsArrayTypeUnsafe()->getElementType();
  assert(!cir::MissingFeatures::opTBAA());

  mlir::Value ptr = builder.maybeBuildArrayDecay(
      cgm.getLoc(e->getSourceRange()), addr.getPointer(),
      convertTypeForMem(eltType));
  return Address(ptr, addr.getAlignment());
}

/// Emit an `if` on a boolean condition, filling `then` and `else` into
/// appropriate regions.
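///
/// Schematically (illustrative, not verified output), the generated structure
/// is
///
///   %cond = ... : !cir.bool
///   cir.if %cond {
///     // thenS
///   } else {
///     // elseS; region only present when elseS is non-null
///   }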
mlir::LogicalResult CIRGenFunction::emitIfOnBoolExpr(const Expr *cond,
                                                     const Stmt *thenS,
                                                     const Stmt *elseS) {
  mlir::Location thenLoc = getLoc(thenS->getSourceRange());
  std::optional<mlir::Location> elseLoc;
  if (elseS)
    elseLoc = getLoc(elseS->getSourceRange());

  mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success();
  emitIfOnBoolExpr(
      cond, /*thenBuilder=*/
      [&](mlir::OpBuilder &, mlir::Location) {
        LexicalScope lexScope{*this, thenLoc, builder.getInsertionBlock()};
        resThen = emitStmt(thenS, /*useCurrentScope=*/true);
      },
      thenLoc,
      /*elseBuilder=*/
      [&](mlir::OpBuilder &, mlir::Location) {
        assert(elseLoc && "Invalid location for elseS.");
        LexicalScope lexScope{*this, *elseLoc, builder.getInsertionBlock()};
        resElse = emitStmt(elseS, /*useCurrentScope=*/true);
      },
      elseLoc);

  return mlir::LogicalResult::success(resThen.succeeded() &&
                                      resElse.succeeded());
}

/// Emit an `if` on a boolean condition, filling `then` and `else` into
/// appropriate regions.
cir::IfOp CIRGenFunction::emitIfOnBoolExpr(
    const clang::Expr *cond, BuilderCallbackRef thenBuilder,
    mlir::Location thenLoc, BuilderCallbackRef elseBuilder,
    std::optional<mlir::Location> elseLoc) {
  // Attempt to be as accurate as possible with IfOp location, generate
  // one fused location that has either 2 or 4 total locations, depending
  // on else's availability.
  SmallVector<mlir::Location, 2> ifLocs{thenLoc};
  if (elseLoc)
    ifLocs.push_back(*elseLoc);
  mlir::Location loc = mlir::FusedLoc::get(&getMLIRContext(), ifLocs);

  // Emit the code with the fully general case.
  mlir::Value condV = emitOpOnBoolExpr(loc, cond);
  return builder.create<cir::IfOp>(loc, condV, elseLoc.has_value(),
                                   /*thenBuilder=*/thenBuilder,
                                   /*elseBuilder=*/elseBuilder);
}

/// TODO(cir): see EmitBranchOnBoolExpr for extra ideas.
mlir::Value CIRGenFunction::emitOpOnBoolExpr(mlir::Location loc,
                                             const Expr *cond) {
  assert(!cir::MissingFeatures::pgoUse());
  assert(!cir::MissingFeatures::generateDebugInfo());
  cond = cond->IgnoreParens();

  // In LLVM the condition is reversed here for efficient codegen.
  // This should be done in CIR prior to LLVM lowering; if we do it now,
  // we can make CIR-based diagnostics misleading.
  // cir.ternary(!x, t, f) -> cir.ternary(x, f, t)
  assert(!cir::MissingFeatures::shouldReverseUnaryCondOnBoolExpr());

  if (const ConditionalOperator *condOp = dyn_cast<ConditionalOperator>(cond)) {
    Expr *trueExpr = condOp->getTrueExpr();
    Expr *falseExpr = condOp->getFalseExpr();
    mlir::Value condV = emitOpOnBoolExpr(loc, condOp->getCond());

    mlir::Value ternaryOpRes =
        builder
            .create<cir::TernaryOp>(
                loc, condV, /*thenBuilder=*/
                [this, trueExpr](mlir::OpBuilder &b, mlir::Location loc) {
                  mlir::Value lhs = emitScalarExpr(trueExpr);
                  b.create<cir::YieldOp>(loc, lhs);
                },
                /*elseBuilder=*/
                [this, falseExpr](mlir::OpBuilder &b, mlir::Location loc) {
                  mlir::Value rhs = emitScalarExpr(falseExpr);
                  b.create<cir::YieldOp>(loc, rhs);
                })
            .getResult();

    return emitScalarConversion(ternaryOpRes, condOp->getType(),
                                getContext().BoolTy, condOp->getExprLoc());
  }

  if (isa<CXXThrowExpr>(cond)) {
    cgm.errorNYI("NYI");
    return createDummyValue(loc, cond->getType());
  }

  // If the branch has a condition wrapped by __builtin_unpredictable,
  // create metadata that specifies that the branch is unpredictable.
  // Don't bother if not optimizing because that metadata would not be used.
  assert(!cir::MissingFeatures::insertBuiltinUnpredictable());

  // Emit the code with the fully general case.
  return evaluateExprAsBool(cond);
}

mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
                                       mlir::Location loc, CharUnits alignment,
                                       bool insertIntoFnEntryBlock,
                                       mlir::Value arraySize) {
  mlir::Block *entryBlock = insertIntoFnEntryBlock
                                ? getCurFunctionEntryBlock()
                                : curLexScope->getEntryBlock();

  // If this is an alloca in the entry basic block of a cir.try and there's
  // a surrounding cir.scope, make sure the alloca ends up in the surrounding
  // scope instead. This is necessary in order to guarantee all SSA values are
  // reachable during cleanups.
  assert(!cir::MissingFeatures::tryOp());

  return emitAlloca(name, ty, loc, alignment,
                    builder.getBestAllocaInsertPoint(entryBlock), arraySize);
}

mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
                                       mlir::Location loc, CharUnits alignment,
                                       mlir::OpBuilder::InsertPoint ip,
                                       mlir::Value arraySize) {
  // CIR uses its own alloca address space rather than follow the target data
  // layout like original CodeGen. The data layout awareness should be done in
  // the lowering pass instead.
  assert(!cir::MissingFeatures::addressSpace());
  cir::PointerType localVarPtrTy = builder.getPointerTo(ty);
  mlir::IntegerAttr alignIntAttr = cgm.getSize(alignment);

  mlir::Value addr;
  {
    mlir::OpBuilder::InsertionGuard guard(builder);
    builder.restoreInsertionPoint(ip);
    addr = builder.createAlloca(loc, /*addr type*/ localVarPtrTy,
                                /*var type*/ ty, name, alignIntAttr);
    assert(!cir::MissingFeatures::astVarDeclInterface());
  }
  return addr;
}

// Note: this function also emits constructor calls to support the MSVC
// extension that allows explicit constructor function calls.
RValue CIRGenFunction::emitCXXMemberCallExpr(const CXXMemberCallExpr *ce,
                                             ReturnValueSlot returnValue) {
  const Expr *callee = ce->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee)) {
    cgm.errorNYI(ce->getSourceRange(),
                 "emitCXXMemberCallExpr: C++ binary operator");
    return RValue::get(nullptr);
  }

  const auto *me = cast<MemberExpr>(callee);
  const auto *md = cast<CXXMethodDecl>(me->getMemberDecl());

  if (md->isStatic()) {
    cgm.errorNYI(ce->getSourceRange(), "emitCXXMemberCallExpr: static method");
    return RValue::get(nullptr);
  }

  bool hasQualifier = me->hasQualifier();
  NestedNameSpecifier *qualifier = hasQualifier ? me->getQualifier() : nullptr;
  bool isArrow = me->isArrow();
  const Expr *base = me->getBase();

  return emitCXXMemberOrOperatorMemberCallExpr(
      ce, md, returnValue, hasQualifier, qualifier, isArrow, base);
}

void CIRGenFunction::emitCXXConstructExpr(const CXXConstructExpr *e,
                                          AggValueSlot dest) {
  assert(!dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *cd = e->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless the destination is
  // already zeroed.
  if (e->requiresZeroInitialization() && !dest.isZeroed()) {
    cgm.errorNYI(e->getSourceRange(),
                 "emitCXXConstructExpr: requires initialization");
    return;
  }

  // If this is a call to a trivial default constructor:
  // In LLVM: do nothing.
  // In CIR: emit as a regular call; later passes should lower the
  // ctor call into trivial initialization.

  // Elide the constructor if we're constructing from a temporary.
  if (getLangOpts().ElideConstructors && e->isElidable()) {
    cgm.errorNYI(e->getSourceRange(),
                 "emitCXXConstructExpr: elidable constructor");
    return;
  }

  if (getContext().getAsArrayType(e->getType())) {
    cgm.errorNYI(e->getSourceRange(), "emitCXXConstructExpr: array type");
    return;
  }

  clang::CXXCtorType type = Ctor_Complete;
  bool forVirtualBase = false;
  bool delegating = false;

  switch (e->getConstructionKind()) {
  case CXXConstructionKind::Complete:
    type = Ctor_Complete;
    break;
  case CXXConstructionKind::Delegating:
  case CXXConstructionKind::VirtualBase:
  case CXXConstructionKind::NonVirtualBase:
    cgm.errorNYI(e->getSourceRange(),
                 "emitCXXConstructExpr: other construction kind");
    return;
  }

  emitCXXConstructorCall(cd, type, forVirtualBase, delegating, dest, e);
}

RValue CIRGenFunction::emitReferenceBindingToExpr(const Expr *e) {
  // Emit the expression as an lvalue.
  LValue lv = emitLValue(e);
  assert(lv.isSimple());
  mlir::Value value = lv.getPointer();

  assert(!cir::MissingFeatures::sanitizers());

  return RValue::get(value);
}

Address CIRGenFunction::emitLoadOfReference(LValue refLVal, mlir::Location loc,
                                            LValueBaseInfo *pointeeBaseInfo) {
  if (refLVal.isVolatile())
    cgm.errorNYI(loc, "load of volatile reference");

  cir::LoadOp load =
      builder.create<cir::LoadOp>(loc, refLVal.getAddress().getElementType(),
                                  refLVal.getAddress().getPointer());

  assert(!cir::MissingFeatures::opTBAA());

  QualType pointeeType = refLVal.getType()->getPointeeType();
  CharUnits align = cgm.getNaturalTypeAlignment(pointeeType, pointeeBaseInfo);
  return Address(load, convertTypeForMem(pointeeType), align);
}

LValue CIRGenFunction::emitLoadOfReferenceLValue(Address refAddr,
                                                 mlir::Location loc,
                                                 QualType refTy,
                                                 AlignmentSource source) {
  LValue refLVal = makeAddrLValue(refAddr, refTy, LValueBaseInfo(source));
  LValueBaseInfo pointeeBaseInfo;
  assert(!cir::MissingFeatures::opTBAA());
  Address pointeeAddr = emitLoadOfReference(refLVal, loc, &pointeeBaseInfo);
  return makeAddrLValue(pointeeAddr, refLVal.getType()->getPointeeType(),
                        pointeeBaseInfo);
}

mlir::Value CIRGenFunction::createDummyValue(mlir::Location loc,
                                             clang::QualType qt) {
  mlir::Type t = convertType(qt);
  CharUnits alignment = getContext().getTypeAlignInChars(qt);
  return builder.createDummyValue(loc, t, alignment);
}

//===----------------------------------------------------------------------===//
// CIR builder helpers
//===----------------------------------------------------------------------===//

Address CIRGenFunction::createMemTemp(QualType ty, mlir::Location loc,
                                      const Twine &name, Address *alloca,
                                      mlir::OpBuilder::InsertPoint ip) {
  // FIXME: Should we prefer the preferred type alignment here?
  return createMemTemp(ty, getContext().getTypeAlignInChars(ty), loc, name,
                       alloca, ip);
}

Address CIRGenFunction::createMemTemp(QualType ty, CharUnits align,
                                      mlir::Location loc, const Twine &name,
                                      Address *alloca,
                                      mlir::OpBuilder::InsertPoint ip) {
  Address result = createTempAlloca(convertTypeForMem(ty), align, loc, name,
                                    /*ArraySize=*/nullptr, alloca, ip);
  if (ty->isConstantMatrixType()) {
    assert(!cir::MissingFeatures::matrixType());
    cgm.errorNYI(loc, "temporary matrix value");
  }
  return result;
}

/// This creates an alloca and inserts it into the entry block of the
/// current region.
Address CIRGenFunction::createTempAllocaWithoutCast(
    mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name,
    mlir::Value arraySize, mlir::OpBuilder::InsertPoint ip) {
  cir::AllocaOp alloca = ip.isSet()
                             ? createTempAlloca(ty, loc, name, ip, arraySize)
                             : createTempAlloca(ty, loc, name, arraySize);
  alloca.setAlignmentAttr(cgm.getSize(align));
  return Address(alloca, ty, align);
}

/// This creates an alloca and inserts it into the entry block. The alloca is
/// cast to the default address space if necessary.
Address CIRGenFunction::createTempAlloca(mlir::Type ty, CharUnits align,
                                         mlir::Location loc, const Twine &name,
                                         mlir::Value arraySize,
                                         Address *allocaAddr,
                                         mlir::OpBuilder::InsertPoint ip) {
  Address alloca =
      createTempAllocaWithoutCast(ty, align, loc, name, arraySize, ip);
  if (allocaAddr)
    *allocaAddr = alloca;
  mlir::Value v = alloca.getPointer();
  // Alloca always returns a pointer in the alloca address space, which may
  // be different from the type defined by the language. For example,
  // in C++ the auto variables are in the default address space. Therefore
  // cast alloca to the default address space when necessary.
  assert(!cir::MissingFeatures::addressSpace());
  return Address(v, ty, align);
}

/// This creates an alloca and inserts it into the entry block if \p arraySize
/// is nullptr, otherwise inserts it at the current insertion point of the
/// builder.
cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
                                               mlir::Location loc,
                                               const Twine &name,
                                               mlir::Value arraySize,
                                               bool insertIntoFnEntryBlock) {
  return cast<cir::AllocaOp>(emitAlloca(name.str(), ty, loc, CharUnits(),
                                        insertIntoFnEntryBlock, arraySize)
                                 .getDefiningOp());
}

/// This creates an alloca and inserts it into the provided insertion point.
cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
                                               mlir::Location loc,
                                               const Twine &name,
                                               mlir::OpBuilder::InsertPoint ip,
                                               mlir::Value arraySize) {
  assert(ip.isSet() && "Insertion point is not set");
  return cast<cir::AllocaOp>(
      emitAlloca(name.str(), ty, loc, CharUnits(), ip, arraySize)
          .getDefiningOp());
}

/// Try to emit a reference to the given value without producing it as
/// an l-value. For many cases, this is just an optimization, but it avoids
/// us needing to emit global copies of variables if they're named without
/// triggering a formal use in a context where we can't emit a direct
/// reference to them, for instance if a block or lambda or a member of a
/// local class uses a const int variable or constexpr variable from an
/// enclosing function.
///
/// For named members of enums, this is the only way they are emitted.
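///
/// For example (illustrative):
///
///   enum E { A = 42 };
///   int f() { return A; }
///
/// Here the reference to `A` folds to the constant 42 and no lvalue is
/// formed.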
CIRGenFunction::ConstantEmission
CIRGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
  ValueDecl *value = refExpr->getDecl();

  // There is a lot more to do here, but for now only EnumConstantDecl is
  // supported.
  assert(!cir::MissingFeatures::tryEmitAsConstant());

  // The value needs to be an enum constant or a constant variable.
  if (!isa<EnumConstantDecl>(value))
    return ConstantEmission();

  Expr::EvalResult result;
  if (!refExpr->EvaluateAsRValue(result, getContext()))
    return ConstantEmission();

  QualType resultType = refExpr->getType();

  // As long as we're only handling EnumConstantDecl, there should be no
  // side-effects.
  assert(!result.HasSideEffects);

  // Emit as a constant.
  // FIXME(cir): have emitAbstract build a TypedAttr instead (this requires
  // somewhat heavy refactoring...)
  mlir::Attribute c = ConstantEmitter(*this).emitAbstract(
      refExpr->getLocation(), result.Val, resultType);
  mlir::TypedAttr cstToEmit = mlir::dyn_cast_if_present<mlir::TypedAttr>(c);
  assert(cstToEmit && "expected a typed attribute");

  assert(!cir::MissingFeatures::generateDebugInfo());

  return ConstantEmission::forValue(cstToEmit);
}

mlir::Value CIRGenFunction::emitScalarConstant(
    const CIRGenFunction::ConstantEmission &constant, Expr *e) {
  assert(constant && "not a constant");
  if (constant.isReference()) {
    cgm.errorNYI(e->getSourceRange(), "emitScalarConstant: reference");
    return {};
  }
  return builder.getConstant(getLoc(e->getSourceRange()), constant.getValue());
}