//===--- CIRGenExprCXX.cpp - Emit CIR Code for C++ expressions ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "CIRGenCXXABI.h"
#include "CIRGenFunction.h"

#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprCXX.h"
#include "clang/CIR/MissingFeatures.h"

using namespace clang;
using namespace clang::CIRGen;
23namespace {
24struct MemberCallInfo {
25 RequiredArgs reqArgs;
26 // Number of prefix arguments for the call. Ignores the `this` pointer.
27 unsigned prefixSize;
28};
29} // namespace
30
31static MemberCallInfo commonBuildCXXMemberOrOperatorCall(
32 CIRGenFunction &cgf, const CXXMethodDecl *md, mlir::Value thisPtr,
33 mlir::Value implicitParam, QualType implicitParamTy, const CallExpr *ce,
34 CallArgList &args, CallArgList *rtlArgs) {
35 assert(ce == nullptr || isa<CXXMemberCallExpr>(ce) ||
36 isa<CXXOperatorCallExpr>(ce));
37 assert(md->isInstance() &&
38 "Trying to emit a member or operator call expr on a static method!");
39
40 // Push the this ptr.
41 const CXXRecordDecl *rd =
42 cgf.cgm.getCXXABI().getThisArgumentTypeForMethod(md);
43 args.add(RValue::rvalue: get(thisPtr), type: cgf.getTypes().deriveThisType(rd, md));
44
45 // If there is an implicit parameter (e.g. VTT), emit it.
46 if (implicitParam) {
47 args.add(RValue::rvalue: get(implicitParam), type: implicitParamTy);
48 }
49
50 const auto *fpt = md->getType()->castAs<FunctionProtoType>();
51 RequiredArgs required =
52 RequiredArgs::getFromProtoWithExtraSlots(prototype: fpt, additional: args.size());
53 unsigned prefixSize = args.size() - 1;
54
55 // Add the rest of the call args
56 if (rtlArgs) {
57 // Special case: if the caller emitted the arguments right-to-left already
58 // (prior to emitting the *this argument), we're done. This happens for
59 // assignment operators.
60 args.addFrom(other: *rtlArgs);
61 } else if (ce) {
62 // Special case: skip first argument of CXXOperatorCall (it is "this").
63 unsigned argsToSkip = isa<CXXOperatorCallExpr>(Val: ce) ? 1 : 0;
64 cgf.emitCallArgs(args, prototype: fpt, argRange: drop_begin(RangeOrContainer: ce->arguments(), N: argsToSkip),
65 callee: ce->getDirectCallee());
66 } else {
67 assert(
68 fpt->getNumParams() == 0 &&
69 "No CallExpr specified for function with non-zero number of arguments");
70 }
71
72 // return {required, prefixSize};
73 return {.reqArgs: required, .prefixSize: prefixSize};
74}
75
76RValue CIRGenFunction::emitCXXMemberOrOperatorMemberCallExpr(
77 const CallExpr *ce, const CXXMethodDecl *md, ReturnValueSlot returnValue,
78 bool hasQualifier, NestedNameSpecifier *qualifier, bool isArrow,
79 const Expr *base) {
80 assert(isa<CXXMemberCallExpr>(ce) || isa<CXXOperatorCallExpr>(ce));
81
82 if (md->isVirtual()) {
83 cgm.errorNYI(ce->getSourceRange(),
84 "emitCXXMemberOrOperatorMemberCallExpr: virtual call");
85 return RValue::get(nullptr);
86 }
87
88 // Note on trivial assignment
89 // --------------------------
90 // Classic codegen avoids generating the trivial copy/move assignment operator
91 // when it isn't necessary, choosing instead to just produce IR with an
92 // equivalent effect. We have chosen not to do that in CIR, instead emitting
93 // trivial copy/move assignment operators and allowing later transformations
94 // to optimize them away if appropriate.
95
96 // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
97 // operator before the LHS.
98 CallArgList rtlArgStorage;
99 CallArgList *rtlArgs = nullptr;
100 if (auto *oce = dyn_cast<CXXOperatorCallExpr>(Val: ce)) {
101 if (oce->isAssignmentOp()) {
102 rtlArgs = &rtlArgStorage;
103 emitCallArgs(args&: *rtlArgs, prototype: md->getType()->castAs<FunctionProtoType>(),
104 argRange: drop_begin(RangeOrContainer: ce->arguments(), N: 1), callee: ce->getDirectCallee(),
105 /*ParamsToSkip*/ paramsToSkip: 0);
106 }
107 }
108
109 LValue thisPtr;
110 if (isArrow) {
111 LValueBaseInfo baseInfo;
112 assert(!cir::MissingFeatures::opTBAA());
113 Address thisValue = emitPointerWithAlignment(expr: base, baseInfo: &baseInfo);
114 thisPtr = makeAddrLValue(addr: thisValue, ty: base->getType(), baseInfo);
115 } else {
116 thisPtr = emitLValue(e: base);
117 }
118
119 if (isa<CXXConstructorDecl>(Val: md)) {
120 cgm.errorNYI(ce->getSourceRange(),
121 "emitCXXMemberOrOperatorMemberCallExpr: constructor call");
122 return RValue::get(nullptr);
123 }
124
125 if ((md->isTrivial() || (md->isDefaulted() && md->getParent()->isUnion())) &&
126 isa<CXXDestructorDecl>(Val: md))
127 return RValue::get(nullptr);
128
129 // Compute the function type we're calling
130 const CXXMethodDecl *calleeDecl = md;
131 const CIRGenFunctionInfo *fInfo = nullptr;
132 if (isa<CXXDestructorDecl>(Val: calleeDecl)) {
133 cgm.errorNYI(ce->getSourceRange(),
134 "emitCXXMemberOrOperatorMemberCallExpr: destructor call");
135 return RValue::get(nullptr);
136 }
137
138 fInfo = &cgm.getTypes().arrangeCXXMethodDeclaration(md: calleeDecl);
139
140 mlir::Type ty = cgm.getTypes().getFunctionType(*fInfo);
141
142 assert(!cir::MissingFeatures::sanitizers());
143 assert(!cir::MissingFeatures::emitTypeCheck());
144
145 if (isa<CXXDestructorDecl>(Val: calleeDecl)) {
146 cgm.errorNYI(ce->getSourceRange(),
147 "emitCXXMemberOrOperatorMemberCallExpr: destructor call");
148 return RValue::get(nullptr);
149 }
150
151 assert(!cir::MissingFeatures::sanitizers());
152 if (getLangOpts().AppleKext) {
153 cgm.errorNYI(ce->getSourceRange(),
154 "emitCXXMemberOrOperatorMemberCallExpr: AppleKext");
155 return RValue::get(nullptr);
156 }
157 CIRGenCallee callee =
158 CIRGenCallee::forDirect(cgm.getAddrOfFunction(md, ty), GlobalDecl(md));
159
160 return emitCXXMemberOrOperatorCall(
161 calleeDecl, callee, returnValue, thisPtr.getPointer(),
162 /*ImplicitParam=*/nullptr, QualType(), ce, rtlArgs);
163}
164
165RValue
166CIRGenFunction::emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e,
167 const CXXMethodDecl *md,
168 ReturnValueSlot returnValue) {
169 assert(md->isInstance() &&
170 "Trying to emit a member call expr on a static method!");
171 return emitCXXMemberOrOperatorMemberCallExpr(
172 ce: e, md, returnValue, /*HasQualifier=*/hasQualifier: false, /*Qualifier=*/qualifier: nullptr,
173 /*IsArrow=*/isArrow: false, base: e->getArg(Arg: 0));
174}
175
176RValue CIRGenFunction::emitCXXMemberOrOperatorCall(
177 const CXXMethodDecl *md, const CIRGenCallee &callee,
178 ReturnValueSlot returnValue, mlir::Value thisPtr, mlir::Value implicitParam,
179 QualType implicitParamTy, const CallExpr *ce, CallArgList *rtlArgs) {
180 const auto *fpt = md->getType()->castAs<FunctionProtoType>();
181 CallArgList args;
182 MemberCallInfo callInfo = commonBuildCXXMemberOrOperatorCall(
183 *this, md, thisPtr, implicitParam, implicitParamTy, ce, args, rtlArgs);
184 auto &fnInfo = cgm.getTypes().arrangeCXXMethodCall(
185 args, type: fpt, required: callInfo.reqArgs, numPrefixArgs: callInfo.prefixSize);
186 assert((ce || currSrcLoc) && "expected source location");
187 mlir::Location loc = ce ? getLoc(ce->getExprLoc()) : *currSrcLoc;
188 assert(!cir::MissingFeatures::opCallMustTail());
189 return emitCall(calleeTy: fnInfo, callee, e: returnValue, returnValue: args, nullptr, loc);
190}
191
192static mlir::Value emitCXXNewAllocSize(CIRGenFunction &cgf, const CXXNewExpr *e,
193 unsigned minElements,
194 mlir::Value &numElements,
195 mlir::Value &sizeWithoutCookie) {
196 QualType type = e->getAllocatedType();
197 mlir::Location loc = cgf.getLoc(e->getSourceRange());
198
199 if (!e->isArray()) {
200 CharUnits typeSize = cgf.getContext().getTypeSizeInChars(T: type);
201 sizeWithoutCookie = cgf.getBuilder().getConstant(
202 loc, cir::IntAttr::get(cgf.SizeTy, typeSize.getQuantity()));
203 return sizeWithoutCookie;
204 }
205
206 cgf.cgm.errorNYI(e->getSourceRange(), "emitCXXNewAllocSize: array");
207 return {};
208}
209
210static void storeAnyExprIntoOneUnit(CIRGenFunction &cgf, const Expr *init,
211 QualType allocType, Address newPtr,
212 AggValueSlot::Overlap_t mayOverlap) {
213 // FIXME: Refactor with emitExprAsInit.
214 switch (cgf.getEvaluationKind(type: allocType)) {
215 case cir::TEK_Scalar:
216 cgf.emitScalarInit(init, cgf.getLoc(init->getSourceRange()),
217 cgf.makeAddrLValue(addr: newPtr, ty: allocType), false);
218 return;
219 case cir::TEK_Complex:
220 cgf.cgm.errorNYI(init->getSourceRange(),
221 "storeAnyExprIntoOneUnit: complex");
222 return;
223 case cir::TEK_Aggregate: {
224 assert(!cir::MissingFeatures::aggValueSlotGC());
225 assert(!cir::MissingFeatures::sanitizers());
226 AggValueSlot slot = AggValueSlot::forAddr(
227 addr: newPtr, quals: allocType.getQualifiers(), isDestructed: AggValueSlot::IsDestructed,
228 isAliased: AggValueSlot::IsNotAliased, mayOverlap, isZeroed: AggValueSlot::IsNotZeroed);
229 cgf.emitAggExpr(e: init, slot);
230 return;
231 }
232 }
233 llvm_unreachable("bad evaluation kind");
234}
235
236static void emitNewInitializer(CIRGenFunction &cgf, const CXXNewExpr *e,
237 QualType elementType, mlir::Type elementTy,
238 Address newPtr, mlir::Value numElements,
239 mlir::Value allocSizeWithoutCookie) {
240 assert(!cir::MissingFeatures::generateDebugInfo());
241 if (e->isArray()) {
242 cgf.cgm.errorNYI(e->getSourceRange(), "emitNewInitializer: array");
243 } else if (const Expr *init = e->getInitializer()) {
244 storeAnyExprIntoOneUnit(cgf, init, allocType: e->getAllocatedType(), newPtr,
245 mayOverlap: AggValueSlot::DoesNotOverlap);
246 }
247}
248
249/// Emit a call to an operator new or operator delete function, as implicitly
250/// created by new-expressions and delete-expressions.
251static RValue emitNewDeleteCall(CIRGenFunction &cgf,
252 const FunctionDecl *calleeDecl,
253 const FunctionProtoType *calleeType,
254 const CallArgList &args) {
255 cir::CIRCallOpInterface callOrTryCall;
256 cir::FuncOp calleePtr = cgf.cgm.getAddrOfFunction(calleeDecl);
257 CIRGenCallee callee =
258 CIRGenCallee::forDirect(calleePtr, GlobalDecl(calleeDecl));
259 RValue rv =
260 cgf.emitCall(calleeTy: cgf.cgm.getTypes().arrangeFreeFunctionCall(args, fnType: calleeType),
261 callee, e: ReturnValueSlot(), returnValue: args, &callOrTryCall);
262
263 /// C++1y [expr.new]p10:
264 /// [In a new-expression,] an implementation is allowed to omit a call
265 /// to a replaceable global allocation function.
266 ///
267 /// We model such elidable calls with the 'builtin' attribute.
268 assert(!cir::MissingFeatures::attributeBuiltin());
269 return rv;
270}
271
272mlir::Value CIRGenFunction::emitCXXNewExpr(const CXXNewExpr *e) {
273 // The element type being allocated.
274 QualType allocType = getContext().getBaseElementType(QT: e->getAllocatedType());
275
276 // 1. Build a call to the allocation function.
277 FunctionDecl *allocator = e->getOperatorNew();
278
279 // If there is a brace-initializer, cannot allocate fewer elements than inits.
280 unsigned minElements = 0;
281 if (e->isArray() && e->hasInitializer()) {
282 cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: array initializer");
283 }
284
285 mlir::Value numElements = nullptr;
286 mlir::Value allocSizeWithoutCookie = nullptr;
287 mlir::Value allocSize = emitCXXNewAllocSize(
288 *this, e, minElements, numElements, allocSizeWithoutCookie);
289 CharUnits allocAlign = getContext().getTypeAlignInChars(T: allocType);
290
291 // Emit the allocation call.
292 Address allocation = Address::invalid();
293 CallArgList allocatorArgs;
294 if (allocator->isReservedGlobalPlacementOperator()) {
295 cgm.errorNYI(e->getSourceRange(),
296 "emitCXXNewExpr: reserved global placement operator");
297 } else {
298 const FunctionProtoType *allocatorType =
299 allocator->getType()->castAs<FunctionProtoType>();
300 unsigned paramsToSkip = 0;
301
302 // The allocation size is the first argument.
303 QualType sizeType = getContext().getSizeType();
304 allocatorArgs.add(RValue::rvalue: get(allocSize), type: sizeType);
305 ++paramsToSkip;
306
307 if (allocSize != allocSizeWithoutCookie) {
308 CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
309 allocAlign = std::max(a: allocAlign, b: cookieAlign);
310 }
311
312 // The allocation alignment may be passed as the second argument.
313 if (e->passAlignment()) {
314 cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: pass alignment");
315 }
316
317 // FIXME: Why do we not pass a CalleeDecl here?
318 emitCallArgs(args&: allocatorArgs, prototype: allocatorType, argRange: e->placement_arguments(),
319 callee: AbstractCallee(), paramsToSkip);
320 RValue rv =
321 emitNewDeleteCall(cgf&: *this, calleeDecl: allocator, calleeType: allocatorType, args: allocatorArgs);
322
323 // Set !heapallocsite metadata on the call to operator new.
324 assert(!cir::MissingFeatures::generateDebugInfo());
325
326 // If this was a call to a global replaceable allocation function that does
327 // not take an alignment argument, the allocator is known to produce storage
328 // that's suitably aligned for any object that fits, up to a known
329 // threshold. Otherwise assume it's suitably aligned for the allocated type.
330 CharUnits allocationAlign = allocAlign;
331 if (!e->passAlignment() &&
332 allocator->isReplaceableGlobalAllocationFunction()) {
333 const TargetInfo &target = cgm.getASTContext().getTargetInfo();
334 unsigned allocatorAlign = llvm::bit_floor(Value: std::min<uint64_t>(
335 a: target.getNewAlign(), b: getContext().getTypeSize(T: allocType)));
336 allocationAlign = std::max(
337 a: allocationAlign, b: getContext().toCharUnitsFromBits(BitSize: allocatorAlign));
338 }
339
340 mlir::Value allocPtr = rv.getValue();
341 allocation = Address(
342 allocPtr, mlir::cast<cir::PointerType>(allocPtr.getType()).getPointee(),
343 allocationAlign);
344 }
345
346 // Emit a null check on the allocation result if the allocation
347 // function is allowed to return null (because it has a non-throwing
348 // exception spec or is the reserved placement new) and we have an
349 // interesting initializer will be running sanitizers on the initialization.
350 bool nullCheck = e->shouldNullCheckAllocation() &&
351 (!allocType.isPODType(Context: getContext()) || e->hasInitializer());
352 assert(!cir::MissingFeatures::exprNewNullCheck());
353 if (nullCheck)
354 cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: null check");
355
356 // If there's an operator delete, enter a cleanup to call it if an
357 // exception is thrown.
358 if (e->getOperatorDelete() &&
359 !e->getOperatorDelete()->isReservedGlobalPlacementOperator())
360 cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: operator delete");
361
362 if (allocSize != allocSizeWithoutCookie)
363 cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: array with cookies");
364
365 mlir::Type elementTy = convertTypeForMem(allocType);
366 Address result = builder.createElementBitCast(getLoc(e->getSourceRange()),
367 allocation, elementTy);
368
369 // Passing pointer through launder.invariant.group to avoid propagation of
370 // vptrs information which may be included in previous type.
371 // To not break LTO with different optimizations levels, we do it regardless
372 // of optimization level.
373 if (cgm.getCodeGenOpts().StrictVTablePointers &&
374 allocator->isReservedGlobalPlacementOperator())
375 cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: strict vtable pointers");
376
377 assert(!cir::MissingFeatures::sanitizers());
378
379 emitNewInitializer(*this, e, allocType, elementTy, result, numElements,
380 allocSizeWithoutCookie);
381 return result.getPointer();
382}
383

// (origin note) source file: clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp