//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Emit Expr nodes with scalar CIR types as CIR code.
//
//===----------------------------------------------------------------------===//

#include "CIRGenFunction.h"
#include "CIRGenValue.h"

#include "clang/AST/Expr.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/CIR/MissingFeatures.h"

#include "mlir/IR/Location.h"
#include "mlir/IR/Value.h"

#include <cassert>
#include <utility>

using namespace clang;
using namespace clang::CIRGen;

namespace {

struct BinOpInfo {
  mlir::Value lhs;
  mlir::Value rhs;
  SourceRange loc;
  QualType fullType;             // Type of operands and result
  QualType compType;             // Type used for computations. Element type
                                 // for vectors, otherwise same as fullType.
  BinaryOperator::Opcode opcode; // Opcode of BinOp to perform
  FPOptions fpfeatures;
  const Expr *e; // Entire expr, for error reporting. May not be a binop.

  /// Check if the binop computes a division or a remainder.
  bool isDivRemOp() const {
    return opcode == BO_Div || opcode == BO_Rem || opcode == BO_DivAssign ||
           opcode == BO_RemAssign;
  }

  /// Check if the binop can result in integer overflow.
  bool mayHaveIntegerOverflow() const {
    // Without constant input, we can't rule out overflow.
    auto lhsci = dyn_cast<cir::ConstantOp>(lhs.getDefiningOp());
    auto rhsci = dyn_cast<cir::ConstantOp>(rhs.getDefiningOp());
    if (!lhsci || !rhsci)
      return true;

    assert(!cir::MissingFeatures::mayHaveIntegerOverflow());
    // TODO(cir): For now we just assume that we might overflow
    return true;
  }

  /// Check if at least one operand is a fixed point type. In such cases,
  /// this operation did not follow usual arithmetic conversion and both
  /// operands might not be of the same type.
  bool isFixedPointOp() const {
    // We cannot simply check the result type since comparison operations
    // return an int.
    if (const auto *binOp = llvm::dyn_cast<BinaryOperator>(e)) {
      QualType lhstype = binOp->getLHS()->getType();
      QualType rhstype = binOp->getRHS()->getType();
      return lhstype->isFixedPointType() || rhstype->isFixedPointType();
    }
    if (const auto *unop = llvm::dyn_cast<UnaryOperator>(e))
      return unop->getSubExpr()->getType()->isFixedPointType();
    return false;
  }
};

class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
  CIRGenFunction &cgf;
  CIRGenBuilderTy &builder;
  bool ignoreResultAssign = false;

public:
  ScalarExprEmitter(CIRGenFunction &cgf, CIRGenBuilderTy &builder)
      : cgf(cgf), builder(builder) {}

  //===--------------------------------------------------------------------===//
  // Utilities
  //===--------------------------------------------------------------------===//

  mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType) {
    return builder.createFloatingCast(result, cgf.convertType(promotionType));
  }

  mlir::Value emitUnPromotedValue(mlir::Value result, QualType exprType) {
    return builder.createFloatingCast(result, cgf.convertType(exprType));
  }

  mlir::Value emitPromoted(const Expr *e, QualType promotionType);

  mlir::Value maybePromoteBoolResult(mlir::Value value,
                                     mlir::Type dstTy) const {
    if (mlir::isa<cir::IntType>(dstTy))
      return builder.createBoolToInt(value, dstTy);
    if (mlir::isa<cir::BoolType>(dstTy))
      return value;
    llvm_unreachable("Can only promote integer or boolean types");
  }

  //===--------------------------------------------------------------------===//
  // Visitor Methods
  //===--------------------------------------------------------------------===//

  mlir::Value Visit(Expr *e) {
    return StmtVisitor<ScalarExprEmitter, mlir::Value>::Visit(e);
  }

  mlir::Value VisitStmt(Stmt *s) {
    llvm_unreachable("Statement passed to ScalarExprEmitter");
  }

  mlir::Value VisitExpr(Expr *e) {
    cgf.getCIRGenModule().errorNYI(
        e->getSourceRange(), "scalar expression kind: ", e->getStmtClassName());
    return {};
  }

  mlir::Value VisitParenExpr(ParenExpr *pe) { return Visit(pe->getSubExpr()); }

  /// Emits the address of the l-value, then loads and returns the result.
  mlir::Value emitLoadOfLValue(const Expr *e) {
    LValue lv = cgf.emitLValue(e);
    // FIXME: add something akin to EmitLValueAlignmentAssumption(E, V);
    return cgf.emitLoadOfLValue(lv, e->getExprLoc()).getScalarVal();
  }

  mlir::Value emitLoadOfLValue(LValue lv, SourceLocation loc) {
    return cgf.emitLoadOfLValue(lv, loc).getScalarVal();
  }

  // l-values
  mlir::Value VisitDeclRefExpr(DeclRefExpr *e) {
    if (CIRGenFunction::ConstantEmission constant = cgf.tryEmitAsConstant(e))
      return cgf.emitScalarConstant(constant, e);

    return emitLoadOfLValue(e);
  }

  mlir::Value VisitIntegerLiteral(const IntegerLiteral *e) {
    mlir::Type type = cgf.convertType(e->getType());
    return builder.create<cir::ConstantOp>(
        cgf.getLoc(e->getExprLoc()),
        builder.getAttr<cir::IntAttr>(type, e->getValue()));
  }

  mlir::Value VisitFloatingLiteral(const FloatingLiteral *e) {
    mlir::Type type = cgf.convertType(e->getType());
    assert(mlir::isa<cir::CIRFPTypeInterface>(type) &&
           "expect floating-point type");
    return builder.create<cir::ConstantOp>(
        cgf.getLoc(e->getExprLoc()),
        builder.getAttr<cir::FPAttr>(type, e->getValue()));
  }

  mlir::Value VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *e) {
    return builder.getBool(e->getValue(), cgf.getLoc(e->getExprLoc()));
  }

  mlir::Value VisitCastExpr(CastExpr *e);
  mlir::Value VisitCallExpr(const CallExpr *e);

  mlir::Value VisitArraySubscriptExpr(ArraySubscriptExpr *e) {
    if (e->getBase()->getType()->isVectorType()) {
      assert(!cir::MissingFeatures::scalableVectors());

      const mlir::Location loc = cgf.getLoc(e->getSourceRange());
      const mlir::Value vecValue = Visit(e->getBase());
      const mlir::Value indexValue = Visit(e->getIdx());
      return cgf.builder.create<cir::VecExtractOp>(loc, vecValue, indexValue);
    }
    // Just load the lvalue formed by the subscript expression.
    return emitLoadOfLValue(e);
  }

  mlir::Value VisitShuffleVectorExpr(ShuffleVectorExpr *e) {
    if (e->getNumSubExprs() == 2) {
      // The undocumented form of __builtin_shufflevector.
      mlir::Value inputVec = Visit(e->getExpr(0));
      mlir::Value indexVec = Visit(e->getExpr(1));
      return cgf.builder.create<cir::VecShuffleDynamicOp>(
          cgf.getLoc(e->getSourceRange()), inputVec, indexVec);
    }

    mlir::Value vec1 = Visit(e->getExpr(0));
    mlir::Value vec2 = Visit(e->getExpr(1));

    // The documented form of __builtin_shufflevector, where the indices are
    // a variable number of integer constants. The constants will be stored
    // in an ArrayAttr.
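    // For example, __builtin_shufflevector(a, b, 0, 4) selects element 0 of
    // 'a' and element 0 of 'b': shuffle indices refer to the concatenation
    // of the two input vectors, so for 4-element inputs, indices 0-3 come
    // from 'a' and 4-7 come from 'b'.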
    SmallVector<mlir::Attribute, 8> indices;
    for (unsigned i = 2; i < e->getNumSubExprs(); ++i) {
      indices.push_back(
          cir::IntAttr::get(cgf.builder.getSInt64Ty(),
                            e->getExpr(i)
                                ->EvaluateKnownConstInt(cgf.getContext())
                                .getSExtValue()));
    }

    return cgf.builder.create<cir::VecShuffleOp>(
        cgf.getLoc(e->getSourceRange()), cgf.convertType(e->getType()), vec1,
        vec2, cgf.builder.getArrayAttr(indices));
  }

  mlir::Value VisitConvertVectorExpr(ConvertVectorExpr *e) {
    // __builtin_convertvector is an element-wise cast, and is implemented as a
    // regular cast. The back end handles casts of vectors correctly.
    return emitScalarConversion(Visit(e->getSrcExpr()),
                                e->getSrcExpr()->getType(), e->getType(),
                                e->getSourceRange().getBegin());
  }

  mlir::Value VisitMemberExpr(MemberExpr *e);

  mlir::Value VisitInitListExpr(InitListExpr *e);

  mlir::Value VisitExplicitCastExpr(ExplicitCastExpr *e) {
    return VisitCastExpr(e);
  }

  mlir::Value VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *e) {
    return cgf.cgm.emitNullConstant(e->getType(),
                                    cgf.getLoc(e->getSourceRange()));
  }

  /// Perform a pointer to boolean conversion.
  mlir::Value emitPointerToBoolConversion(mlir::Value v, QualType qt) {
    // TODO(cir): comparing the ptr to null is done when lowering CIR to LLVM.
    // We might want to have a separate pass for these types of conversions.
    return cgf.getBuilder().createPtrToBoolCast(v);
  }

  mlir::Value emitFloatToBoolConversion(mlir::Value src, mlir::Location loc) {
    cir::BoolType boolTy = builder.getBoolTy();
    return builder.create<cir::CastOp>(loc, boolTy,
                                       cir::CastKind::float_to_bool, src);
  }

  mlir::Value emitIntToBoolConversion(mlir::Value srcVal, mlir::Location loc) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again.
    // TODO: optimize this common case here or leave it for later
    // CIR passes?
    cir::BoolType boolTy = builder.getBoolTy();
    return builder.create<cir::CastOp>(loc, boolTy, cir::CastKind::int_to_bool,
                                       srcVal);
  }

  /// Convert the specified expression value to a boolean (!cir.bool) truth
  /// value. This is equivalent to "Val != 0".
  mlir::Value emitConversionToBool(mlir::Value src, QualType srcType,
                                   mlir::Location loc) {
    assert(srcType.isCanonical() && "EmitScalarConversion strips typedefs");

    if (srcType->isRealFloatingType())
      return emitFloatToBoolConversion(src, loc);

    if (llvm::isa<MemberPointerType>(srcType)) {
      cgf.getCIRGenModule().errorNYI(loc, "member pointer to bool conversion");
      return builder.getFalse(loc);
    }

    if (srcType->isIntegerType())
      return emitIntToBoolConversion(src, loc);

    assert(::mlir::isa<cir::PointerType>(src.getType()));
    return emitPointerToBoolConversion(src, srcType);
  }

  // Emit a conversion from the specified type to the specified destination
  // type, both of which are CIR scalar types.
  struct ScalarConversionOpts {
    bool treatBooleanAsSigned;
    bool emitImplicitIntegerTruncationChecks;
    bool emitImplicitIntegerSignChangeChecks;

    ScalarConversionOpts()
        : treatBooleanAsSigned(false),
          emitImplicitIntegerTruncationChecks(false),
          emitImplicitIntegerSignChangeChecks(false) {}

    ScalarConversionOpts(clang::SanitizerSet sanOpts)
        : treatBooleanAsSigned(false),
          emitImplicitIntegerTruncationChecks(
              sanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          emitImplicitIntegerSignChangeChecks(
              sanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
  };

  // Conversion from bool, integral, or floating-point to integral or
  // floating-point. Conversions involving other types are handled elsewhere.
  // Conversion to bool is handled elsewhere because that's a comparison against
  // zero, not a simple cast. This handles both individual scalars and vectors.
  mlir::Value emitScalarCast(mlir::Value src, QualType srcType,
                             QualType dstType, mlir::Type srcTy,
                             mlir::Type dstTy, ScalarConversionOpts opts) {
    assert(!srcType->isMatrixType() && !dstType->isMatrixType() &&
           "Internal error: matrix types not handled by this function.");
    assert(!(mlir::isa<mlir::IntegerType>(srcTy) ||
             mlir::isa<mlir::IntegerType>(dstTy)) &&
           "Obsolete code. Don't use mlir::IntegerType with CIR.");

    mlir::Type fullDstTy = dstTy;
    if (mlir::isa<cir::VectorType>(srcTy) &&
        mlir::isa<cir::VectorType>(dstTy)) {
      // Use the element types of the vectors to figure out the CastKind.
      srcTy = mlir::dyn_cast<cir::VectorType>(srcTy).getElementType();
      dstTy = mlir::dyn_cast<cir::VectorType>(dstTy).getElementType();
    }

    std::optional<cir::CastKind> castKind;

    if (mlir::isa<cir::BoolType>(srcTy)) {
      if (opts.treatBooleanAsSigned)
        cgf.getCIRGenModule().errorNYI("signed bool");
      if (cgf.getBuilder().isInt(dstTy))
        castKind = cir::CastKind::bool_to_int;
      else if (mlir::isa<cir::CIRFPTypeInterface>(dstTy))
        castKind = cir::CastKind::bool_to_float;
      else
        llvm_unreachable("Internal error: Cast to unexpected type");
    } else if (cgf.getBuilder().isInt(srcTy)) {
      if (cgf.getBuilder().isInt(dstTy))
        castKind = cir::CastKind::integral;
      else if (mlir::isa<cir::CIRFPTypeInterface>(dstTy))
        castKind = cir::CastKind::int_to_float;
      else
        llvm_unreachable("Internal error: Cast to unexpected type");
    } else if (mlir::isa<cir::CIRFPTypeInterface>(srcTy)) {
      if (cgf.getBuilder().isInt(dstTy)) {
        // If we can't recognize overflow as undefined behavior, assume that
        // overflow saturates. This protects against normal optimizations if we
        // are compiling with non-standard FP semantics.
        if (!cgf.cgm.getCodeGenOpts().StrictFloatCastOverflow)
          cgf.getCIRGenModule().errorNYI("strict float cast overflow");
        assert(!cir::MissingFeatures::fpConstraints());
        castKind = cir::CastKind::float_to_int;
      } else if (mlir::isa<cir::CIRFPTypeInterface>(dstTy)) {
        cgf.getCIRGenModule().errorNYI("floating point casts");
        return cgf.createDummyValue(src.getLoc(), dstType);
      } else {
        llvm_unreachable("Internal error: Cast to unexpected type");
      }
    } else {
      llvm_unreachable("Internal error: Cast from unexpected type");
    }

    assert(castKind.has_value() && "Internal error: CastKind not set.");
    return builder.create<cir::CastOp>(src.getLoc(), fullDstTy, *castKind, src);
  }

  mlir::Value VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *e);
  mlir::Value
  VisitAbstractConditionalOperator(const AbstractConditionalOperator *e);

  // Unary Operators.
  mlir::Value VisitUnaryPostDec(const UnaryOperator *e) {
    LValue lv = cgf.emitLValue(e->getSubExpr());
    return emitScalarPrePostIncDec(e, lv, false, false);
  }
  mlir::Value VisitUnaryPostInc(const UnaryOperator *e) {
    LValue lv = cgf.emitLValue(e->getSubExpr());
    return emitScalarPrePostIncDec(e, lv, true, false);
  }
  mlir::Value VisitUnaryPreDec(const UnaryOperator *e) {
    LValue lv = cgf.emitLValue(e->getSubExpr());
    return emitScalarPrePostIncDec(e, lv, false, true);
  }
  mlir::Value VisitUnaryPreInc(const UnaryOperator *e) {
    LValue lv = cgf.emitLValue(e->getSubExpr());
    return emitScalarPrePostIncDec(e, lv, true, true);
  }
  mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv,
                                      bool isInc, bool isPre) {
    if (cgf.getLangOpts().OpenMP)
      cgf.cgm.errorNYI(e->getSourceRange(), "inc/dec OpenMP");

    QualType type = e->getSubExpr()->getType();

    mlir::Value value;
    mlir::Value input;

    if (type->getAs<AtomicType>()) {
      cgf.cgm.errorNYI(e->getSourceRange(), "Atomic inc/dec");
      // TODO(cir): This is not correct, but it will produce reasonable code
      // until atomic operations are implemented.
      value = cgf.emitLoadOfLValue(lv, e->getExprLoc()).getScalarVal();
      input = value;
    } else {
      value = cgf.emitLoadOfLValue(lv, e->getExprLoc()).getScalarVal();
      input = value;
    }

    // NOTE: When possible, more frequent cases are handled first.

    // Special case of integer increment that we have to check first: bool++.
    // Due to promotion rules, we get:
    //   bool++ -> bool = bool + 1
    //          -> bool = (int)bool + 1
    //          -> bool = ((int)bool + 1 != 0)
    // An interesting aspect of this is that increment is always true.
    // Decrement does not have this property.
    if (isInc && type->isBooleanType()) {
      value = builder.getTrue(cgf.getLoc(e->getExprLoc()));
    } else if (type->isIntegerType()) {
      QualType promotedType;
      bool canPerformLossyDemotionCheck = false;
      if (cgf.getContext().isPromotableIntegerType(type)) {
        promotedType = cgf.getContext().getPromotedIntegerType(type);
        assert(promotedType != type && "Shouldn't promote to the same type.");
        canPerformLossyDemotionCheck = true;
        canPerformLossyDemotionCheck &=
            cgf.getContext().getCanonicalType(type) !=
            cgf.getContext().getCanonicalType(promotedType);
        canPerformLossyDemotionCheck &=
            type->isIntegerType() && promotedType->isIntegerType();

        // TODO(cir): Currently, we store bitwidths in CIR types only for
        // integers. This might also be required for other types.

        assert(
            (!canPerformLossyDemotionCheck ||
             type->isSignedIntegerOrEnumerationType() ||
             promotedType->isSignedIntegerOrEnumerationType() ||
             mlir::cast<cir::IntType>(cgf.convertType(type)).getWidth() ==
                 mlir::cast<cir::IntType>(cgf.convertType(promotedType))
                     .getWidth()) &&
            "The following check expects that if we do promotion to different "
            "underlying canonical type, at least one of the types (either "
            "base or promoted) will be signed, or the bitwidths will match.");
      }

      assert(!cir::MissingFeatures::sanitizers());
      if (e->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
        value = emitIncDecConsiderOverflowBehavior(e, value, isInc);
      } else {
        cir::UnaryOpKind kind =
            e->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
        // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
        value = emitUnaryOp(e, kind, input, /*nsw=*/false);
      }
    } else if (const PointerType *ptr = type->getAs<PointerType>()) {
      QualType type = ptr->getPointeeType();
      if (cgf.getContext().getAsVariableArrayType(type)) {
        // VLA types don't have constant size.
        cgf.cgm.errorNYI(e->getSourceRange(), "Pointer arithmetic on VLA");
        return {};
      } else if (type->isFunctionType()) {
        // Arithmetic on function pointers (!) is just +-1.
        cgf.cgm.errorNYI(e->getSourceRange(),
                         "Pointer arithmetic on function pointer");
        return {};
      } else {
        // For everything else, we can just do a simple increment.
        mlir::Location loc = cgf.getLoc(e->getSourceRange());
        CIRGenBuilderTy &builder = cgf.getBuilder();
        int amount = (isInc ? 1 : -1);
        mlir::Value amt = builder.getSInt32(amount, loc);
        assert(!cir::MissingFeatures::sanitizers());
        value = builder.createPtrStride(loc, value, amt);
      }
    } else if (type->isVectorType()) {
      cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec vector");
      return {};
    } else if (type->isRealFloatingType()) {
      assert(!cir::MissingFeatures::cgFPOptionsRAII());

      if (type->isHalfType() &&
          !cgf.getContext().getLangOpts().NativeHalfType) {
        cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec half");
        return {};
      }

      if (mlir::isa<cir::SingleType, cir::DoubleType>(value.getType())) {
        // Create the inc/dec operation.
        // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
        cir::UnaryOpKind kind =
            (isInc ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec);
        value = emitUnaryOp(e, kind, value);
      } else {
        cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fp type");
        return {};
      }
    } else if (type->isFixedPointType()) {
      cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fixed point");
      return {};
    } else {
      assert(type->castAs<ObjCObjectPointerType>());
      cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec ObjectiveC pointer");
      return {};
    }

    CIRGenFunction::SourceLocRAIIObject sourceloc{
        cgf, cgf.getLoc(e->getSourceRange())};

    // Store the updated result through the lvalue
    if (lv.isBitField()) {
      cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec bitfield");
      return {};
    } else {
      cgf.emitStoreThroughLValue(RValue::get(value), lv);
    }

    // If this is a postinc, return the value read from memory, otherwise use
    // the updated value.
    return isPre ? value : input;
  }

  mlir::Value emitIncDecConsiderOverflowBehavior(const UnaryOperator *e,
                                                 mlir::Value inVal,
                                                 bool isInc) {
    cir::UnaryOpKind kind =
        e->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
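    // The mapping here follows the clang driver flags: SOB_Defined
    // corresponds to -fwrapv (wrapping is well defined, so no nsw flag),
    // SOB_Undefined is the default semantics (nsw), and SOB_Trapping
    // corresponds to -ftrapv.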
    switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
    case LangOptions::SOB_Defined:
      return emitUnaryOp(e, kind, inVal, /*nsw=*/false);
    case LangOptions::SOB_Undefined:
      assert(!cir::MissingFeatures::sanitizers());
      return emitUnaryOp(e, kind, inVal, /*nsw=*/true);
    case LangOptions::SOB_Trapping:
      if (!e->canOverflow())
        return emitUnaryOp(e, kind, inVal, /*nsw=*/true);
      cgf.cgm.errorNYI(e->getSourceRange(), "inc/dec overflow SOB_Trapping");
      return {};
    }
    llvm_unreachable("Unexpected signed overflow behavior kind");
  }

  mlir::Value VisitUnaryAddrOf(const UnaryOperator *e) {
    if (llvm::isa<MemberPointerType>(e->getType())) {
      cgf.cgm.errorNYI(e->getSourceRange(), "Address of member pointer");
      return builder.getNullPtr(cgf.convertType(e->getType()),
                                cgf.getLoc(e->getExprLoc()));
    }

    return cgf.emitLValue(e->getSubExpr()).getPointer();
  }

  mlir::Value VisitUnaryDeref(const UnaryOperator *e) {
    if (e->getType()->isVoidType())
      return Visit(e->getSubExpr()); // the actual value should be unused
    return emitLoadOfLValue(e);
  }

  mlir::Value VisitUnaryPlus(const UnaryOperator *e) {
    return emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Plus);
  }

  mlir::Value VisitUnaryMinus(const UnaryOperator *e) {
    return emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Minus);
  }

  mlir::Value emitUnaryPlusOrMinus(const UnaryOperator *e,
                                   cir::UnaryOpKind kind) {
    ignoreResultAssign = false;

    QualType promotionType = getPromotionType(e->getSubExpr()->getType());

    mlir::Value operand;
    if (!promotionType.isNull())
      operand = cgf.emitPromotedScalarExpr(e->getSubExpr(), promotionType);
    else
      operand = Visit(e->getSubExpr());

    bool nsw =
        kind == cir::UnaryOpKind::Minus && e->getType()->isSignedIntegerType();

    // NOTE: LLVM codegen will lower this directly to either a FNeg
    // or a Sub instruction. In CIR this will be handled later in LowerToLLVM.
    mlir::Value result = emitUnaryOp(e, kind, operand, nsw);
    if (result && !promotionType.isNull())
      return emitUnPromotedValue(result, e->getType());
    return result;
  }

  mlir::Value emitUnaryOp(const UnaryOperator *e, cir::UnaryOpKind kind,
                          mlir::Value input, bool nsw = false) {
    return builder.create<cir::UnaryOp>(
        cgf.getLoc(e->getSourceRange().getBegin()), input.getType(), kind,
        input, nsw);
  }

  mlir::Value VisitUnaryNot(const UnaryOperator *e) {
    ignoreResultAssign = false;
    mlir::Value op = Visit(e->getSubExpr());
    return emitUnaryOp(e, cir::UnaryOpKind::Not, op);
  }

  mlir::Value VisitUnaryLNot(const UnaryOperator *e);

  mlir::Value VisitCXXThisExpr(CXXThisExpr *te) { return cgf.loadCXXThis(); }

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are CIR scalar types.
  /// TODO: do we need ScalarConversionOpts here? Should be done in another
  /// pass.
  mlir::Value
  emitScalarConversion(mlir::Value src, QualType srcType, QualType dstType,
                       SourceLocation loc,
                       ScalarConversionOpts opts = ScalarConversionOpts()) {
    // All conversions involving fixed point types should be handled by the
    // emitFixedPoint family of functions. This is done to prevent bloating
    // this function further, and although fixed point numbers are represented
    // by integers, we do not want to follow any logic that assumes they
    // should be treated as integers.
    // TODO(leonardchan): When necessary, add another if statement checking for
    // conversions to fixed point types from other types.
    if (srcType->isFixedPointType() || dstType->isFixedPointType()) {
      cgf.getCIRGenModule().errorNYI(loc, "fixed point conversions");
      return {};
    }

    srcType = srcType.getCanonicalType();
    dstType = dstType.getCanonicalType();
    if (srcType == dstType) {
      if (opts.emitImplicitIntegerSignChangeChecks)
        cgf.getCIRGenModule().errorNYI(loc,
                                       "implicit integer sign change checks");
      return src;
    }

    if (dstType->isVoidType())
      return {};

    mlir::Type mlirSrcType = src.getType();

    // Handle conversions to bool first, they are special: comparisons against
    // 0.
    if (dstType->isBooleanType())
      return emitConversionToBool(src, srcType, cgf.getLoc(loc));

    mlir::Type mlirDstType = cgf.convertType(dstType);

    if (srcType->isHalfType() &&
        !cgf.getContext().getLangOpts().NativeHalfType) {
      // Cast to FP using the intrinsic if the half type itself isn't
      // supported.
      if (mlir::isa<cir::CIRFPTypeInterface>(mlirDstType)) {
        if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics())
          cgf.getCIRGenModule().errorNYI(loc,
                                         "cast via llvm.convert.from.fp16");
      } else {
        // Cast to other types through float, using either the intrinsic or
        // FPExt, depending on whether the half type itself is supported (as
        // opposed to operations on half, available with NativeHalfType).
        if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics())
          cgf.getCIRGenModule().errorNYI(loc,
                                         "cast via llvm.convert.from.fp16");
        // FIXME(cir): For now let's pretend we shouldn't use the conversion
        // intrinsics and insert a cast here unconditionally.
        src = builder.createCast(cgf.getLoc(loc), cir::CastKind::floating, src,
                                 cgf.FloatTy);
        srcType = cgf.getContext().FloatTy;
        mlirSrcType = cgf.FloatTy;
      }
    }

    // TODO(cir): LLVM codegen ignores conversions like int -> uint;
    // is there anything to be done for CIR here?
    if (mlirSrcType == mlirDstType) {
      if (opts.emitImplicitIntegerSignChangeChecks)
        cgf.getCIRGenModule().errorNYI(loc,
                                       "implicit integer sign change checks");
      return src;
    }

    // Handle pointer conversions next: pointers can only be converted to/from
    // other pointers and integers. Check for pointer types in terms of LLVM,
    // as some native types (like Obj-C id) may map to a pointer type.
    if (auto dstPT = dyn_cast<cir::PointerType>(mlirDstType)) {
      cgf.getCIRGenModule().errorNYI(loc, "pointer casts");
      return builder.getNullPtr(dstPT, src.getLoc());
    }

    if (isa<cir::PointerType>(mlirSrcType)) {
      // Must be a ptr-to-int cast.
      assert(isa<cir::IntType>(mlirDstType) && "not ptr->int?");
      return builder.createPtrToInt(src, mlirDstType);
    }

    // A scalar can be splatted to an extended vector of the same element type
    if (dstType->isExtVectorType() && !srcType->isVectorType()) {
      // Sema should add casts to make sure that the source expression's type
      // is the same as the vector's element type (sans qualifiers)
      assert(dstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
                 srcType.getTypePtr() &&
             "Splatted expr doesn't match with vector element type?");

      cgf.getCIRGenModule().errorNYI(loc, "vector splatting");
      return {};
    }

    if (srcType->isMatrixType() && dstType->isMatrixType()) {
      cgf.getCIRGenModule().errorNYI(loc,
                                     "matrix type to matrix type conversion");
      return {};
    }
    assert(!srcType->isMatrixType() && !dstType->isMatrixType() &&
           "Internal error: conversion between matrix type and scalar type");

    // Finally, we have the arithmetic types or vectors of arithmetic types.
    mlir::Value res = nullptr;
    mlir::Type resTy = mlirDstType;

    res = emitScalarCast(src, srcType, dstType, mlirSrcType, mlirDstType, opts);

    if (mlirDstType != resTy) {
      if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
        cgf.getCIRGenModule().errorNYI(loc, "cast via llvm.convert.to.fp16");
      }
      // FIXME(cir): For now we never use FP16 conversion intrinsics even if
      // required by the target. Change that once this is implemented
      res = builder.createCast(cgf.getLoc(loc), cir::CastKind::floating, res,
                               resTy);
    }

    if (opts.emitImplicitIntegerTruncationChecks)
      cgf.getCIRGenModule().errorNYI(loc, "implicit integer truncation checks");

    if (opts.emitImplicitIntegerSignChangeChecks)
      cgf.getCIRGenModule().errorNYI(loc,
                                     "implicit integer sign change checks");

    return res;
  }

  BinOpInfo emitBinOps(const BinaryOperator *e,
                       QualType promotionType = QualType()) {
    BinOpInfo result;
    result.lhs = cgf.emitPromotedScalarExpr(e->getLHS(), promotionType);
    result.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionType);
    if (!promotionType.isNull())
      result.fullType = promotionType;
    else
      result.fullType = e->getType();
    result.compType = result.fullType;
    if (const auto *vecType = dyn_cast_or_null<VectorType>(result.fullType)) {
      result.compType = vecType->getElementType();
    }
    result.opcode = e->getOpcode();
    result.loc = e->getSourceRange();
    // TODO(cir): result.fpfeatures
    assert(!cir::MissingFeatures::cgFPOptionsRAII());
    result.e = e;
    return result;
  }

  mlir::Value emitMul(const BinOpInfo &ops);
  mlir::Value emitDiv(const BinOpInfo &ops);
  mlir::Value emitRem(const BinOpInfo &ops);
  mlir::Value emitAdd(const BinOpInfo &ops);
  mlir::Value emitSub(const BinOpInfo &ops);
  mlir::Value emitShl(const BinOpInfo &ops);
  mlir::Value emitShr(const BinOpInfo &ops);
  mlir::Value emitAnd(const BinOpInfo &ops);
  mlir::Value emitXor(const BinOpInfo &ops);
  mlir::Value emitOr(const BinOpInfo &ops);

  LValue emitCompoundAssignLValue(
      const CompoundAssignOperator *e,
      mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &),
      mlir::Value &result);
  mlir::Value
  emitCompoundAssign(const CompoundAssignOperator *e,
                     mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &));

  // TODO(cir): Candidate to be in a common AST helper between CIR and LLVM
  // codegen.
  QualType getPromotionType(QualType ty) {
    if (ty->getAs<ComplexType>()) {
      assert(!cir::MissingFeatures::complexType());
      cgf.cgm.errorNYI("promotion to complex type");
      return QualType();
    }
    if (ty.UseExcessPrecision(cgf.getContext())) {
      if (ty->getAs<VectorType>()) {
        assert(!cir::MissingFeatures::vectorType());
        cgf.cgm.errorNYI("promotion to vector type");
        return QualType();
      }
      return cgf.getContext().FloatTy;
    }
    return QualType();
  }

// Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP)                                                        \
  mlir::Value VisitBin##OP(const BinaryOperator *e) {                          \
    QualType promotionTy = getPromotionType(e->getType());                     \
    auto result = emit##OP(emitBinOps(e, promotionTy));                        \
    if (result && !promotionTy.isNull())                                       \
      result = emitUnPromotedValue(result, e->getType());                      \
    return result;                                                             \
  }                                                                            \
  mlir::Value VisitBin##OP##Assign(const CompoundAssignOperator *e) {          \
    return emitCompoundAssign(e, &ScalarExprEmitter::emit##OP);                \
  }
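// As an illustration, HANDLEBINOP(Mul) expands to VisitBinMul, which
// promotes the operands, emits the multiply via emitMul, and un-promotes
// the result, and to VisitBinMulAssign, which routes the compound
// assignment '*=' through the same emitMul helper.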

  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP

  mlir::Value emitCmp(const BinaryOperator *e) {
    const mlir::Location loc = cgf.getLoc(e->getExprLoc());
    mlir::Value result;
    QualType lhsTy = e->getLHS()->getType();
    QualType rhsTy = e->getRHS()->getType();

    auto clangCmpToCIRCmp =
        [](clang::BinaryOperatorKind clangCmp) -> cir::CmpOpKind {
      switch (clangCmp) {
      case BO_LT:
        return cir::CmpOpKind::lt;
      case BO_GT:
        return cir::CmpOpKind::gt;
      case BO_LE:
        return cir::CmpOpKind::le;
      case BO_GE:
        return cir::CmpOpKind::ge;
      case BO_EQ:
        return cir::CmpOpKind::eq;
      case BO_NE:
        return cir::CmpOpKind::ne;
      default:
        llvm_unreachable("unsupported comparison kind for cir.cmp");
      }
    };

    cir::CmpOpKind kind = clangCmpToCIRCmp(e->getOpcode());
    if (lhsTy->getAs<MemberPointerType>()) {
      assert(!cir::MissingFeatures::dataMemberType());
      assert(e->getOpcode() == BO_EQ || e->getOpcode() == BO_NE);
      mlir::Value lhs = cgf.emitScalarExpr(e->getLHS());
      mlir::Value rhs = cgf.emitScalarExpr(e->getRHS());
      result = builder.createCompare(loc, kind, lhs, rhs);
    } else if (!lhsTy->isAnyComplexType() && !rhsTy->isAnyComplexType()) {
      BinOpInfo boInfo = emitBinOps(e);
      mlir::Value lhs = boInfo.lhs;
      mlir::Value rhs = boInfo.rhs;

      if (lhsTy->isVectorType()) {
        if (!e->getType()->isVectorType()) {
          // If AltiVec, the comparison results in a numeric type, so we use
          // intrinsics comparing vectors and giving 0 or 1 as a result
          cgf.cgm.errorNYI(loc, "AltiVec comparison");
        } else {
          // Other kinds of vectors. Element-wise comparison returning
          // a vector.
          result = builder.create<cir::VecCmpOp>(
              cgf.getLoc(boInfo.loc), cgf.convertType(boInfo.fullType), kind,
              boInfo.lhs, boInfo.rhs);
        }
      } else if (boInfo.isFixedPointOp()) {
        assert(!cir::MissingFeatures::fixedPointType());
        cgf.cgm.errorNYI(loc, "fixed point comparisons");
        result = builder.getBool(false, loc);
      } else {
        // integers and pointers
        if (cgf.cgm.getCodeGenOpts().StrictVTablePointers &&
            mlir::isa<cir::PointerType>(lhs.getType()) &&
            mlir::isa<cir::PointerType>(rhs.getType())) {
          cgf.cgm.errorNYI(loc, "strict vtable pointer comparisons");
        }

        result = builder.createCompare(loc, kind, lhs, rhs);
      }
    } else {
      // Complex Comparison: can only be an equality comparison.
      assert(!cir::MissingFeatures::complexType());
      cgf.cgm.errorNYI(loc, "complex comparison");
      result = builder.getBool(false, loc);
    }

    return emitScalarConversion(result, cgf.getContext().BoolTy, e->getType(),
                                e->getExprLoc());
  }

// Comparisons.
#define VISITCOMP(CODE)                                                        \
  mlir::Value VisitBin##CODE(const BinaryOperator *E) { return emitCmp(E); }
  VISITCOMP(LT)
  VISITCOMP(GT)
  VISITCOMP(LE)
  VISITCOMP(GE)
  VISITCOMP(EQ)
  VISITCOMP(NE)
#undef VISITCOMP

  mlir::Value VisitBinAssign(const BinaryOperator *e) {
    const bool ignore = std::exchange(ignoreResultAssign, false);

    mlir::Value rhs;
    LValue lhs;

    switch (e->getLHS()->getType().getObjCLifetime()) {
    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Autoreleasing:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Weak:
      assert(!cir::MissingFeatures::objCLifetime());
      break;
    case Qualifiers::OCL_None:
      // __block variables need to have the rhs evaluated first, plus this
      // should improve codegen just a little.
      rhs = Visit(e->getRHS());
      assert(!cir::MissingFeatures::sanitizers());
      // TODO(cir): This needs to be emitCheckedLValue() once we support
      // sanitizers
      lhs = cgf.emitLValue(e->getLHS());

      // Store the value into the LHS. Bit-fields are handled specially because
      // the result is altered by the store, i.e., [C99 6.5.16p1]
      // 'An assignment expression has the value of the left operand after the
      // assignment...'.
      if (lhs.isBitField()) {
        rhs = cgf.emitStoreThroughBitfieldLValue(RValue::get(rhs), lhs);
      } else {
        cgf.emitNullabilityCheck(lhs, rhs, e->getExprLoc());
        CIRGenFunction::SourceLocRAIIObject loc{
            cgf, cgf.getLoc(e->getSourceRange())};
        cgf.emitStoreThroughLValue(RValue::get(rhs), lhs);
      }
    }

    // If the result is clearly ignored, return now.
    if (ignore)
      return nullptr;

    // The result of an assignment in C is the assigned r-value.
    if (!cgf.getLangOpts().CPlusPlus)
      return rhs;

    // If the lvalue is non-volatile, return the computed value of the
    // assignment.
    if (!lhs.isVolatile())
      return rhs;

    // Otherwise, reload the value.
    return emitLoadOfLValue(lhs, e->getExprLoc());
  }

  mlir::Value VisitBinComma(const BinaryOperator *e) {
    cgf.emitIgnoredExpr(e->getLHS());
    // NOTE: We don't need to EnsureInsertPoint() like LLVM codegen.
    return Visit(e->getRHS());
  }

  mlir::Value VisitBinLAnd(const clang::BinaryOperator *e) {
    if (e->getType()->isVectorType()) {
      assert(!cir::MissingFeatures::vectorType());
      return {};
    }

    assert(!cir::MissingFeatures::instrumentation());
    mlir::Type resTy = cgf.convertType(e->getType());
    mlir::Location loc = cgf.getLoc(e->getExprLoc());

    CIRGenFunction::ConditionalEvaluation eval(cgf);
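
    // 'lhs && rhs' lowers to cir.ternary(lhs, { yield rhs as bool },
    // { yield false }); the RHS region below is only evaluated when the LHS
    // is true, preserving C's short-circuit semantics.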

    mlir::Value lhsCondV = cgf.evaluateExprAsBool(e->getLHS());
    auto resOp = builder.create<cir::TernaryOp>(
        loc, lhsCondV, /*trueBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          CIRGenFunction::LexicalScope lexScope{cgf, loc,
                                                b.getInsertionBlock()};
          cgf.curLexScope->setAsTernary();
          b.create<cir::YieldOp>(loc, cgf.evaluateExprAsBool(e->getRHS()));
        },
        /*falseBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          CIRGenFunction::LexicalScope lexScope{cgf, loc,
                                                b.getInsertionBlock()};
          cgf.curLexScope->setAsTernary();
          auto res = b.create<cir::ConstantOp>(loc, builder.getFalseAttr());
          b.create<cir::YieldOp>(loc, res.getRes());
        });
    return maybePromoteBoolResult(resOp.getResult(), resTy);
  }

  mlir::Value VisitBinLOr(const clang::BinaryOperator *e) {
    if (e->getType()->isVectorType()) {
      assert(!cir::MissingFeatures::vectorType());
      return {};
    }

    assert(!cir::MissingFeatures::instrumentation());
    mlir::Type resTy = cgf.convertType(e->getType());
    mlir::Location loc = cgf.getLoc(e->getExprLoc());

    CIRGenFunction::ConditionalEvaluation eval(cgf);
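
    // 'lhs || rhs' lowers to cir.ternary(lhs, { yield true },
    // { yield rhs as bool }); the RHS region below is only evaluated when
    // the LHS is false, preserving C's short-circuit semantics.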

    mlir::Value lhsCondV = cgf.evaluateExprAsBool(e->getLHS());
    auto resOp = builder.create<cir::TernaryOp>(
        loc, lhsCondV, /*trueBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          CIRGenFunction::LexicalScope lexScope{cgf, loc,
                                                b.getInsertionBlock()};
          cgf.curLexScope->setAsTernary();
          auto res = b.create<cir::ConstantOp>(loc, builder.getTrueAttr());
          b.create<cir::YieldOp>(loc, res.getRes());
        },
        /*falseBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          CIRGenFunction::LexicalScope lexScope{cgf, loc,
                                                b.getInsertionBlock()};
          cgf.curLexScope->setAsTernary();
          b.create<cir::YieldOp>(loc, cgf.evaluateExprAsBool(e->getRHS()));
        });

    return maybePromoteBoolResult(resOp.getResult(), resTy);
  }
};

LValue ScalarExprEmitter::emitCompoundAssignLValue(
    const CompoundAssignOperator *e,
    mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &),
    mlir::Value &result) {
  QualType lhsTy = e->getLHS()->getType();
  BinOpInfo opInfo;

  if (e->getComputationResultType()->isAnyComplexType()) {
    cgf.cgm.errorNYI(result.getLoc(), "complex lvalue assign");
    return LValue();
  }

  // Emit the RHS first. __block variables need to have the rhs evaluated
  // first, plus this should improve codegen a little.

  QualType promotionTypeCR = getPromotionType(e->getComputationResultType());
  if (promotionTypeCR.isNull())
    promotionTypeCR = e->getComputationResultType();

  QualType promotionTypeLHS = getPromotionType(e->getComputationLHSType());
  QualType promotionTypeRHS = getPromotionType(e->getRHS()->getType());

  if (!promotionTypeRHS.isNull())
    opInfo.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionTypeRHS);
  else
    opInfo.rhs = Visit(e->getRHS());

  opInfo.fullType = promotionTypeCR;
  opInfo.compType = opInfo.fullType;
  if (const auto *vecType = dyn_cast_or_null<VectorType>(opInfo.fullType))
    opInfo.compType = vecType->getElementType();
  opInfo.opcode = e->getOpcode();
  opInfo.fpfeatures = e->getFPFeaturesInEffect(cgf.getLangOpts());
  opInfo.e = e;
  opInfo.loc = e->getSourceRange();

  // Load/convert the LHS
  LValue lhsLV = cgf.emitLValue(e->getLHS());

  if (lhsTy->getAs<AtomicType>()) {
    cgf.cgm.errorNYI(result.getLoc(), "atomic lvalue assign");
    return LValue();
  }

  opInfo.lhs = emitLoadOfLValue(lhsLV, e->getExprLoc());

  CIRGenFunction::SourceLocRAIIObject sourceloc{
      cgf, cgf.getLoc(e->getSourceRange())};
  SourceLocation loc = e->getExprLoc();
  if (!promotionTypeLHS.isNull())
    opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy, promotionTypeLHS, loc);
  else
    opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy,
                                      e->getComputationLHSType(), loc);

  // Expand the binary operator.
  result = (this->*func)(opInfo);

  // Convert the result back to the LHS type,
  // potentially with implicit conversion sanitizer checks.
  result = emitScalarConversion(result, promotionTypeCR, lhsTy, loc,
                                ScalarConversionOpts(cgf.sanOpts));

  // Store the result value into the LHS lvalue. Bit-fields are handled
  // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
  // 'An assignment expression has the value of the left operand after the
  // assignment...'.
  if (lhsLV.isBitField())
    cgf.cgm.errorNYI(e->getSourceRange(), "store through bitfield lvalue");
  else
    cgf.emitStoreThroughLValue(RValue::get(result), lhsLV);

  if (cgf.getLangOpts().OpenMP)
    cgf.cgm.errorNYI(e->getSourceRange(), "openmp");

  return lhsLV;
}

mlir::Value ScalarExprEmitter::emitPromoted(const Expr *e,
                                            QualType promotionType) {
  e = e->IgnoreParens();
  if (const auto *bo = dyn_cast<BinaryOperator>(e)) {
    switch (bo->getOpcode()) {
#define HANDLE_BINOP(OP)                                                       \
  case BO_##OP:                                                                \
    return emit##OP(emitBinOps(bo, promotionType));
      HANDLE_BINOP(Add)
      HANDLE_BINOP(Sub)
      HANDLE_BINOP(Mul)
      HANDLE_BINOP(Div)
#undef HANDLE_BINOP
    default:
      break;
    }
  } else if (isa<UnaryOperator>(e)) {
    cgf.cgm.errorNYI(e->getSourceRange(), "unary operators");
    return {};
  }
  mlir::Value result = Visit(const_cast<Expr *>(e));
  if (result) {
    if (!promotionType.isNull())
      return emitPromotedValue(result, promotionType);
    return emitUnPromotedValue(result, e->getType());
  }
  return result;
}

mlir::Value ScalarExprEmitter::emitCompoundAssign(
    const CompoundAssignOperator *e,
    mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &)) {

  bool ignore = std::exchange(ignoreResultAssign, false);
  mlir::Value rhs;
  LValue lhs = emitCompoundAssignLValue(e, func, rhs);

  // If the result is clearly ignored, return now.
  if (ignore)
    return {};

  // The result of an assignment in C is the assigned r-value.
  if (!cgf.getLangOpts().CPlusPlus)
    return rhs;

  // If the lvalue is non-volatile, return the computed value of the
  // assignment.
  if (!lhs.isVolatile())
    return rhs;

  // Otherwise, reload the value.
  return emitLoadOfLValue(lhs, e->getExprLoc());
}

} // namespace

LValue
CIRGenFunction::emitCompoundAssignmentLValue(const CompoundAssignOperator *e) {
  ScalarExprEmitter emitter(*this, builder);
  mlir::Value result;
  switch (e->getOpcode()) {
#define COMPOUND_OP(Op)                                                        \
  case BO_##Op##Assign:                                                        \
    return emitter.emitCompoundAssignLValue(e, &ScalarExprEmitter::emit##Op,  \
                                             result)
  COMPOUND_OP(Mul);
  COMPOUND_OP(Div);
  COMPOUND_OP(Rem);
  COMPOUND_OP(Add);
  COMPOUND_OP(Sub);
  COMPOUND_OP(Shl);
  COMPOUND_OP(Shr);
  COMPOUND_OP(And);
  COMPOUND_OP(Xor);
  COMPOUND_OP(Or);
#undef COMPOUND_OP

  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Add:
  case BO_Sub:
  case BO_Shl:
  case BO_Shr:
  case BO_LT:
  case BO_GT:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_Cmp:
  case BO_And:
  case BO_Xor:
  case BO_Or:
  case BO_LAnd:
  case BO_LOr:
  case BO_Assign:
  case BO_Comma:
    llvm_unreachable("Not valid compound assignment operators");
  }
  llvm_unreachable("Unhandled compound assignment operator");
}

/// Emit the computation of the specified expression of scalar type.
mlir::Value CIRGenFunction::emitScalarExpr(const Expr *e) {
  assert(e && hasScalarEvaluationKind(e->getType()) &&
         "Invalid scalar expression to emit");

  return ScalarExprEmitter(*this, builder).Visit(const_cast<Expr *>(e));
}

mlir::Value CIRGenFunction::emitPromotedScalarExpr(const Expr *e,
                                                   QualType promotionType) {
  if (!promotionType.isNull())
    return ScalarExprEmitter(*this, builder).emitPromoted(e, promotionType);
  return ScalarExprEmitter(*this, builder).Visit(const_cast<Expr *>(e));
}

[[maybe_unused]] static bool mustVisitNullValue(const Expr *e) {
  // If a null pointer expression's type is the C++11 nullptr_t and
  // the expression is not a simple literal, it must be evaluated
  // for its potential side effects.
  if (isa<IntegerLiteral>(e) || isa<CXXNullPtrLiteralExpr>(e))
    return false;
  return e->getType()->isNullPtrType();
}

/// If \p e is a widened promoted integer, get its base (unpromoted) type.
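/// For example, given 'short s;', each operand of 's + s' is an implicit
/// promotion of 'short' to 'int'; for either operand this returns 'short'.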
static std::optional<QualType>
getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e) {
  const Expr *base = e->IgnoreImpCasts();
  if (e == base)
    return std::nullopt;

  QualType baseTy = base->getType();
  if (!astContext.isPromotableIntegerType(baseTy) ||
      astContext.getTypeSize(baseTy) >= astContext.getTypeSize(e->getType()))
    return std::nullopt;

  return baseTy;
}

/// Check if \p e is a widened promoted integer.
[[maybe_unused]] static bool isWidenedIntegerOp(const ASTContext &astContext,
                                                const Expr *e) {
  return getUnwidenedIntegerType(astContext, e).has_value();
}

/// Check if we can skip the overflow check for \p op.
[[maybe_unused]] static bool canElideOverflowCheck(const ASTContext &astContext,
                                                   const BinOpInfo &op) {
  assert((isa<UnaryOperator>(op.e) || isa<BinaryOperator>(op.e)) &&
         "Expected a unary or binary operator");

  // If the binop has constant inputs and we can prove there is no overflow,
  // we can elide the overflow check.
  if (!op.mayHaveIntegerOverflow())
    return true;

  // If a unary op has a widened operand, the op cannot overflow.
  if (const auto *uo = dyn_cast<UnaryOperator>(op.e))
    return !uo->canOverflow();

  // We usually don't need overflow checks for binops with widened operands.
  // Multiplication with promoted unsigned operands is a special case.
  const auto *bo = cast<BinaryOperator>(op.e);
  std::optional<QualType> optionalLHSTy =
      getUnwidenedIntegerType(astContext, bo->getLHS());
  if (!optionalLHSTy)
    return false;

  std::optional<QualType> optionalRHSTy =
      getUnwidenedIntegerType(astContext, bo->getRHS());
  if (!optionalRHSTy)
    return false;

  QualType lhsTy = *optionalLHSTy;
  QualType rhsTy = *optionalRHSTy;

  // This is the simple case: binops without unsigned multiplication, and with
  // widened operands. No overflow check is needed here.
  if ((op.opcode != BO_Mul && op.opcode != BO_MulAssign) ||
      !lhsTy->isUnsignedIntegerType() || !rhsTy->isUnsignedIntegerType())
    return true;

  // For unsigned multiplication the overflow check can be elided if either one
  // of the unpromoted types is less than half the size of the promoted type.
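  // Worked example, assuming a 32-bit int: promoted 8-bit unsigned chars
  // satisfy 2 * 8 < 32 (255 * 255 fits in an int), so the check can be
  // elided; 16-bit unsigned shorts do not (65535 * 65535 > INT_MAX), so the
  // check must stay.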
  unsigned promotedSize = astContext.getTypeSize(op.e->getType());
  return (2 * astContext.getTypeSize(lhsTy)) < promotedSize ||
         (2 * astContext.getTypeSize(rhsTy)) < promotedSize;
}

/// Emit pointer + index arithmetic.
static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf,
                                         const BinOpInfo &op,
                                         bool isSubtraction) {
  // Must have binary (not unary) expr here. Unary pointer
  // increment/decrement doesn't use this path.
  const BinaryOperator *expr = cast<BinaryOperator>(op.e);

  mlir::Value pointer = op.lhs;
  Expr *pointerOperand = expr->getLHS();
  mlir::Value index = op.rhs;
  Expr *indexOperand = expr->getRHS();

  // In the case of subtraction, the FE has ensured that the LHS is always the
  // pointer. However, addition can have the pointer on either side. We will
  // always have a pointer operand and an integer operand, so if the LHS wasn't
  // a pointer, we need to swap our values.
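  // For example, in 'i + p' the pointer arrives in op.rhs (and thus in
  // 'index'), so the swap below restores 'pointer' as the stride base.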
  if (!isSubtraction && !mlir::isa<cir::PointerType>(pointer.getType())) {
    std::swap(pointer, index);
    std::swap(pointerOperand, indexOperand);
  }
  assert(mlir::isa<cir::PointerType>(pointer.getType()) &&
         "Need a pointer operand");
  assert(mlir::isa<cir::IntType>(index.getType()) && "Need an integer operand");

  // Some versions of glibc and gcc use idioms (particularly in their malloc
  // routines) that add a pointer-sized integer (known to be a pointer value)
  // to a null pointer in order to cast the value back to an integer or as
  // part of a pointer alignment algorithm. This is undefined behavior, but
  // we'd like to be able to compile programs that use it.
  //
  // Normally, we'd generate a GEP with a null-pointer base here in response
  // to that code, but it's also UB to dereference a pointer created that
  // way. Instead (as an acknowledged hack to tolerate the idiom) we will
  // generate a direct cast of the integer value to a pointer.
  //
  // The idiom (p = nullptr + N) is not met if any of the following are true:
  //
  //   The operation is subtraction.
  //   The index is not pointer-sized.
  //   The pointer type is not byte-sized.
  //
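  // An illustrative instance of the idiom (not from any particular
  // allocator):
  //
  //   uintptr_t n = ...;        // known to hold a pointer value
  //   void *p = (char *)0 + n;  // becomes a direct int-to-ptr cast
  //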
1345 if (BinaryOperator::isNullPointerArithmeticExtension(
1346 Ctx&: cgf.getContext(), Opc: op.opcode, LHS: expr->getLHS(), RHS: expr->getRHS()))
1347 return cgf.getBuilder().createIntToPtr(index, pointer.getType());
1348
1349 // Differently from LLVM codegen, ABI bits for index sizes is handled during
1350 // LLVM lowering.
1351
1352 // If this is subtraction, negate the index.
1353 if (isSubtraction)
1354 index = cgf.getBuilder().createNeg(index);
1355
1356 assert(!cir::MissingFeatures::sanitizers());
1357
1358 const PointerType *pointerType =
1359 pointerOperand->getType()->getAs<PointerType>();
1360 if (!pointerType) {
1361 cgf.cgm.errorNYI(feature: "Objective-C:pointer arithmetic with non-pointer type");
1362 return nullptr;
1363 }
1364
1365 QualType elementType = pointerType->getPointeeType();
1366 if (cgf.getContext().getAsVariableArrayType(T: elementType)) {
1367 cgf.cgm.errorNYI(feature: "variable array type");
1368 return nullptr;
1369 }
1370
1371 if (elementType->isVoidType() || elementType->isFunctionType()) {
1372 cgf.cgm.errorNYI(feature: "void* or function pointer arithmetic");
1373 return nullptr;
1374 }
1375
1376 assert(!cir::MissingFeatures::sanitizers());
1377 return cgf.getBuilder().create<cir::PtrStrideOp>(
1378 cgf.getLoc(op.e->getExprLoc()), pointer.getType(), pointer, index);
1379}
1380
1381mlir::Value ScalarExprEmitter::emitMul(const BinOpInfo &ops) {
1382 const mlir::Location loc = cgf.getLoc(ops.loc);
1383 if (ops.compType->isSignedIntegerOrEnumerationType()) {
1384 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1385 case LangOptions::SOB_Defined:
1386 if (!cgf.sanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
1387 return builder.createMul(loc, ops.lhs, ops.rhs);
1388 [[fallthrough]];
1389 case LangOptions::SOB_Undefined:
1390 if (!cgf.sanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
1391 return builder.createNSWMul(loc, ops.lhs, ops.rhs);
1392 [[fallthrough]];
1393 case LangOptions::SOB_Trapping:
1394 if (canElideOverflowCheck(astContext: cgf.getContext(), op: ops))
1395 return builder.createNSWMul(loc, ops.lhs, ops.rhs);
1396 cgf.cgm.errorNYI(feature: "sanitizers");
1397 }
1398 }
1399 if (ops.fullType->isConstantMatrixType()) {
1400 assert(!cir::MissingFeatures::matrixType());
1401 cgf.cgm.errorNYI(feature: "matrix types");
1402 return nullptr;
1403 }
1404 if (ops.compType->isUnsignedIntegerType() &&
1405 cgf.sanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow) &&
1406 !canElideOverflowCheck(astContext: cgf.getContext(), op: ops))
1407 cgf.cgm.errorNYI(feature: "unsigned int overflow sanitizer");
1408
1409 if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1410 assert(!cir::MissingFeatures::cgFPOptionsRAII());
1411 return builder.createFMul(loc, ops.lhs, ops.rhs);
1412 }
1413
1414 if (ops.isFixedPointOp()) {
1415 assert(!cir::MissingFeatures::fixedPointType());
1416 cgf.cgm.errorNYI(feature: "fixed point");
1417 return nullptr;
1418 }
1419
1420 return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1421 cgf.convertType(ops.fullType),
1422 cir::BinOpKind::Mul, ops.lhs, ops.rhs);
1423}
1424mlir::Value ScalarExprEmitter::emitDiv(const BinOpInfo &ops) {
1425 return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1426 cgf.convertType(ops.fullType),
1427 cir::BinOpKind::Div, ops.lhs, ops.rhs);
1428}
1429mlir::Value ScalarExprEmitter::emitRem(const BinOpInfo &ops) {
1430 return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1431 cgf.convertType(ops.fullType),
1432 cir::BinOpKind::Rem, ops.lhs, ops.rhs);
1433}
1434
1435mlir::Value ScalarExprEmitter::emitAdd(const BinOpInfo &ops) {
1436 if (mlir::isa<cir::PointerType>(ops.lhs.getType()) ||
1437 mlir::isa<cir::PointerType>(ops.rhs.getType()))
1438 return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/false);
1439
1440 const mlir::Location loc = cgf.getLoc(ops.loc);
1441 if (ops.compType->isSignedIntegerOrEnumerationType()) {
1442 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1443 case LangOptions::SOB_Defined:
1444 if (!cgf.sanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
1445 return builder.createAdd(loc, ops.lhs, ops.rhs);
1446 [[fallthrough]];
1447 case LangOptions::SOB_Undefined:
1448 if (!cgf.sanOpts.has(K: SanitizerKind::SignedIntegerOverflow))
1449 return builder.createNSWAdd(loc, ops.lhs, ops.rhs);
1450 [[fallthrough]];
1451 case LangOptions::SOB_Trapping:
1452 if (canElideOverflowCheck(astContext: cgf.getContext(), op: ops))
1453 return builder.createNSWAdd(loc, ops.lhs, ops.rhs);
1454 cgf.cgm.errorNYI(feature: "sanitizers");
1455 }
1456 }
1457 if (ops.fullType->isConstantMatrixType()) {
1458 assert(!cir::MissingFeatures::matrixType());
1459 cgf.cgm.errorNYI(feature: "matrix types");
1460 return nullptr;
1461 }
1462
1463 if (ops.compType->isUnsignedIntegerType() &&
1464 cgf.sanOpts.has(K: SanitizerKind::UnsignedIntegerOverflow) &&
1465 !canElideOverflowCheck(astContext: cgf.getContext(), op: ops))
1466 cgf.cgm.errorNYI(feature: "unsigned int overflow sanitizer");
1467
1468 if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1469 assert(!cir::MissingFeatures::cgFPOptionsRAII());
1470 return builder.createFAdd(loc, ops.lhs, ops.rhs);
1471 }
1472
1473 if (ops.isFixedPointOp()) {
1474 assert(!cir::MissingFeatures::fixedPointType());
1475 cgf.cgm.errorNYI(feature: "fixed point");
1476 return {};
1477 }
1478
1479 return builder.create<cir::BinOp>(loc, cgf.convertType(ops.fullType),
1480 cir::BinOpKind::Add, ops.lhs, ops.rhs);
1481}

mlir::Value ScalarExprEmitter::emitSub(const BinOpInfo &ops) {
  const mlir::Location loc = cgf.getLoc(ops.loc);
  // The LHS is always a pointer if either side is.
  if (!mlir::isa<cir::PointerType>(ops.lhs.getType())) {
    if (ops.compType->isSignedIntegerOrEnumerationType()) {
      switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return builder.createSub(loc, ops.lhs, ops.rhs);
        [[fallthrough]];
      case LangOptions::SOB_Undefined:
        if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return builder.createNSWSub(loc, ops.lhs, ops.rhs);
        [[fallthrough]];
      case LangOptions::SOB_Trapping:
        if (canElideOverflowCheck(cgf.getContext(), ops))
          return builder.createNSWSub(loc, ops.lhs, ops.rhs);
        cgf.cgm.errorNYI("sanitizers");
      }
    }

    if (ops.fullType->isConstantMatrixType()) {
      assert(!cir::MissingFeatures::matrixType());
      cgf.cgm.errorNYI("matrix types");
      return {};
    }

    if (ops.compType->isUnsignedIntegerType() &&
        cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !canElideOverflowCheck(cgf.getContext(), ops))
      cgf.cgm.errorNYI("unsigned int overflow sanitizer");

    if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
      assert(!cir::MissingFeatures::cgFPOptionsRAII());
      return builder.createFSub(loc, ops.lhs, ops.rhs);
    }

    if (ops.isFixedPointOp()) {
      assert(!cir::MissingFeatures::fixedPointType());
      cgf.cgm.errorNYI("fixed point");
      return {};
    }

    return builder.create<cir::BinOp>(loc, cgf.convertType(ops.fullType),
                                      cir::BinOpKind::Sub, ops.lhs, ops.rhs);
  }

  // If the RHS is not a pointer, then we have normal pointer arithmetic.
  if (!mlir::isa<cir::PointerType>(ops.rhs.getType()))
    return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/true);

  // Otherwise, this is a pointer subtraction.

  // Do the raw subtraction part.
  //
  // TODO(cir): note for LLVM lowering: when expanding this into LLVM IR we
  // will need to handle VLAs, division by the element size, etc.
  //
  // See `EmitSub` in CGExprScalar.cpp for the details.
  assert(!cir::MissingFeatures::ptrDiffOp());
  cgf.cgm.errorNYI("ptrdiff");
  return {};
}
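
// Sketch of the three subtraction shapes handled above (op names outside
// cir.binop are approximations, not verified output):
//   int - int -> cir.binop(sub, %a, %b) : !s32i
//   ptr - int -> pointer arithmetic via emitPointerArithmetic, e.g. a
//                cir.ptr_stride-style op with a negated index
//   ptr - ptr -> pointer difference, still NYI at this point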

mlir::Value ScalarExprEmitter::emitShl(const BinOpInfo &ops) {
  // TODO: This misses out on the sanitizer check below.
  if (ops.isFixedPointOp()) {
    assert(!cir::MissingFeatures::fixedPointType());
    cgf.cgm.errorNYI("fixed point");
    return {};
  }

  // CIR accepts shifts between operands of different types, so nothing
  // special needs to be done here. LLVM, on the other hand, requires the LHS
  // and RHS to have the same type, so lowering will promote or truncate the
  // RHS to the size of the LHS.

  bool sanitizeSignedBase = cgf.sanOpts.has(SanitizerKind::ShiftBase) &&
                            ops.compType->hasSignedIntegerRepresentation() &&
                            !cgf.getLangOpts().isSignedOverflowDefined() &&
                            !cgf.getLangOpts().CPlusPlus20;
  bool sanitizeUnsignedBase =
      cgf.sanOpts.has(SanitizerKind::UnsignedShiftBase) &&
      ops.compType->hasUnsignedIntegerRepresentation();
  bool sanitizeBase = sanitizeSignedBase || sanitizeUnsignedBase;
  bool sanitizeExponent = cgf.sanOpts.has(SanitizerKind::ShiftExponent);

  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (cgf.getLangOpts().OpenCL)
    cgf.cgm.errorNYI("opencl");
  else if ((sanitizeBase || sanitizeExponent) &&
           mlir::isa<cir::IntType>(ops.lhs.getType()))
    cgf.cgm.errorNYI("sanitizers");

  return builder.createShiftLeft(cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
}

mlir::Value ScalarExprEmitter::emitShr(const BinOpInfo &ops) {
  // TODO: This misses out on the sanitizer check below.
  if (ops.isFixedPointOp()) {
    assert(!cir::MissingFeatures::fixedPointType());
    cgf.cgm.errorNYI("fixed point");
    return {};
  }

  // CIR accepts shifts between operands of different types, so nothing
  // special needs to be done here. LLVM, on the other hand, requires the LHS
  // and RHS to have the same type, so lowering will promote or truncate the
  // RHS to the size of the LHS.

  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (cgf.getLangOpts().OpenCL)
    cgf.cgm.errorNYI("opencl");
  else if (cgf.sanOpts.has(SanitizerKind::ShiftExponent) &&
           mlir::isa<cir::IntType>(ops.lhs.getType()))
    cgf.cgm.errorNYI("sanitizers");

  // Note that we don't need to distinguish signed from unsigned shifts here;
  // that distinction is handled later, during LLVM lowering.
  return builder.createShiftRight(cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
}
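
// Because the shift amount may be narrower or wider than the shifted value,
// a CIR shift prints both operand types, roughly (illustrative):
//   %2 = cir.shift(left, %0 : !s32i, %1 : !u8i) -> !s32i
// The RHS is only forced to match the LHS width during LLVM lowering.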

mlir::Value ScalarExprEmitter::emitAnd(const BinOpInfo &ops) {
  return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
                                    cgf.convertType(ops.fullType),
                                    cir::BinOpKind::And, ops.lhs, ops.rhs);
}

mlir::Value ScalarExprEmitter::emitXor(const BinOpInfo &ops) {
  return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
                                    cgf.convertType(ops.fullType),
                                    cir::BinOpKind::Xor, ops.lhs, ops.rhs);
}

mlir::Value ScalarExprEmitter::emitOr(const BinOpInfo &ops) {
  return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
                                    cgf.convertType(ops.fullType),
                                    cir::BinOpKind::Or, ops.lhs, ops.rhs);
}

// Emit code for an explicit or implicit cast. Implicit casts have to handle
// a broader range of conversions than explicit casts, as they handle things
// like function-to-pointer-to-function decay, etc.
mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *ce) {
  Expr *subExpr = ce->getSubExpr();
  QualType destTy = ce->getType();
  CastKind kind = ce->getCastKind();

  // These cases are generally not written to ignore the result of evaluating
  // their sub-expressions, so we clear this now.
  ignoreResultAssign = false;

  switch (kind) {
  case clang::CK_Dependent:
    llvm_unreachable("dependent cast kind in CIR gen!");
  case clang::CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_BitCast: {
    mlir::Value src = Visit(const_cast<Expr *>(subExpr));
    mlir::Type dstTy = cgf.convertType(destTy);

    assert(!cir::MissingFeatures::addressSpace());

    if (cgf.sanOpts.has(SanitizerKind::CFIUnrelatedCast))
      cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
                                     "sanitizer support");

    if (cgf.cgm.getCodeGenOpts().StrictVTablePointers)
      cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
                                     "strict vtable pointers");

    // Update heapallocsite metadata when there is an explicit pointer cast.
    assert(!cir::MissingFeatures::addHeapAllocSiteMetadata());

    // If Src is a fixed vector and Dst is a scalable vector, and both have
    // the same element type, use the llvm.vector.insert intrinsic to perform
    // the bitcast.
    assert(!cir::MissingFeatures::scalableVectors());

    // If Src is a scalable vector and Dst is a fixed vector, and both have
    // the same element type, use the llvm.vector.extract intrinsic to
    // perform the bitcast.
    assert(!cir::MissingFeatures::scalableVectors());

    // Perform VLAT <-> VLST bitcast through memory.
    // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
    //       require the element types of the vectors to be the same, we
    //       need to keep this around for bitcasts between VLAT <-> VLST
    //       where the element types of the vectors are not the same, until
    //       we figure out a better way of doing these casts.
    assert(!cir::MissingFeatures::scalableVectors());

    return cgf.getBuilder().createBitcast(
        cgf.getLoc(subExpr->getSourceRange()), src, dstTy);
  }

  case CK_AtomicToNonAtomic: {
    cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
                                   "CastExpr: ", ce->getCastKindName());
    mlir::Location loc = cgf.getLoc(subExpr->getSourceRange());
    return cgf.createDummyValue(loc, destTy);
  }
  case CK_NonAtomicToAtomic:
  case CK_UserDefinedConversion:
    return Visit(const_cast<Expr *>(subExpr));
  case CK_NoOp: {
    auto v = Visit(const_cast<Expr *>(subExpr));
    if (v) {
      // CK_NoOp can model a pointer qualification conversion, which can
      // remove an array bound and change the IR type.
      // FIXME: Once pointee types are removed from IR, remove this.
      mlir::Type t = cgf.convertType(destTy);
      if (t != v.getType())
        cgf.getCIRGenModule().errorNYI("pointer qualification conversion");
    }
    return v;
  }

  case CK_ArrayToPointerDecay:
    return cgf.emitArrayToPointerDecay(subExpr).getPointer();

  case CK_NullToPointer: {
    if (mustVisitNullValue(subExpr))
      cgf.emitIgnoredExpr(subExpr);

    // Note that destTy is used as the MLIR type instead of a custom
    // nullptr type.
    mlir::Type ty = cgf.convertType(destTy);
    return builder.getNullPtr(ty, cgf.getLoc(subExpr->getExprLoc()));
  }
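
  // A null pointer constant is materialized directly in the destination
  // pointer type; `int *p = nullptr;` yields approximately:
  //   %0 = cir.const #cir.ptr<null> : !cir.ptr<!s32i>
  // (illustrative; the exact attribute spelling may differ).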

  case CK_LValueToRValue:
    assert(
        cgf.getContext().hasSameUnqualifiedType(subExpr->getType(), destTy));
    assert(subExpr->isGLValue() && "lvalue-to-rvalue applied to r-value!");
    return Visit(const_cast<Expr *>(subExpr));

  case CK_IntegralCast: {
    ScalarConversionOpts opts;
    if (auto *ice = dyn_cast<ImplicitCastExpr>(ce)) {
      if (!ice->isPartOfExplicitCast())
        opts = ScalarConversionOpts(cgf.sanOpts);
    }
    return emitScalarConversion(Visit(subExpr), subExpr->getType(), destTy,
                                ce->getExprLoc(), opts);
  }
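
  // Width or signedness changes survive as explicit casts in CIR; for an
  // `int i`, `long l = i;` is expected to become approximately:
  //   %1 = cir.cast(integral, %0 : !s32i), !s64i
  // (illustrative; the exact printed form may differ).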

  case CK_FloatingRealToComplex:
  case CK_FloatingComplexCast:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_FloatingComplexToIntegralComplex:
    llvm_unreachable("scalar cast to non-scalar value");

  case CK_PointerToIntegral: {
    assert(!destTy->isBooleanType() && "bool should use PointerToBool");
    if (cgf.cgm.getCodeGenOpts().StrictVTablePointers)
      cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
                                     "strict vtable pointers");
    return builder.createPtrToInt(Visit(subExpr), cgf.convertType(destTy));
  }
  case CK_ToVoid:
    cgf.emitIgnoredExpr(subExpr);
    return {};

  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingCast:
  case CK_FixedPointToFloating:
  case CK_FloatingToFixedPoint: {
    if (kind == CK_FixedPointToFloating || kind == CK_FloatingToFixedPoint) {
      cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
                                     "fixed point casts");
      return {};
    }
    assert(!cir::MissingFeatures::cgFPOptionsRAII());
    return emitScalarConversion(Visit(subExpr), subExpr->getType(), destTy,
                                ce->getExprLoc());
  }

  case CK_IntegralToBoolean:
    return emitIntToBoolConversion(Visit(subExpr),
                                   cgf.getLoc(ce->getSourceRange()));

  case CK_PointerToBoolean:
    return emitPointerToBoolConversion(Visit(subExpr), subExpr->getType());
  case CK_FloatingToBoolean:
    return emitFloatToBoolConversion(Visit(subExpr),
                                     cgf.getLoc(subExpr->getExprLoc()));
  case CK_MemberPointerToBoolean: {
    mlir::Value memPtr = Visit(subExpr);
    return builder.createCast(cgf.getLoc(ce->getSourceRange()),
                              cir::CastKind::member_ptr_to_bool, memPtr,
                              cgf.convertType(destTy));
  }

  case CK_VectorSplat: {
    // Create a vector object and fill all elements with the same scalar
    // value.
    assert(destTy->isVectorType() && "CK_VectorSplat to non-vector type");
    return builder.create<cir::VecSplatOp>(
        cgf.getLoc(subExpr->getSourceRange()), cgf.convertType(destTy),
        Visit(subExpr));
  }
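
  // With `typedef int v4 __attribute__((vector_size(16)));`, splatting a
  // scalar `x` into a v4 is a single op, approximately:
  //   %1 = cir.vec.splat %0 : !s32i, !cir.vector<!s32i x 4>
  // (illustrative; operand and type order in the printed form may differ).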

  default:
    cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
                                   "CastExpr: ", ce->getCastKindName());
  }
  return {};
}

mlir::Value ScalarExprEmitter::VisitCallExpr(const CallExpr *e) {
  if (e->getCallReturnType(cgf.getContext())->isReferenceType())
    return emitLoadOfLValue(e);

  auto v = cgf.emitCallExpr(e).getScalarVal();
  assert(!cir::MissingFeatures::emitLValueAlignmentAssumption());
  return v;
}

mlir::Value ScalarExprEmitter::VisitMemberExpr(MemberExpr *e) {
  // TODO(cir): The classic codegen calls tryEmitAsConstant() here. Folding
  // constants sounds like work for MLIR optimizers, but we'll keep an
  // assertion for now.
  assert(!cir::MissingFeatures::tryEmitAsConstant());
  Expr::EvalResult result;
  if (e->EvaluateAsInt(result, cgf.getContext(), Expr::SE_AllowSideEffects)) {
    cgf.cgm.errorNYI(e->getSourceRange(), "Constant integer member expr");
    // Fall through to emit this as a non-constant access.
  }
  return emitLoadOfLValue(e);
}

mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *e) {
  const unsigned numInitElements = e->getNumInits();

  if (e->hadArrayRangeDesignator()) {
    cgf.cgm.errorNYI(e->getSourceRange(), "ArrayRangeDesignator");
    return {};
  }

  if (e->getType()->isVectorType()) {
    const auto vectorType =
        mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));

    SmallVector<mlir::Value, 16> elements;
    for (Expr *init : e->inits())
      elements.push_back(Visit(init));

    // Zero-initialize any remaining values.
    if (numInitElements < vectorType.getSize()) {
      const mlir::Value zeroValue = cgf.getBuilder().getNullValue(
          vectorType.getElementType(), cgf.getLoc(e->getSourceRange()));
      std::fill_n(std::back_inserter(elements),
                  vectorType.getSize() - numInitElements, zeroValue);
    }

    return cgf.getBuilder().create<cir::VecCreateOp>(
        cgf.getLoc(e->getSourceRange()), vectorType, elements);
  }

  if (numInitElements == 0) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "InitListExpr Non VectorType with 0 init elements");
    return {};
  }

  return Visit(e->getInit(0));
}
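
// When the initializer list is shorter than the vector, the tail is padded
// with zeros, so `(v4){a, b}` ends up approximately as:
//   %4 = cir.vec.create(%a, %b, %z, %z : ...) : !cir.vector<!s32i x 4>
// where %z is the null value built above (illustrative only).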

mlir::Value CIRGenFunction::emitScalarConversion(mlir::Value src,
                                                 QualType srcTy,
                                                 QualType dstTy,
                                                 SourceLocation loc) {
  assert(CIRGenFunction::hasScalarEvaluationKind(srcTy) &&
         CIRGenFunction::hasScalarEvaluationKind(dstTy) &&
         "Invalid scalar expression to emit");
  return ScalarExprEmitter(*this, builder)
      .emitScalarConversion(src, srcTy, dstTy, loc);
}

mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *e) {
  // Perform vector logical not on comparison with zero vector.
  if (e->getType()->isVectorType() &&
      e->getType()->castAs<VectorType>()->getVectorKind() ==
          VectorKind::Generic) {
    assert(!cir::MissingFeatures::vectorType());
    cgf.cgm.errorNYI(e->getSourceRange(), "vector logical not");
    return {};
  }

  // Compare operand to zero.
  mlir::Value boolVal = cgf.evaluateExprAsBool(e->getSubExpr());

  // Invert value.
  boolVal = builder.createNot(boolVal);

  // ZExt result to the expr type.
  return maybePromoteBoolResult(boolVal, cgf.convertType(e->getType()));
}
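
// So `!x` for an int x takes three steps, approximately:
//   %1 = cir.cast(int_to_bool, %0 : !s32i), !cir.bool   // compare to zero
//   %2 = cir.unary(not, %1) : !cir.bool, !cir.bool      // invert
//   %3 = cir.cast(bool_to_int, %2 : !cir.bool), !s32i   // widen to expr type
// (illustrative; cast kind names and printed forms may differ).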

/// Return the size or alignment of the argument type of a sizeof/alignof
/// style expression as an integer constant.
mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
    const UnaryExprOrTypeTraitExpr *e) {
  const QualType typeToSize = e->getTypeOfArgument();
  const mlir::Location loc = cgf.getLoc(e->getSourceRange());
  if (auto kind = e->getKind();
      kind == UETT_SizeOf || kind == UETT_DataSizeOf) {
    if (cgf.getContext().getAsVariableArrayType(typeToSize)) {
      cgf.getCIRGenModule().errorNYI(e->getSourceRange(),
                                     "sizeof operator for VariableArrayType",
                                     e->getStmtClassName());
      return builder.getConstant(
          loc, builder.getAttr<cir::IntAttr>(
                   cgf.cgm.UInt64Ty, llvm::APSInt(llvm::APInt(64, 1), true)));
    }
  } else if (e->getKind() == UETT_OpenMPRequiredSimdAlign) {
    cgf.getCIRGenModule().errorNYI(
        e->getSourceRange(), "sizeof operator for OpenMPRequiredSimdAlign",
        e->getStmtClassName());
    return builder.getConstant(
        loc, builder.getAttr<cir::IntAttr>(
                 cgf.cgm.UInt64Ty, llvm::APSInt(llvm::APInt(64, 1), true)));
  } else if (e->getKind() == UETT_VectorElements) {
    cgf.getCIRGenModule().errorNYI(e->getSourceRange(),
                                   "sizeof operator for VectorElements",
                                   e->getStmtClassName());
    return builder.getConstant(
        loc, builder.getAttr<cir::IntAttr>(
                 cgf.cgm.UInt64Ty, llvm::APSInt(llvm::APInt(64, 1), true)));
  }

  return builder.getConstant(
      loc, builder.getAttr<cir::IntAttr>(
               cgf.cgm.UInt64Ty, e->EvaluateKnownConstInt(cgf.getContext())));
}
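
// For complete types the result folds at compile time; on a typical 64-bit
// target, `sizeof(int)` becomes approximately:
//   %0 = cir.const #cir.int<4> : !u64i
// (illustrative; the result width follows the target's size_t).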

/// Return true if the specified expression is cheap enough and
/// side-effect-free enough to evaluate unconditionally instead of
/// conditionally. This is used to convert control flow into selects in some
/// cases.
/// TODO(cir): can be shared with LLVM codegen.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e,
                                                   CIRGenFunction &cgf) {
  // Anything that is an integer or floating point constant is fine.
  return e->IgnoreParens()->isEvaluatable(cgf.getContext());

  // Even non-volatile automatic variables can't be evaluated unconditionally.
  // Referencing a thread_local may cause non-trivial initialization work to
  // occur. If we're inside a lambda and one of the variables is from the
  // scope outside the lambda, that function may have returned already.
  // Reading its locals is a bad idea. Also, these reads may introduce races
  // that didn't exist in the source-level program.
}
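
// Under this predicate, `c ? 4 : 5` qualifies for a select since both arms
// are constants, while `c ? f() : 5` (f being any hypothetical function)
// does not, because calling f() unconditionally could change observable
// behavior.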

mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator(
    const AbstractConditionalOperator *e) {
  CIRGenBuilderTy &builder = cgf.getBuilder();
  mlir::Location loc = cgf.getLoc(e->getSourceRange());
  ignoreResultAssign = false;

  // Bind the common expression if necessary.
  CIRGenFunction::OpaqueValueMapping binding(cgf, e);

  Expr *condExpr = e->getCond();
  Expr *lhsExpr = e->getTrueExpr();
  Expr *rhsExpr = e->getFalseExpr();

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm.
  bool condExprBool;
  if (cgf.constantFoldsToBool(condExpr, condExprBool)) {
    Expr *live = lhsExpr, *dead = rhsExpr;
    if (!condExprBool)
      std::swap(live, dead);

    // If the dead side doesn't have labels we need, just emit the live part.
    if (!cgf.containsLabel(dead)) {
      if (condExprBool)
        assert(!cir::MissingFeatures::incrementProfileCounter());
      mlir::Value result = Visit(live);

      // If the live part is a throw expression, it acts like it has a void
      // type, so evaluating it returns a null Value. However, a conditional
      // with non-void type must return a non-null Value.
      if (!result && !e->getType()->isVoidType()) {
        cgf.cgm.errorNYI(e->getSourceRange(),
                         "throw expression in conditional operator");
        result = {};
      }

      return result;
    }
  }

  QualType condType = condExpr->getType();

  // OpenCL: If the condition is a vector, we can treat this condition like
  // the select function.
  if ((cgf.getLangOpts().OpenCL && condType->isVectorType()) ||
      condType->isExtVectorType()) {
    assert(!cir::MissingFeatures::vectorType());
    cgf.cgm.errorNYI(e->getSourceRange(), "vector ternary op");
  }

  if (condType->isVectorType() || condType->isSveVLSBuiltinType()) {
    if (!condType->isVectorType()) {
      assert(!cir::MissingFeatures::vecTernaryOp());
      cgf.cgm.errorNYI(loc, "TernaryOp for SVE vector");
      return {};
    }

    mlir::Value condValue = Visit(condExpr);
    mlir::Value lhsValue = Visit(lhsExpr);
    mlir::Value rhsValue = Visit(rhsExpr);
    return builder.create<cir::VecTernaryOp>(loc, condValue, lhsValue,
                                             rhsValue);
  }

  // If this is a really simple expression (like x ? 4 : 5), emit this as a
  // select instead of as control flow. We can only do this if it is cheap
  // and safe to evaluate the LHS and RHS unconditionally.
  if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, cgf) &&
      isCheapEnoughToEvaluateUnconditionally(rhsExpr, cgf)) {
    bool lhsIsVoid = false;
    mlir::Value condV = cgf.evaluateExprAsBool(condExpr);
    assert(!cir::MissingFeatures::incrementProfileCounter());

    mlir::Value lhs = Visit(lhsExpr);
    if (!lhs) {
      lhs = builder.getNullValue(cgf.VoidTy, loc);
      lhsIsVoid = true;
    }

    mlir::Value rhs = Visit(rhsExpr);
    if (lhsIsVoid) {
      assert(!rhs && "lhs and rhs types must match");
      rhs = builder.getNullValue(cgf.VoidTy, loc);
    }

    return builder.createSelect(loc, condV, lhs, rhs);
  }

  mlir::Value condV = cgf.emitOpOnBoolExpr(loc, condExpr);
  CIRGenFunction::ConditionalEvaluation eval(cgf);
  SmallVector<mlir::OpBuilder::InsertPoint, 2> insertPoints{};
  mlir::Type yieldTy{};

  auto emitBranch = [&](mlir::OpBuilder &b, mlir::Location loc, Expr *expr) {
    CIRGenFunction::LexicalScope lexScope{cgf, loc, b.getInsertionBlock()};
    cgf.curLexScope->setAsTernary();

    assert(!cir::MissingFeatures::incrementProfileCounter());
    eval.beginEvaluation();
    mlir::Value branch = Visit(expr);
    eval.endEvaluation();

    if (branch) {
      yieldTy = branch.getType();
      b.create<cir::YieldOp>(loc, branch);
    } else {
      // If the LHS or RHS is a throw or void expression, we need to patch
      // the arms later so that the yield types properly match.
      insertPoints.push_back(b.saveInsertionPoint());
    }
  };

  mlir::Value result = builder
                           .create<cir::TernaryOp>(
                               loc, condV,
                               /*trueBuilder=*/
                               [&](mlir::OpBuilder &b, mlir::Location loc) {
                                 emitBranch(b, loc, lhsExpr);
                               },
                               /*falseBuilder=*/
                               [&](mlir::OpBuilder &b, mlir::Location loc) {
                                 emitBranch(b, loc, rhsExpr);
                               })
                           .getResult();

  if (!insertPoints.empty()) {
    // If both arms are void, so be it.
    if (!yieldTy)
      yieldTy = cgf.VoidTy;

    // Insert required yields.
    for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
      mlir::OpBuilder::InsertionGuard guard(builder);
      builder.restoreInsertionPoint(toInsert);

      // Block does not return: build empty yield.
      if (mlir::isa<cir::VoidType>(yieldTy)) {
        builder.create<cir::YieldOp>(loc);
      } else { // Block returns: set null yield value.
        mlir::Value op0 = builder.getNullValue(yieldTy, loc);
        builder.create<cir::YieldOp>(loc, op0);
      }
    }
  }

  return result;
}
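
// When real control flow is needed, the conditional becomes a region-based
// ternary op, approximately (illustrative; the exact assembly may differ):
//   %r = cir.ternary(%cond, true {
//     cir.yield %a : !s32i
//   }, false {
//     cir.yield %b : !s32i
//   }) : (!cir.bool) -> !s32i
// The insertion-point patching above fills in yields for throw/void arms.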

mlir::Value CIRGenFunction::emitScalarPrePostIncDec(const UnaryOperator *e,
                                                    LValue lv, bool isInc,
                                                    bool isPre) {
  return ScalarExprEmitter(*this, builder)
      .emitScalarPrePostIncDec(e, lv, isInc, isPre);
}
